1
0
Fork 0
forked from forks/go-ethereum

metrics, cmd/geth: change init-process of metrics (#30814)

This PR modifies how the metrics library handles `Enabled`: previously,
the package `init` decided whether to serve real metrics or just
dummy-types.

This has several drawbacks: 
- During pkg init, we need to determine whether metrics are enabled or
not. So we first hacked in a check for whether certain geth-specific
commandline-flags were enabled. Then we added a similar check for
geth-env-vars. Then we almost added a very elaborate check for
toml-config-file, plus toml parsing.

- Using "real" types and dummy types interchangeably means that
everything is hidden behind interfaces. This has a performance penalty,
and also it just adds a lot of code.

This PR removes the interface stuff, uses concrete types, and allows for
the setting of Enabled to happen later. It is still assumed that
`metrics.Enable()` is invoked early on.

The somewhat 'heavy' operations, such as ticking meters and exp-decay,
now check the enable-flag to prevent resource leaks.

The change may be large, but it's mostly pretty trivial, and from the
last time I gutted the metrics, I ensured that we have fairly good test
coverage.

---------

Co-authored-by: Felix Lange <fjl@twurst.com>
This commit is contained in:
Martin HS 2024-12-10 13:27:29 +01:00 committed by GitHub
parent 4ecf08584c
commit 9045b79bc2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
58 changed files with 779 additions and 1436 deletions

View file

@ -38,7 +38,6 @@ import (
"github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb"
"github.com/ethereum/go-ethereum/internal/era" "github.com/ethereum/go-ethereum/internal/era"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/params"
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
) )
@ -282,14 +281,12 @@ func importChain(ctx *cli.Context) error {
if ctx.Args().Len() < 1 { if ctx.Args().Len() < 1 {
utils.Fatalf("This command requires an argument.") utils.Fatalf("This command requires an argument.")
} }
// Start metrics export if enabled stack, cfg := makeConfigNode(ctx)
utils.SetupMetrics(ctx)
// Start system runtime metrics collection
go metrics.CollectProcessMetrics(3 * time.Second)
stack, _ := makeConfigNode(ctx)
defer stack.Close() defer stack.Close()
// Start metrics export if enabled
utils.SetupMetrics(&cfg.Metrics)
chain, db := utils.MakeChain(ctx, stack, false) chain, db := utils.MakeChain(ctx, stack, false)
defer db.Close() defer db.Close()

View file

@ -192,6 +192,9 @@ func makeFullNode(ctx *cli.Context) *node.Node {
cfg.Eth.OverrideVerkle = &v cfg.Eth.OverrideVerkle = &v
} }
// Start metrics export if enabled
utils.SetupMetrics(&cfg.Metrics)
backend, eth := utils.RegisterEthService(stack, &cfg.Eth) backend, eth := utils.RegisterEthService(stack, &cfg.Eth)
// Create gauge with geth system and build information // Create gauge with geth system and build information
@ -325,6 +328,27 @@ func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) {
if ctx.IsSet(utils.MetricsInfluxDBOrganizationFlag.Name) { if ctx.IsSet(utils.MetricsInfluxDBOrganizationFlag.Name) {
cfg.Metrics.InfluxDBOrganization = ctx.String(utils.MetricsInfluxDBOrganizationFlag.Name) cfg.Metrics.InfluxDBOrganization = ctx.String(utils.MetricsInfluxDBOrganizationFlag.Name)
} }
// Sanity-check the commandline flags. It is fine if some unused fields is part
// of the toml-config, but we expect the commandline to only contain relevant
// arguments, otherwise it indicates an error.
var (
enableExport = ctx.Bool(utils.MetricsEnableInfluxDBFlag.Name)
enableExportV2 = ctx.Bool(utils.MetricsEnableInfluxDBV2Flag.Name)
)
if enableExport || enableExportV2 {
v1FlagIsSet := ctx.IsSet(utils.MetricsInfluxDBUsernameFlag.Name) ||
ctx.IsSet(utils.MetricsInfluxDBPasswordFlag.Name)
v2FlagIsSet := ctx.IsSet(utils.MetricsInfluxDBTokenFlag.Name) ||
ctx.IsSet(utils.MetricsInfluxDBOrganizationFlag.Name) ||
ctx.IsSet(utils.MetricsInfluxDBBucketFlag.Name)
if enableExport && v2FlagIsSet {
utils.Fatalf("Flags --influxdb.metrics.organization, --influxdb.metrics.token, --influxdb.metrics.bucket are only available for influxdb-v2")
} else if enableExportV2 && v1FlagIsSet {
utils.Fatalf("Flags --influxdb.metrics.username, --influxdb.metrics.password are only available for influxdb-v1")
}
}
} }
func setAccountManagerBackends(conf *node.Config, am *accounts.Manager, keydir string) error { func setAccountManagerBackends(conf *node.Config, am *accounts.Manager, keydir string) error {

View file

@ -34,7 +34,6 @@ import (
"github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/debug"
"github.com/ethereum/go-ethereum/internal/flags" "github.com/ethereum/go-ethereum/internal/flags"
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/node"
"go.uber.org/automaxprocs/maxprocs" "go.uber.org/automaxprocs/maxprocs"
@ -325,12 +324,6 @@ func prepare(ctx *cli.Context) {
ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096)) ctx.Set(utils.CacheFlag.Name, strconv.Itoa(4096))
} }
} }
// Start metrics export if enabled
utils.SetupMetrics(ctx)
// Start system runtime metrics collection
go metrics.CollectProcessMetrics(3 * time.Second)
} }
// geth is the main entry point into the system if no special subcommand is run. // geth is the main entry point into the system if no special subcommand is run.

View file

@ -1968,67 +1968,56 @@ func RegisterFullSyncTester(stack *node.Node, eth *eth.Ethereum, target common.H
log.Info("Registered full-sync tester", "hash", target) log.Info("Registered full-sync tester", "hash", target)
} }
func SetupMetrics(ctx *cli.Context) { // SetupMetrics configures the metrics system.
if metrics.Enabled { func SetupMetrics(cfg *metrics.Config) {
log.Info("Enabling metrics collection") if !cfg.Enabled {
return
var (
enableExport = ctx.Bool(MetricsEnableInfluxDBFlag.Name)
enableExportV2 = ctx.Bool(MetricsEnableInfluxDBV2Flag.Name)
)
if enableExport || enableExportV2 {
CheckExclusive(ctx, MetricsEnableInfluxDBFlag, MetricsEnableInfluxDBV2Flag)
v1FlagIsSet := ctx.IsSet(MetricsInfluxDBUsernameFlag.Name) ||
ctx.IsSet(MetricsInfluxDBPasswordFlag.Name)
v2FlagIsSet := ctx.IsSet(MetricsInfluxDBTokenFlag.Name) ||
ctx.IsSet(MetricsInfluxDBOrganizationFlag.Name) ||
ctx.IsSet(MetricsInfluxDBBucketFlag.Name)
if enableExport && v2FlagIsSet {
Fatalf("Flags --influxdb.metrics.organization, --influxdb.metrics.token, --influxdb.metrics.bucket are only available for influxdb-v2")
} else if enableExportV2 && v1FlagIsSet {
Fatalf("Flags --influxdb.metrics.username, --influxdb.metrics.password are only available for influxdb-v1")
}
}
var (
endpoint = ctx.String(MetricsInfluxDBEndpointFlag.Name)
database = ctx.String(MetricsInfluxDBDatabaseFlag.Name)
username = ctx.String(MetricsInfluxDBUsernameFlag.Name)
password = ctx.String(MetricsInfluxDBPasswordFlag.Name)
token = ctx.String(MetricsInfluxDBTokenFlag.Name)
bucket = ctx.String(MetricsInfluxDBBucketFlag.Name)
organization = ctx.String(MetricsInfluxDBOrganizationFlag.Name)
)
if enableExport {
tagsMap := SplitTagsFlag(ctx.String(MetricsInfluxDBTagsFlag.Name))
log.Info("Enabling metrics export to InfluxDB")
go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", tagsMap)
} else if enableExportV2 {
tagsMap := SplitTagsFlag(ctx.String(MetricsInfluxDBTagsFlag.Name))
log.Info("Enabling metrics export to InfluxDB (v2)")
go influxdb.InfluxDBV2WithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, token, bucket, organization, "geth.", tagsMap)
}
if ctx.IsSet(MetricsHTTPFlag.Name) {
address := net.JoinHostPort(ctx.String(MetricsHTTPFlag.Name), fmt.Sprintf("%d", ctx.Int(MetricsPortFlag.Name)))
log.Info("Enabling stand-alone metrics HTTP endpoint", "address", address)
exp.Setup(address)
} else if ctx.IsSet(MetricsPortFlag.Name) {
log.Warn(fmt.Sprintf("--%s specified without --%s, metrics server will not start.", MetricsPortFlag.Name, MetricsHTTPFlag.Name))
}
} }
log.Info("Enabling metrics collection")
metrics.Enable()
// InfluxDB exporter.
var (
enableExport = cfg.EnableInfluxDB
enableExportV2 = cfg.EnableInfluxDBV2
)
if cfg.EnableInfluxDB && cfg.EnableInfluxDBV2 {
Fatalf("Flags %v can't be used at the same time", strings.Join([]string{MetricsEnableInfluxDBFlag.Name, MetricsEnableInfluxDBV2Flag.Name}, ", "))
}
var (
endpoint = cfg.InfluxDBEndpoint
database = cfg.InfluxDBDatabase
username = cfg.InfluxDBUsername
password = cfg.InfluxDBPassword
token = cfg.InfluxDBToken
bucket = cfg.InfluxDBBucket
organization = cfg.InfluxDBOrganization
tagsMap = SplitTagsFlag(cfg.InfluxDBTags)
)
if enableExport {
log.Info("Enabling metrics export to InfluxDB")
go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", tagsMap)
} else if enableExportV2 {
tagsMap := SplitTagsFlag(cfg.InfluxDBTags)
log.Info("Enabling metrics export to InfluxDB (v2)")
go influxdb.InfluxDBV2WithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, token, bucket, organization, "geth.", tagsMap)
}
// Expvar exporter.
if cfg.HTTP != "" {
address := net.JoinHostPort(cfg.HTTP, fmt.Sprintf("%d", cfg.Port))
log.Info("Enabling stand-alone metrics HTTP endpoint", "address", address)
exp.Setup(address)
} else if cfg.HTTP == "" && cfg.Port != 0 {
log.Warn(fmt.Sprintf("--%s specified without --%s, metrics server will not start.", MetricsPortFlag.Name, MetricsHTTPFlag.Name))
}
// Enable system metrics collection.
go metrics.CollectProcessMetrics(3 * time.Second)
} }
// SplitTagsFlag parses a comma-separated list of k=v metrics tags.
func SplitTagsFlag(tagsFlag string) map[string]string { func SplitTagsFlag(tagsFlag string) map[string]string {
tags := strings.Split(tagsFlag, ",") tags := strings.Split(tagsFlag, ",")
tagsMap := map[string]string{} tagsMap := map[string]string{}

View file

@ -113,10 +113,10 @@ type freezerTable struct {
headId uint32 // number of the currently active head file headId uint32 // number of the currently active head file
tailId uint32 // number of the earliest file tailId uint32 // number of the earliest file
headBytes int64 // Number of bytes written to the head file headBytes int64 // Number of bytes written to the head file
readMeter metrics.Meter // Meter for measuring the effective amount of data read readMeter *metrics.Meter // Meter for measuring the effective amount of data read
writeMeter metrics.Meter // Meter for measuring the effective amount of data written writeMeter *metrics.Meter // Meter for measuring the effective amount of data written
sizeGauge metrics.Gauge // Gauge for tracking the combined size of all freezer tables sizeGauge *metrics.Gauge // Gauge for tracking the combined size of all freezer tables
logger log.Logger // Logger with database path and table name embedded logger log.Logger // Logger with database path and table name embedded
lock sync.RWMutex // Mutex protecting the data file descriptors lock sync.RWMutex // Mutex protecting the data file descriptors
@ -124,13 +124,13 @@ type freezerTable struct {
// newFreezerTable opens the given path as a freezer table. // newFreezerTable opens the given path as a freezer table.
func newFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) { func newFreezerTable(path, name string, disableSnappy, readonly bool) (*freezerTable, error) {
return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, freezerTableSize, disableSnappy, readonly) return newTable(path, name, metrics.NewInactiveMeter(), metrics.NewInactiveMeter(), metrics.NewGauge(), freezerTableSize, disableSnappy, readonly)
} }
// newTable opens a freezer table, creating the data and index files if they are // newTable opens a freezer table, creating the data and index files if they are
// non-existent. Both files are truncated to the shortest common length to ensure // non-existent. Both files are truncated to the shortest common length to ensure
// they don't go out of sync. // they don't go out of sync.
func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) { func newTable(path string, name string, readMeter, writeMeter *metrics.Meter, sizeGauge *metrics.Gauge, maxFilesize uint32, noCompression, readonly bool) (*freezerTable, error) {
// Ensure the containing directory exists and open the indexEntry file // Ensure the containing directory exists and open the indexEntry file
if err := os.MkdirAll(path, 0755); err != nil { if err := os.MkdirAll(path, 0755); err != nil {
return nil, err return nil, err

View file

@ -47,21 +47,21 @@ type triePrefetcher struct {
term chan struct{} // Channel to signal interruption term chan struct{} // Channel to signal interruption
noreads bool // Whether to ignore state-read-only prefetch requests noreads bool // Whether to ignore state-read-only prefetch requests
deliveryMissMeter metrics.Meter deliveryMissMeter *metrics.Meter
accountLoadReadMeter metrics.Meter accountLoadReadMeter *metrics.Meter
accountLoadWriteMeter metrics.Meter accountLoadWriteMeter *metrics.Meter
accountDupReadMeter metrics.Meter accountDupReadMeter *metrics.Meter
accountDupWriteMeter metrics.Meter accountDupWriteMeter *metrics.Meter
accountDupCrossMeter metrics.Meter accountDupCrossMeter *metrics.Meter
accountWasteMeter metrics.Meter accountWasteMeter *metrics.Meter
storageLoadReadMeter metrics.Meter storageLoadReadMeter *metrics.Meter
storageLoadWriteMeter metrics.Meter storageLoadWriteMeter *metrics.Meter
storageDupReadMeter metrics.Meter storageDupReadMeter *metrics.Meter
storageDupWriteMeter metrics.Meter storageDupWriteMeter *metrics.Meter
storageDupCrossMeter metrics.Meter storageDupCrossMeter *metrics.Meter
storageWasteMeter metrics.Meter storageWasteMeter *metrics.Meter
} }
func newTriePrefetcher(db Database, root common.Hash, namespace string, noreads bool) *triePrefetcher { func newTriePrefetcher(db Database, root common.Hash, namespace string, noreads bool) *triePrefetcher {
@ -111,7 +111,7 @@ func (p *triePrefetcher) terminate(async bool) {
// report aggregates the pre-fetching and usage metrics and reports them. // report aggregates the pre-fetching and usage metrics and reports them.
func (p *triePrefetcher) report() { func (p *triePrefetcher) report() {
if !metrics.Enabled { if !metrics.Enabled() {
return return
} }
for _, fetcher := range p.fetchers { for _, fetcher := range p.fetchers {

View file

@ -126,7 +126,7 @@ func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
return ErrAlreadyReserved return ErrAlreadyReserved
} }
p.reservations[addr] = subpool p.reservations[addr] = subpool
if metrics.Enabled { if metrics.Enabled() {
m := fmt.Sprintf("%s/%d", reservationsGaugeName, id) m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
metrics.GetOrRegisterGauge(m, nil).Inc(1) metrics.GetOrRegisterGauge(m, nil).Inc(1)
} }
@ -143,7 +143,7 @@ func (p *TxPool) reserver(id int, subpool SubPool) AddressReserver {
return errors.New("address not owned") return errors.New("address not owned")
} }
delete(p.reservations, addr) delete(p.reservations, addr)
if metrics.Enabled { if metrics.Enabled() {
m := fmt.Sprintf("%s/%d", reservationsGaugeName, id) m := fmt.Sprintf("%s/%d", reservationsGaugeName, id)
metrics.GetOrRegisterGauge(m, nil).Dec(1) metrics.GetOrRegisterGauge(m, nil).Dec(1)
} }

View file

@ -882,7 +882,7 @@ func (q *queue) DeliverReceipts(id string, receiptList [][]*types.Receipt, recei
// to access the queue, so they already need a lock anyway. // to access the queue, so they already need a lock anyway.
func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header,
taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest, taskQueue *prque.Prque[int64, *types.Header], pendPool map[string]*fetchRequest,
reqTimer metrics.Timer, resInMeter metrics.Meter, resDropMeter metrics.Meter, reqTimer *metrics.Timer, resInMeter, resDropMeter *metrics.Meter,
results int, validate func(index int, header *types.Header) error, results int, validate func(index int, header *types.Header) error,
reconstruct func(index int, result *fetchResult)) (int, error) { reconstruct func(index int, result *fetchResult)) (int, error) {
// Short circuit if the data was never requested // Short circuit if the data was never requested

View file

@ -366,7 +366,7 @@ func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error
defer h.decHandlers() defer h.decHandlers()
if err := h.peers.registerSnapExtension(peer); err != nil { if err := h.peers.registerSnapExtension(peer); err != nil {
if metrics.Enabled { if metrics.Enabled() {
if peer.Inbound() { if peer.Inbound() {
snap.IngressRegistrationErrorMeter.Mark(1) snap.IngressRegistrationErrorMeter.Mark(1)
} else { } else {

View file

@ -190,7 +190,7 @@ func handleMessage(backend Backend, peer *Peer) error {
var handlers = eth68 var handlers = eth68
// Track the amount of time it takes to serve the request and run the handler // Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled { if metrics.Enabled() {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
defer func(start time.Time) { defer func(start time.Time) {
sampler := func() metrics.Sample { sampler := func() metrics.Sample {

View file

@ -112,7 +112,7 @@ func (p *Peer) readStatus(network uint64, status *StatusPacket, genesis common.H
// markError registers the error with the corresponding metric. // markError registers the error with the corresponding metric.
func markError(p *Peer, err error) { func markError(p *Peer, err error) {
if !metrics.Enabled { if !metrics.Enabled() {
return return
} }
m := meters.get(p.Inbound()) m := meters.get(p.Inbound())

View file

@ -41,23 +41,23 @@ func (h *bidirectionalMeters) get(ingress bool) *hsMeters {
type hsMeters struct { type hsMeters struct {
// peerError measures the number of errors related to incorrect peer // peerError measures the number of errors related to incorrect peer
// behaviour, such as invalid message code, size, encoding, etc. // behaviour, such as invalid message code, size, encoding, etc.
peerError metrics.Meter peerError *metrics.Meter
// timeoutError measures the number of timeouts. // timeoutError measures the number of timeouts.
timeoutError metrics.Meter timeoutError *metrics.Meter
// networkIDMismatch measures the number of network id mismatch errors. // networkIDMismatch measures the number of network id mismatch errors.
networkIDMismatch metrics.Meter networkIDMismatch *metrics.Meter
// protocolVersionMismatch measures the number of differing protocol // protocolVersionMismatch measures the number of differing protocol
// versions. // versions.
protocolVersionMismatch metrics.Meter protocolVersionMismatch *metrics.Meter
// genesisMismatch measures the number of differing genesises. // genesisMismatch measures the number of differing genesises.
genesisMismatch metrics.Meter genesisMismatch *metrics.Meter
// forkidRejected measures the number of differing forkids. // forkidRejected measures the number of differing forkids.
forkidRejected metrics.Meter forkidRejected *metrics.Meter
} }
// newHandshakeMeters registers and returns handshake meters for the given // newHandshakeMeters registers and returns handshake meters for the given

View file

@ -132,7 +132,7 @@ func HandleMessage(backend Backend, peer *Peer) error {
defer msg.Discard() defer msg.Discard()
start := time.Now() start := time.Now()
// Track the amount of time it takes to serve the request and run the handler // Track the amount of time it takes to serve the request and run the handler
if metrics.Enabled { if metrics.Enabled() {
h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code) h := fmt.Sprintf("%s/%s/%d/%#02x", p2p.HandleHistName, ProtocolName, peer.Version(), msg.Code)
defer func(start time.Time) { defer func(start time.Time) {
sampler := func() metrics.Sample { sampler := func() metrics.Sample {

View file

@ -62,21 +62,21 @@ type Database struct {
fn string // filename for reporting fn string // filename for reporting
db *leveldb.DB // LevelDB instance db *leveldb.DB // LevelDB instance
compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction compTimeMeter *metrics.Meter // Meter for measuring the total time spent in database compaction
compReadMeter metrics.Meter // Meter for measuring the data read during compaction compReadMeter *metrics.Meter // Meter for measuring the data read during compaction
compWriteMeter metrics.Meter // Meter for measuring the data written during compaction compWriteMeter *metrics.Meter // Meter for measuring the data written during compaction
writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction writeDelayNMeter *metrics.Meter // Meter for measuring the write delay number due to database compaction
writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction writeDelayMeter *metrics.Meter // Meter for measuring the write delay duration due to database compaction
diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database diskSizeGauge *metrics.Gauge // Gauge for tracking the size of all the levels in the database
diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read diskReadMeter *metrics.Meter // Meter for measuring the effective amount of data read
diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written diskWriteMeter *metrics.Meter // Meter for measuring the effective amount of data written
memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction memCompGauge *metrics.Gauge // Gauge for tracking the number of memory compaction
level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0 level0CompGauge *metrics.Gauge // Gauge for tracking the number of table compaction in level0
nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level nonlevel0CompGauge *metrics.Gauge // Gauge for tracking the number of table compaction in non0 level
seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt seekCompGauge *metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt
manualMemAllocGauge metrics.Gauge // Gauge to track the amount of memory that has been manually allocated (not a part of runtime/GC) manualMemAllocGauge *metrics.Gauge // Gauge to track the amount of memory that has been manually allocated (not a part of runtime/GC)
levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels levelsGauge []*metrics.Gauge // Gauge for tracking the number of tables in levels
quitLock sync.Mutex // Mutex protecting the quit channel access quitLock sync.Mutex // Mutex protecting the quit channel access
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

View file

@ -58,21 +58,21 @@ type Database struct {
fn string // filename for reporting fn string // filename for reporting
db *pebble.DB // Underlying pebble storage engine db *pebble.DB // Underlying pebble storage engine
compTimeMeter metrics.Meter // Meter for measuring the total time spent in database compaction compTimeMeter *metrics.Meter // Meter for measuring the total time spent in database compaction
compReadMeter metrics.Meter // Meter for measuring the data read during compaction compReadMeter *metrics.Meter // Meter for measuring the data read during compaction
compWriteMeter metrics.Meter // Meter for measuring the data written during compaction compWriteMeter *metrics.Meter // Meter for measuring the data written during compaction
writeDelayNMeter metrics.Meter // Meter for measuring the write delay number due to database compaction writeDelayNMeter *metrics.Meter // Meter for measuring the write delay number due to database compaction
writeDelayMeter metrics.Meter // Meter for measuring the write delay duration due to database compaction writeDelayMeter *metrics.Meter // Meter for measuring the write delay duration due to database compaction
diskSizeGauge metrics.Gauge // Gauge for tracking the size of all the levels in the database diskSizeGauge *metrics.Gauge // Gauge for tracking the size of all the levels in the database
diskReadMeter metrics.Meter // Meter for measuring the effective amount of data read diskReadMeter *metrics.Meter // Meter for measuring the effective amount of data read
diskWriteMeter metrics.Meter // Meter for measuring the effective amount of data written diskWriteMeter *metrics.Meter // Meter for measuring the effective amount of data written
memCompGauge metrics.Gauge // Gauge for tracking the number of memory compaction memCompGauge *metrics.Gauge // Gauge for tracking the number of memory compaction
level0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in level0 level0CompGauge *metrics.Gauge // Gauge for tracking the number of table compaction in level0
nonlevel0CompGauge metrics.Gauge // Gauge for tracking the number of table compaction in non0 level nonlevel0CompGauge *metrics.Gauge // Gauge for tracking the number of table compaction in non0 level
seekCompGauge metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt seekCompGauge *metrics.Gauge // Gauge for tracking the number of table compaction caused by read opt
manualMemAllocGauge metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated manualMemAllocGauge *metrics.Gauge // Gauge for tracking amount of non-managed memory currently allocated
levelsGauge []metrics.Gauge // Gauge for tracking the number of tables in levels levelsGauge []*metrics.Gauge // Gauge for tracking the number of tables in levels
quitLock sync.RWMutex // Mutex protecting the quit channel and the closed flag quitLock sync.RWMutex // Mutex protecting the quit channel and the closed flag
quitChan chan chan error // Quit channel to stop the metrics collection before closing the database quitChan chan chan error // Quit channel to stop the metrics collection before closing the database

View file

@ -4,109 +4,55 @@ import (
"sync/atomic" "sync/atomic"
) )
type CounterSnapshot interface {
Count() int64
}
// Counter hold an int64 value that can be incremented and decremented.
type Counter interface {
Clear()
Dec(int64)
Inc(int64)
Snapshot() CounterSnapshot
}
// GetOrRegisterCounter returns an existing Counter or constructs and registers // GetOrRegisterCounter returns an existing Counter or constructs and registers
// a new StandardCounter. // a new Counter.
func GetOrRegisterCounter(name string, r Registry) Counter { func GetOrRegisterCounter(name string, r Registry) *Counter {
if nil == r { if r == nil {
r = DefaultRegistry r = DefaultRegistry
} }
return r.GetOrRegister(name, NewCounter).(Counter) return r.GetOrRegister(name, NewCounter).(*Counter)
} }
// GetOrRegisterCounterForced returns an existing Counter or constructs and registers a // NewCounter constructs a new Counter.
// new Counter no matter the global switch is enabled or not. func NewCounter() *Counter {
// Be sure to unregister the counter from the registry once it is of no use to return new(Counter)
// allow for garbage collection.
func GetOrRegisterCounterForced(name string, r Registry) Counter {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewCounterForced).(Counter)
} }
// NewCounter constructs a new StandardCounter. // NewRegisteredCounter constructs and registers a new Counter.
func NewCounter() Counter { func NewRegisteredCounter(name string, r Registry) *Counter {
if !Enabled {
return NilCounter{}
}
return new(StandardCounter)
}
// NewCounterForced constructs a new StandardCounter and returns it no matter if
// the global switch is enabled or not.
func NewCounterForced() Counter {
return new(StandardCounter)
}
// NewRegisteredCounter constructs and registers a new StandardCounter.
func NewRegisteredCounter(name string, r Registry) Counter {
c := NewCounter() c := NewCounter()
if nil == r { if r == nil {
r = DefaultRegistry r = DefaultRegistry
} }
r.Register(name, c) r.Register(name, c)
return c return c
} }
// NewRegisteredCounterForced constructs and registers a new StandardCounter // CounterSnapshot is a read-only copy of a Counter.
// and launches a goroutine no matter the global switch is enabled or not. type CounterSnapshot int64
// Be sure to unregister the counter from the registry once it is of no use to
// allow for garbage collection.
func NewRegisteredCounterForced(name string, r Registry) Counter {
c := NewCounterForced()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// counterSnapshot is a read-only copy of another Counter.
type counterSnapshot int64
// Count returns the count at the time the snapshot was taken. // Count returns the count at the time the snapshot was taken.
func (c counterSnapshot) Count() int64 { return int64(c) } func (c CounterSnapshot) Count() int64 { return int64(c) }
// NilCounter is a no-op Counter. // Counter hold an int64 value that can be incremented and decremented.
type NilCounter struct{} type Counter atomic.Int64
func (NilCounter) Clear() {}
func (NilCounter) Dec(i int64) {}
func (NilCounter) Inc(i int64) {}
func (NilCounter) Snapshot() CounterSnapshot { return (*emptySnapshot)(nil) }
// StandardCounter is the standard implementation of a Counter and uses the
// sync/atomic package to manage a single int64 value.
type StandardCounter atomic.Int64
// Clear sets the counter to zero. // Clear sets the counter to zero.
func (c *StandardCounter) Clear() { func (c *Counter) Clear() {
(*atomic.Int64)(c).Store(0) (*atomic.Int64)(c).Store(0)
} }
// Dec decrements the counter by the given amount. // Dec decrements the counter by the given amount.
func (c *StandardCounter) Dec(i int64) { func (c *Counter) Dec(i int64) {
(*atomic.Int64)(c).Add(-i) (*atomic.Int64)(c).Add(-i)
} }
// Inc increments the counter by the given amount. // Inc increments the counter by the given amount.
func (c *StandardCounter) Inc(i int64) { func (c *Counter) Inc(i int64) {
(*atomic.Int64)(c).Add(i) (*atomic.Int64)(c).Add(i)
} }
// Snapshot returns a read-only copy of the counter. // Snapshot returns a read-only copy of the counter.
func (c *StandardCounter) Snapshot() CounterSnapshot { func (c *Counter) Snapshot() CounterSnapshot {
return counterSnapshot((*atomic.Int64)(c).Load()) return CounterSnapshot((*atomic.Int64)(c).Load())
} }

View file

@ -5,114 +5,57 @@ import (
"sync/atomic" "sync/atomic"
) )
type CounterFloat64Snapshot interface { // GetOrRegisterCounterFloat64 returns an existing *CounterFloat64 or constructs and registers
Count() float64 // a new CounterFloat64.
} func GetOrRegisterCounterFloat64(name string, r Registry) *CounterFloat64 {
// CounterFloat64 holds a float64 value that can be incremented and decremented.
type CounterFloat64 interface {
Clear()
Dec(float64)
Inc(float64)
Snapshot() CounterFloat64Snapshot
}
// GetOrRegisterCounterFloat64 returns an existing CounterFloat64 or constructs and registers
// a new StandardCounterFloat64.
func GetOrRegisterCounterFloat64(name string, r Registry) CounterFloat64 {
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
} }
return r.GetOrRegister(name, NewCounterFloat64).(CounterFloat64) return r.GetOrRegister(name, NewCounterFloat64).(*CounterFloat64)
} }
// GetOrRegisterCounterFloat64Forced returns an existing CounterFloat64 or constructs and registers a // NewCounterFloat64 constructs a new CounterFloat64.
// new CounterFloat64 no matter the global switch is enabled or not. func NewCounterFloat64() *CounterFloat64 {
// Be sure to unregister the counter from the registry once it is of no use to return new(CounterFloat64)
// allow for garbage collection.
func GetOrRegisterCounterFloat64Forced(name string, r Registry) CounterFloat64 {
if nil == r {
r = DefaultRegistry
}
return r.GetOrRegister(name, NewCounterFloat64Forced).(CounterFloat64)
} }
// NewCounterFloat64 constructs a new StandardCounterFloat64. // NewRegisteredCounterFloat64 constructs and registers a new CounterFloat64.
func NewCounterFloat64() CounterFloat64 { func NewRegisteredCounterFloat64(name string, r Registry) *CounterFloat64 {
if !Enabled {
return NilCounterFloat64{}
}
return &StandardCounterFloat64{}
}
// NewCounterFloat64Forced constructs a new StandardCounterFloat64 and returns it no matter if
// the global switch is enabled or not.
func NewCounterFloat64Forced() CounterFloat64 {
return &StandardCounterFloat64{}
}
// NewRegisteredCounterFloat64 constructs and registers a new StandardCounterFloat64.
func NewRegisteredCounterFloat64(name string, r Registry) CounterFloat64 {
c := NewCounterFloat64() c := NewCounterFloat64()
if nil == r { if r == nil {
r = DefaultRegistry r = DefaultRegistry
} }
r.Register(name, c) r.Register(name, c)
return c return c
} }
// NewRegisteredCounterFloat64Forced constructs and registers a new StandardCounterFloat64 // CounterFloat64Snapshot is a read-only copy of a float64 counter.
// and launches a goroutine no matter the global switch is enabled or not. type CounterFloat64Snapshot float64
// Be sure to unregister the counter from the registry once it is of no use to
// allow for garbage collection.
func NewRegisteredCounterFloat64Forced(name string, r Registry) CounterFloat64 {
c := NewCounterFloat64Forced()
if nil == r {
r = DefaultRegistry
}
r.Register(name, c)
return c
}
// counterFloat64Snapshot is a read-only copy of another CounterFloat64.
type counterFloat64Snapshot float64
// Count returns the value at the time the snapshot was taken. // Count returns the value at the time the snapshot was taken.
func (c counterFloat64Snapshot) Count() float64 { return float64(c) } func (c CounterFloat64Snapshot) Count() float64 { return float64(c) }
type NilCounterFloat64 struct{} // CounterFloat64 holds a float64 value that can be incremented and decremented.
type CounterFloat64 atomic.Uint64
func (NilCounterFloat64) Clear() {}
func (NilCounterFloat64) Count() float64 { return 0.0 }
func (NilCounterFloat64) Dec(i float64) {}
func (NilCounterFloat64) Inc(i float64) {}
func (NilCounterFloat64) Snapshot() CounterFloat64Snapshot { return NilCounterFloat64{} }
// StandardCounterFloat64 is the standard implementation of a CounterFloat64 and uses the
// atomic to manage a single float64 value.
type StandardCounterFloat64 struct {
floatBits atomic.Uint64
}
// Clear sets the counter to zero. // Clear sets the counter to zero.
func (c *StandardCounterFloat64) Clear() { func (c *CounterFloat64) Clear() {
c.floatBits.Store(0) (*atomic.Uint64)(c).Store(0)
} }
// Dec decrements the counter by the given amount. // Dec decrements the counter by the given amount.
func (c *StandardCounterFloat64) Dec(v float64) { func (c *CounterFloat64) Dec(v float64) {
atomicAddFloat(&c.floatBits, -v) atomicAddFloat((*atomic.Uint64)(c), -v)
} }
// Inc increments the counter by the given amount. // Inc increments the counter by the given amount.
func (c *StandardCounterFloat64) Inc(v float64) { func (c *CounterFloat64) Inc(v float64) {
atomicAddFloat(&c.floatBits, v) atomicAddFloat((*atomic.Uint64)(c), v)
} }
// Snapshot returns a read-only copy of the counter. // Snapshot returns a read-only copy of the counter.
func (c *StandardCounterFloat64) Snapshot() CounterFloat64Snapshot { func (c *CounterFloat64) Snapshot() CounterFloat64Snapshot {
v := math.Float64frombits(c.floatBits.Load()) return CounterFloat64Snapshot(math.Float64frombits((*atomic.Uint64)(c).Load()))
return counterFloat64Snapshot(v)
} }
func atomicAddFloat(fbits *atomic.Uint64, v float64) { func atomicAddFloat(fbits *atomic.Uint64, v float64) {

View file

@ -32,61 +32,35 @@ func BenchmarkCounterFloat64Parallel(b *testing.B) {
} }
} }
func TestCounterFloat64Clear(t *testing.T) { func TestCounterFloat64(t *testing.T) {
c := NewCounterFloat64() c := NewCounterFloat64()
c.Inc(1.0)
c.Clear()
if count := c.Snapshot().Count(); count != 0 { if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count) t.Errorf("wrong count: %v", count)
} }
}
func TestCounterFloat64Dec1(t *testing.T) {
c := NewCounterFloat64()
c.Dec(1.0) c.Dec(1.0)
if count := c.Snapshot().Count(); count != -1.0 { if count := c.Snapshot().Count(); count != -1.0 {
t.Errorf("c.Count(): -1.0 != %v\n", count) t.Errorf("wrong count: %v", count)
} }
}
func TestCounterFloat64Dec2(t *testing.T) {
c := NewCounterFloat64()
c.Dec(2.0)
if count := c.Snapshot().Count(); count != -2.0 {
t.Errorf("c.Count(): -2.0 != %v\n", count)
}
}
func TestCounterFloat64Inc1(t *testing.T) {
c := NewCounterFloat64()
c.Inc(1.0)
if count := c.Snapshot().Count(); count != 1.0 {
t.Errorf("c.Count(): 1.0 != %v\n", count)
}
}
func TestCounterFloat64Inc2(t *testing.T) {
c := NewCounterFloat64()
c.Inc(2.0)
if count := c.Snapshot().Count(); count != 2.0 {
t.Errorf("c.Count(): 2.0 != %v\n", count)
}
}
func TestCounterFloat64Snapshot(t *testing.T) {
c := NewCounterFloat64()
c.Inc(1.0)
snapshot := c.Snapshot() snapshot := c.Snapshot()
c.Inc(1.0) c.Dec(2.0)
if count := snapshot.Count(); count != 1.0 { if count := c.Snapshot().Count(); count != -3.0 {
t.Errorf("c.Count(): 1.0 != %v\n", count) t.Errorf("wrong count: %v", count)
} }
} c.Inc(1.0)
if count := c.Snapshot().Count(); count != -2.0 {
func TestCounterFloat64Zero(t *testing.T) { t.Errorf("wrong count: %v", count)
c := NewCounterFloat64() }
if count := c.Snapshot().Count(); count != 0 { c.Inc(2.0)
t.Errorf("c.Count(): 0 != %v\n", count) if count := c.Snapshot().Count(); count != 0.0 {
t.Errorf("wrong count: %v", count)
}
if count := snapshot.Count(); count != -1.0 {
t.Errorf("snapshot count wrong: %v", count)
}
c.Inc(1.0)
c.Clear()
if count := c.Snapshot().Count(); count != 0.0 {
t.Errorf("wrong count: %v", count)
} }
} }

View file

@ -19,35 +19,26 @@ func TestCounterClear(t *testing.T) {
} }
} }
func TestCounterDec1(t *testing.T) { func TestCounter(t *testing.T) {
c := NewCounter() c := NewCounter()
if count := c.Snapshot().Count(); count != 0 {
t.Errorf("wrong count: %v", count)
}
c.Dec(1) c.Dec(1)
if count := c.Snapshot().Count(); count != -1 { if count := c.Snapshot().Count(); count != -1 {
t.Errorf("c.Count(): -1 != %v\n", count) t.Errorf("wrong count: %v", count)
} }
}
func TestCounterDec2(t *testing.T) {
c := NewCounter()
c.Dec(2) c.Dec(2)
if count := c.Snapshot().Count(); count != -2 { if count := c.Snapshot().Count(); count != -3 {
t.Errorf("c.Count(): -2 != %v\n", count) t.Errorf("wrong count: %v", count)
} }
}
func TestCounterInc1(t *testing.T) {
c := NewCounter()
c.Inc(1) c.Inc(1)
if count := c.Snapshot().Count(); count != 1 { if count := c.Snapshot().Count(); count != -2 {
t.Errorf("c.Count(): 1 != %v\n", count) t.Errorf("wrong count: %v", count)
} }
}
func TestCounterInc2(t *testing.T) {
c := NewCounter()
c.Inc(2) c.Inc(2)
if count := c.Snapshot().Count(); count != 2 { if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 2 != %v\n", count) t.Errorf("wrong count: %v", count)
} }
} }
@ -61,13 +52,6 @@ func TestCounterSnapshot(t *testing.T) {
} }
} }
func TestCounterZero(t *testing.T) {
c := NewCounter()
if count := c.Snapshot().Count(); count != 0 {
t.Errorf("c.Count(): 0 != %v\n", count)
}
}
func TestGetOrRegisterCounter(t *testing.T) { func TestGetOrRegisterCounter(t *testing.T) {
r := NewRegistry() r := NewRegistry()
NewRegisteredCounter("foo", r).Inc(47) NewRegisteredCounter("foo", r).Inc(47)

View file

@ -8,13 +8,13 @@ import (
var ( var (
debugMetrics struct { debugMetrics struct {
GCStats struct { GCStats struct {
LastGC Gauge LastGC *Gauge
NumGC Gauge NumGC *Gauge
Pause Histogram Pause Histogram
//PauseQuantiles Histogram //PauseQuantiles Histogram
PauseTotal Gauge PauseTotal *Gauge
} }
ReadGCStats Timer ReadGCStats *Timer
} }
gcStats debug.GCStats gcStats debug.GCStats
) )

View file

@ -7,56 +7,36 @@ import (
"time" "time"
) )
type EWMASnapshot interface { // EWMASnapshot is a read-only copy of an EWMA.
Rate() float64 type EWMASnapshot float64
}
// EWMAs continuously calculate an exponentially-weighted moving average // Rate returns the rate of events per second at the time the snapshot was
// based on an outside source of clock ticks. // taken.
type EWMA interface { func (a EWMASnapshot) Rate() float64 { return float64(a) }
Snapshot() EWMASnapshot
Tick()
Update(int64)
}
// NewEWMA constructs a new EWMA with the given alpha. // NewEWMA constructs a new EWMA with the given alpha.
func NewEWMA(alpha float64) EWMA { func NewEWMA(alpha float64) *EWMA {
return &StandardEWMA{alpha: alpha} return &EWMA{alpha: alpha}
} }
// NewEWMA1 constructs a new EWMA for a one-minute moving average. // NewEWMA1 constructs a new EWMA for a one-minute moving average.
func NewEWMA1() EWMA { func NewEWMA1() *EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/1)) return NewEWMA(1 - math.Exp(-5.0/60.0/1))
} }
// NewEWMA5 constructs a new EWMA for a five-minute moving average. // NewEWMA5 constructs a new EWMA for a five-minute moving average.
func NewEWMA5() EWMA { func NewEWMA5() *EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/5)) return NewEWMA(1 - math.Exp(-5.0/60.0/5))
} }
// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. // NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
func NewEWMA15() EWMA { func NewEWMA15() *EWMA {
return NewEWMA(1 - math.Exp(-5.0/60.0/15)) return NewEWMA(1 - math.Exp(-5.0/60.0/15))
} }
// ewmaSnapshot is a read-only copy of another EWMA. // EWMA continuously calculates an exponentially-weighted moving average
type ewmaSnapshot float64 // based on an outside source of clock ticks.
type EWMA struct {
// Rate returns the rate of events per second at the time the snapshot was
// taken.
func (a ewmaSnapshot) Rate() float64 { return float64(a) }
// NilEWMA is a no-op EWMA.
type NilEWMA struct{}
func (NilEWMA) Snapshot() EWMASnapshot { return (*emptySnapshot)(nil) }
func (NilEWMA) Tick() {}
func (NilEWMA) Update(n int64) {}
// StandardEWMA is the standard implementation of an EWMA and tracks the number
// of uncounted events and processes them on each tick. It uses the
// sync/atomic package to manage uncounted events.
type StandardEWMA struct {
uncounted atomic.Int64 uncounted atomic.Int64
alpha float64 alpha float64
rate atomic.Uint64 rate atomic.Uint64
@ -65,27 +45,27 @@ type StandardEWMA struct {
} }
// Snapshot returns a read-only copy of the EWMA. // Snapshot returns a read-only copy of the EWMA.
func (a *StandardEWMA) Snapshot() EWMASnapshot { func (a *EWMA) Snapshot() EWMASnapshot {
r := math.Float64frombits(a.rate.Load()) * float64(time.Second) r := math.Float64frombits(a.rate.Load()) * float64(time.Second)
return ewmaSnapshot(r) return EWMASnapshot(r)
} }
// Tick ticks the clock to update the moving average. It assumes it is called // tick ticks the clock to update the moving average. It assumes it is called
// every five seconds. // every five seconds.
func (a *StandardEWMA) Tick() { func (a *EWMA) tick() {
// Optimization to avoid mutex locking in the hot-path. // Optimization to avoid mutex locking in the hot-path.
if a.init.Load() { if a.init.Load() {
a.updateRate(a.fetchInstantRate()) a.updateRate(a.fetchInstantRate())
return return
} }
// Slow-path: this is only needed on the first Tick() and preserves transactional updating // Slow-path: this is only needed on the first tick() and preserves transactional updating
// of init and rate in the else block. The first conditional is needed below because // of init and rate in the else block. The first conditional is needed below because
// a different thread could have set a.init = 1 between the time of the first atomic load and when // a different thread could have set a.init = 1 between the time of the first atomic load and when
// the lock was acquired. // the lock was acquired.
a.mutex.Lock() a.mutex.Lock()
if a.init.Load() { if a.init.Load() {
// The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section // The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section
// but again, this section is only invoked on the first successful Tick() operation. // but again, this section is only invoked on the first successful tick() operation.
a.updateRate(a.fetchInstantRate()) a.updateRate(a.fetchInstantRate())
} else { } else {
a.init.Store(true) a.init.Store(true)
@ -94,18 +74,18 @@ func (a *StandardEWMA) Tick() {
a.mutex.Unlock() a.mutex.Unlock()
} }
func (a *StandardEWMA) fetchInstantRate() float64 { func (a *EWMA) fetchInstantRate() float64 {
count := a.uncounted.Swap(0) count := a.uncounted.Swap(0)
return float64(count) / float64(5*time.Second) return float64(count) / float64(5*time.Second)
} }
func (a *StandardEWMA) updateRate(instantRate float64) { func (a *EWMA) updateRate(instantRate float64) {
currentRate := math.Float64frombits(a.rate.Load()) currentRate := math.Float64frombits(a.rate.Load())
currentRate += a.alpha * (instantRate - currentRate) currentRate += a.alpha * (instantRate - currentRate)
a.rate.Store(math.Float64bits(currentRate)) a.rate.Store(math.Float64bits(currentRate))
} }
// Update adds n uncounted events. // Update adds n uncounted events.
func (a *StandardEWMA) Update(n int64) { func (a *EWMA) Update(n int64) {
a.uncounted.Add(n) a.uncounted.Add(n)
} }

View file

@ -12,7 +12,7 @@ func BenchmarkEWMA(b *testing.B) {
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
a.Update(1) a.Update(1)
a.Tick() a.tick()
} }
} }
@ -23,7 +23,7 @@ func BenchmarkEWMAParallel(b *testing.B) {
b.RunParallel(func(pb *testing.PB) { b.RunParallel(func(pb *testing.PB) {
for pb.Next() { for pb.Next() {
a.Update(1) a.Update(1)
a.Tick() a.tick()
} }
}) })
} }
@ -31,7 +31,7 @@ func BenchmarkEWMAParallel(b *testing.B) {
func TestEWMA1(t *testing.T) { func TestEWMA1(t *testing.T) {
a := NewEWMA1() a := NewEWMA1()
a.Update(3) a.Update(3)
a.Tick() a.tick()
for i, want := range []float64{0.6, for i, want := range []float64{0.6,
0.22072766470286553, 0.08120116994196772, 0.029872241020718428, 0.22072766470286553, 0.08120116994196772, 0.029872241020718428,
0.01098938333324054, 0.004042768199451294, 0.0014872513059998212, 0.01098938333324054, 0.004042768199451294, 0.0014872513059998212,
@ -49,7 +49,7 @@ func TestEWMA1(t *testing.T) {
func TestEWMA5(t *testing.T) { func TestEWMA5(t *testing.T) {
a := NewEWMA5() a := NewEWMA5()
a.Update(3) a.Update(3)
a.Tick() a.tick()
for i, want := range []float64{ for i, want := range []float64{
0.6, 0.49123845184678905, 0.4021920276213837, 0.32928698165641596, 0.6, 0.49123845184678905, 0.4021920276213837, 0.32928698165641596,
0.269597378470333, 0.2207276647028654, 0.18071652714732128, 0.269597378470333, 0.2207276647028654, 0.18071652714732128,
@ -67,7 +67,7 @@ func TestEWMA5(t *testing.T) {
func TestEWMA15(t *testing.T) { func TestEWMA15(t *testing.T) {
a := NewEWMA15() a := NewEWMA15()
a.Update(3) a.Update(3)
a.Tick() a.tick()
for i, want := range []float64{ for i, want := range []float64{
0.6, 0.5613041910189706, 0.5251039914257684, 0.4912384518467888184678905, 0.6, 0.5613041910189706, 0.5251039914257684, 0.4912384518467888184678905,
0.459557003018789, 0.4299187863442732, 0.4021920276213831, 0.459557003018789, 0.4299187863442732, 0.4021920276213831,
@ -82,8 +82,8 @@ func TestEWMA15(t *testing.T) {
} }
} }
func elapseMinute(a EWMA) { func elapseMinute(a *EWMA) {
for i := 0; i < 12; i++ { for i := 0; i < 12; i++ {
a.Tick() a.tick()
} }
} }

View file

@ -146,7 +146,7 @@ func (exp *exp) publishHistogram(name string, metric metrics.Histogram) {
exp.getFloat(name + ".999-percentile").Set(ps[4]) exp.getFloat(name + ".999-percentile").Set(ps[4])
} }
func (exp *exp) publishMeter(name string, metric metrics.Meter) { func (exp *exp) publishMeter(name string, metric *metrics.Meter) {
m := metric.Snapshot() m := metric.Snapshot()
exp.getInt(name + ".count").Set(m.Count()) exp.getInt(name + ".count").Set(m.Count())
exp.getFloat(name + ".one-minute").Set(m.Rate1()) exp.getFloat(name + ".one-minute").Set(m.Rate1())
@ -155,7 +155,7 @@ func (exp *exp) publishMeter(name string, metric metrics.Meter) {
exp.getFloat(name + ".mean").Set(m.RateMean()) exp.getFloat(name + ".mean").Set(m.RateMean())
} }
func (exp *exp) publishTimer(name string, metric metrics.Timer) { func (exp *exp) publishTimer(name string, metric *metrics.Timer) {
t := metric.Snapshot() t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
exp.getInt(name + ".count").Set(t.Count()) exp.getInt(name + ".count").Set(t.Count())
@ -174,7 +174,7 @@ func (exp *exp) publishTimer(name string, metric metrics.Timer) {
exp.getFloat(name + ".mean-rate").Set(t.RateMean()) exp.getFloat(name + ".mean-rate").Set(t.RateMean())
} }
func (exp *exp) publishResettingTimer(name string, metric metrics.ResettingTimer) { func (exp *exp) publishResettingTimer(name string, metric *metrics.ResettingTimer) {
t := metric.Snapshot() t := metric.Snapshot()
ps := t.Percentiles([]float64{0.50, 0.75, 0.95, 0.99}) ps := t.Percentiles([]float64{0.50, 0.75, 0.95, 0.99})
exp.getInt(name + ".count").Set(int64(t.Count())) exp.getInt(name + ".count").Set(int64(t.Count()))
@ -188,23 +188,23 @@ func (exp *exp) publishResettingTimer(name string, metric metrics.ResettingTimer
func (exp *exp) syncToExpvar() { func (exp *exp) syncToExpvar() {
exp.registry.Each(func(name string, i interface{}) { exp.registry.Each(func(name string, i interface{}) {
switch i := i.(type) { switch i := i.(type) {
case metrics.Counter: case *metrics.Counter:
exp.publishCounter(name, i.Snapshot()) exp.publishCounter(name, i.Snapshot())
case metrics.CounterFloat64: case *metrics.CounterFloat64:
exp.publishCounterFloat64(name, i.Snapshot()) exp.publishCounterFloat64(name, i.Snapshot())
case metrics.Gauge: case *metrics.Gauge:
exp.publishGauge(name, i.Snapshot()) exp.publishGauge(name, i.Snapshot())
case metrics.GaugeFloat64: case *metrics.GaugeFloat64:
exp.publishGaugeFloat64(name, i.Snapshot()) exp.publishGaugeFloat64(name, i.Snapshot())
case metrics.GaugeInfo: case *metrics.GaugeInfo:
exp.publishGaugeInfo(name, i.Snapshot()) exp.publishGaugeInfo(name, i.Snapshot())
case metrics.Histogram: case metrics.Histogram:
exp.publishHistogram(name, i) exp.publishHistogram(name, i)
case metrics.Meter: case *metrics.Meter:
exp.publishMeter(name, i) exp.publishMeter(name, i)
case metrics.Timer: case *metrics.Timer:
exp.publishTimer(name, i) exp.publishTimer(name, i)
case metrics.ResettingTimer: case *metrics.ResettingTimer:
exp.publishResettingTimer(name, i) exp.publishResettingTimer(name, i)
default: default:
panic(fmt.Sprintf("unsupported type for '%s': %T", name, i)) panic(fmt.Sprintf("unsupported type for '%s': %T", name, i))

View file

@ -2,97 +2,69 @@ package metrics
import "sync/atomic" import "sync/atomic"
// GaugeSnapshot contains a readonly int64. // GaugeSnapshot is a read-only copy of a Gauge.
type GaugeSnapshot interface { type GaugeSnapshot int64
Value() int64
}
// Gauge holds an int64 value that can be set arbitrarily. // Value returns the value at the time the snapshot was taken.
type Gauge interface { func (g GaugeSnapshot) Value() int64 { return int64(g) }
Snapshot() GaugeSnapshot
Update(int64)
UpdateIfGt(int64)
Dec(int64)
Inc(int64)
}
// GetOrRegisterGauge returns an existing Gauge or constructs and registers a // GetOrRegisterGauge returns an existing Gauge or constructs and registers a
// new StandardGauge. // new Gauge.
func GetOrRegisterGauge(name string, r Registry) Gauge { func GetOrRegisterGauge(name string, r Registry) *Gauge {
if nil == r { if r == nil {
r = DefaultRegistry r = DefaultRegistry
} }
return r.GetOrRegister(name, NewGauge).(Gauge) return r.GetOrRegister(name, NewGauge).(*Gauge)
} }
// NewGauge constructs a new StandardGauge. // NewGauge constructs a new Gauge.
func NewGauge() Gauge { func NewGauge() *Gauge {
if !Enabled { return &Gauge{}
return NilGauge{}
}
return &StandardGauge{}
} }
// NewRegisteredGauge constructs and registers a new StandardGauge. // NewRegisteredGauge constructs and registers a new Gauge.
func NewRegisteredGauge(name string, r Registry) Gauge { func NewRegisteredGauge(name string, r Registry) *Gauge {
c := NewGauge() c := NewGauge()
if nil == r { if r == nil {
r = DefaultRegistry r = DefaultRegistry
} }
r.Register(name, c) r.Register(name, c)
return c return c
} }
// gaugeSnapshot is a read-only copy of another Gauge. // Gauge holds an int64 value that can be set arbitrarily.
type gaugeSnapshot int64 type Gauge atomic.Int64
// Value returns the value at the time the snapshot was taken.
func (g gaugeSnapshot) Value() int64 { return int64(g) }
// NilGauge is a no-op Gauge.
type NilGauge struct{}
func (NilGauge) Snapshot() GaugeSnapshot { return (*emptySnapshot)(nil) }
func (NilGauge) Update(v int64) {}
func (NilGauge) UpdateIfGt(v int64) {}
func (NilGauge) Dec(i int64) {}
func (NilGauge) Inc(i int64) {}
// StandardGauge is the standard implementation of a Gauge and uses the
// sync/atomic package to manage a single int64 value.
type StandardGauge struct {
value atomic.Int64
}
// Snapshot returns a read-only copy of the gauge. // Snapshot returns a read-only copy of the gauge.
func (g *StandardGauge) Snapshot() GaugeSnapshot { func (g *Gauge) Snapshot() GaugeSnapshot {
return gaugeSnapshot(g.value.Load()) return GaugeSnapshot((*atomic.Int64)(g).Load())
} }
// Update updates the gauge's value. // Update updates the gauge's value.
func (g *StandardGauge) Update(v int64) { func (g *Gauge) Update(v int64) {
g.value.Store(v) (*atomic.Int64)(g).Store(v)
} }
// UpdateIfGt updates the gauge's value if v is larger than the current value. // UpdateIfGt updates the gauge's value if v is larger than the current value.
func (g *StandardGauge) UpdateIfGt(v int64) { func (g *Gauge) UpdateIfGt(v int64) {
value := (*atomic.Int64)(g)
for { for {
exist := g.value.Load() exist := value.Load()
if exist >= v { if exist >= v {
break break
} }
if g.value.CompareAndSwap(exist, v) { if value.CompareAndSwap(exist, v) {
break break
} }
} }
} }
// Dec decrements the gauge's current value by the given amount. // Dec decrements the gauge's current value by the given amount.
func (g *StandardGauge) Dec(i int64) { func (g *Gauge) Dec(i int64) {
g.value.Add(-i) (*atomic.Int64)(g).Add(-i)
} }
// Inc increments the gauge's current value by the given amount. // Inc increments the gauge's current value by the given amount.
func (g *StandardGauge) Inc(i int64) { func (g *Gauge) Inc(i int64) {
g.value.Add(i) (*atomic.Int64)(g).Add(i)
} }

View file

@ -5,35 +5,28 @@ import (
"sync/atomic" "sync/atomic"
) )
type GaugeFloat64Snapshot interface {
Value() float64
}
// GaugeFloat64 holds a float64 value that can be set arbitrarily.
type GaugeFloat64 interface {
Snapshot() GaugeFloat64Snapshot
Update(float64)
}
// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a // GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
// new StandardGaugeFloat64. // new GaugeFloat64.
func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 { func GetOrRegisterGaugeFloat64(name string, r Registry) *GaugeFloat64 {
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
} }
return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64) return r.GetOrRegister(name, NewGaugeFloat64()).(*GaugeFloat64)
} }
// NewGaugeFloat64 constructs a new StandardGaugeFloat64. // GaugeFloat64Snapshot is a read-only copy of a GaugeFloat64.
func NewGaugeFloat64() GaugeFloat64 { type GaugeFloat64Snapshot float64
if !Enabled {
return NilGaugeFloat64{} // Value returns the value at the time the snapshot was taken.
} func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
return &StandardGaugeFloat64{}
// NewGaugeFloat64 constructs a new GaugeFloat64.
func NewGaugeFloat64() *GaugeFloat64 {
return new(GaugeFloat64)
} }
// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64. // NewRegisteredGaugeFloat64 constructs and registers a new GaugeFloat64.
func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 { func NewRegisteredGaugeFloat64(name string, r Registry) *GaugeFloat64 {
c := NewGaugeFloat64() c := NewGaugeFloat64()
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
@ -42,32 +35,16 @@ func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
return c return c
} }
// gaugeFloat64Snapshot is a read-only copy of another GaugeFloat64. // GaugeFloat64 holds a float64 value that can be set arbitrarily.
type gaugeFloat64Snapshot float64 type GaugeFloat64 atomic.Uint64
// Value returns the value at the time the snapshot was taken.
func (g gaugeFloat64Snapshot) Value() float64 { return float64(g) }
// NilGaugeFloat64 is a no-op Gauge.
type NilGaugeFloat64 struct{}
func (NilGaugeFloat64) Snapshot() GaugeFloat64Snapshot { return NilGaugeFloat64{} }
func (NilGaugeFloat64) Update(v float64) {}
func (NilGaugeFloat64) Value() float64 { return 0.0 }
// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
// atomic to manage a single float64 value.
type StandardGaugeFloat64 struct {
floatBits atomic.Uint64
}
// Snapshot returns a read-only copy of the gauge. // Snapshot returns a read-only copy of the gauge.
func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64Snapshot { func (g *GaugeFloat64) Snapshot() GaugeFloat64Snapshot {
v := math.Float64frombits(g.floatBits.Load()) v := math.Float64frombits((*atomic.Uint64)(g).Load())
return gaugeFloat64Snapshot(v) return GaugeFloat64Snapshot(v)
} }
// Update updates the gauge's value. // Update updates the gauge's value.
func (g *StandardGaugeFloat64) Update(v float64) { func (g *GaugeFloat64) Update(v float64) {
g.floatBits.Store(math.Float64bits(v)) (*atomic.Uint64)(g).Store(math.Float64bits(v))
} }

View file

@ -5,16 +5,6 @@ import (
"sync" "sync"
) )
type GaugeInfoSnapshot interface {
Value() GaugeInfoValue
}
// GaugeInfo holds a GaugeInfoValue value that can be set arbitrarily.
type GaugeInfo interface {
Update(GaugeInfoValue)
Snapshot() GaugeInfoSnapshot
}
// GaugeInfoValue is a mapping of keys to values // GaugeInfoValue is a mapping of keys to values
type GaugeInfoValue map[string]string type GaugeInfoValue map[string]string
@ -24,26 +14,23 @@ func (val GaugeInfoValue) String() string {
} }
// GetOrRegisterGaugeInfo returns an existing GaugeInfo or constructs and registers a // GetOrRegisterGaugeInfo returns an existing GaugeInfo or constructs and registers a
// new StandardGaugeInfo. // new GaugeInfo.
func GetOrRegisterGaugeInfo(name string, r Registry) GaugeInfo { func GetOrRegisterGaugeInfo(name string, r Registry) *GaugeInfo {
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
} }
return r.GetOrRegister(name, NewGaugeInfo()).(GaugeInfo) return r.GetOrRegister(name, NewGaugeInfo()).(*GaugeInfo)
} }
// NewGaugeInfo constructs a new StandardGaugeInfo. // NewGaugeInfo constructs a new GaugeInfo.
func NewGaugeInfo() GaugeInfo { func NewGaugeInfo() *GaugeInfo {
if !Enabled { return &GaugeInfo{
return NilGaugeInfo{}
}
return &StandardGaugeInfo{
value: GaugeInfoValue{}, value: GaugeInfoValue{},
} }
} }
// NewRegisteredGaugeInfo constructs and registers a new StandardGaugeInfo. // NewRegisteredGaugeInfo constructs and registers a new GaugeInfo.
func NewRegisteredGaugeInfo(name string, r Registry) GaugeInfo { func NewRegisteredGaugeInfo(name string, r Registry) *GaugeInfo {
c := NewGaugeInfo() c := NewGaugeInfo()
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
@ -53,31 +40,24 @@ func NewRegisteredGaugeInfo(name string, r Registry) GaugeInfo {
} }
// gaugeInfoSnapshot is a read-only copy of another GaugeInfo. // gaugeInfoSnapshot is a read-only copy of another GaugeInfo.
type gaugeInfoSnapshot GaugeInfoValue type GaugeInfoSnapshot GaugeInfoValue
// Value returns the value at the time the snapshot was taken. // Value returns the value at the time the snapshot was taken.
func (g gaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) } func (g GaugeInfoSnapshot) Value() GaugeInfoValue { return GaugeInfoValue(g) }
type NilGaugeInfo struct{} // GaugeInfo maintains a set of key/value mappings.
type GaugeInfo struct {
func (NilGaugeInfo) Snapshot() GaugeInfoSnapshot { return NilGaugeInfo{} }
func (NilGaugeInfo) Update(v GaugeInfoValue) {}
func (NilGaugeInfo) Value() GaugeInfoValue { return GaugeInfoValue{} }
// StandardGaugeInfo is the standard implementation of a GaugeInfo and uses
// sync.Mutex to manage a single string value.
type StandardGaugeInfo struct {
mutex sync.Mutex mutex sync.Mutex
value GaugeInfoValue value GaugeInfoValue
} }
// Snapshot returns a read-only copy of the gauge. // Snapshot returns a read-only copy of the gauge.
func (g *StandardGaugeInfo) Snapshot() GaugeInfoSnapshot { func (g *GaugeInfo) Snapshot() GaugeInfoSnapshot {
return gaugeInfoSnapshot(g.value) return GaugeInfoSnapshot(g.value)
} }
// Update updates the gauge's value. // Update updates the gauge's value.
func (g *StandardGaugeInfo) Update(v GaugeInfoValue) { func (g *GaugeInfo) Update(v GaugeInfoValue) {
g.mutex.Lock() g.mutex.Lock()
defer g.mutex.Unlock() defer g.mutex.Unlock()
g.value = v g.value = v

View file

@ -1,117 +0,0 @@
package metrics
import (
"bufio"
"fmt"
"log"
"net"
"strconv"
"strings"
"time"
)
// GraphiteConfig provides a container with configuration parameters for
// the Graphite exporter
type GraphiteConfig struct {
Addr *net.TCPAddr // Network address to connect to
Registry Registry // Registry to be exported
FlushInterval time.Duration // Flush interval
DurationUnit time.Duration // Time conversion unit for durations
Prefix string // Prefix to be prepended to metric names
Percentiles []float64 // Percentiles to export from timers and histograms
}
// Graphite is a blocking exporter function which reports metrics in r
// to a graphite server located at addr, flushing them every d duration
// and prepending metric names with prefix.
func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
GraphiteWithConfig(GraphiteConfig{
Addr: addr,
Registry: r,
FlushInterval: d,
DurationUnit: time.Nanosecond,
Prefix: prefix,
Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
})
}
// GraphiteWithConfig is a blocking exporter function just like Graphite,
// but it takes a GraphiteConfig instead.
func GraphiteWithConfig(c GraphiteConfig) {
log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
for range time.Tick(c.FlushInterval) {
if err := graphite(&c); nil != err {
log.Println(err)
}
}
}
// GraphiteOnce performs a single submission to Graphite, returning a
// non-nil error on failed connections. This can be used in a loop
// similar to GraphiteWithConfig for custom error handling.
func GraphiteOnce(c GraphiteConfig) error {
log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
return graphite(&c)
}
func graphite(c *GraphiteConfig) error {
now := time.Now().Unix()
du := float64(c.DurationUnit)
conn, err := net.DialTCP("tcp", nil, c.Addr)
if nil != err {
return err
}
defer conn.Close()
w := bufio.NewWriter(conn)
c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) {
case Counter:
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Snapshot().Count(), now)
case CounterFloat64:
fmt.Fprintf(w, "%s.%s.count %f %d\n", c.Prefix, name, metric.Snapshot().Count(), now)
case Gauge:
fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Snapshot().Value(), now)
case GaugeFloat64:
fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Snapshot().Value(), now)
case GaugeInfo:
fmt.Fprintf(w, "%s.%s.value %s %d\n", c.Prefix, name, metric.Snapshot().Value().String(), now)
case Histogram:
h := metric.Snapshot()
ps := h.Percentiles(c.Percentiles)
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
for psIdx, psKey := range c.Percentiles {
key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
}
case Meter:
m := metric.Snapshot()
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
case Timer:
t := metric.Snapshot()
ps := t.Percentiles(c.Percentiles)
fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
for psIdx, psKey := range c.Percentiles {
key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
}
fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
}
w.Flush()
})
return nil
}

View file

@ -1,22 +0,0 @@
package metrics
import (
"net"
"time"
)
func ExampleGraphite() {
addr, _ := net.ResolveTCPAddr("net", ":2003")
go Graphite(DefaultRegistry, 1*time.Second, "some.prefix", addr)
}
func ExampleGraphiteWithConfig() {
addr, _ := net.ResolveTCPAddr("net", ":2003")
go GraphiteWithConfig(GraphiteConfig{
Addr: addr,
Registry: DefaultRegistry,
FlushInterval: 1 * time.Second,
DurationUnit: time.Millisecond,
Percentiles: []float64{0.5, 0.75, 0.99, 0.999},
})
}

View file

@ -1,61 +1,35 @@
package metrics package metrics
// Healthcheck holds an error value describing an arbitrary up/down status.
type Healthcheck interface {
Check()
Error() error
Healthy()
Unhealthy(error)
}
// NewHealthcheck constructs a new Healthcheck which will use the given // NewHealthcheck constructs a new Healthcheck which will use the given
// function to update its status. // function to update its status.
func NewHealthcheck(f func(Healthcheck)) Healthcheck { func NewHealthcheck(f func(*Healthcheck)) *Healthcheck {
if !Enabled { return &Healthcheck{nil, f}
return NilHealthcheck{}
}
return &StandardHealthcheck{nil, f}
} }
// NilHealthcheck is a no-op. // Healthcheck is the standard implementation of a Healthcheck and
type NilHealthcheck struct{}
// Check is a no-op.
func (NilHealthcheck) Check() {}
// Error is a no-op.
func (NilHealthcheck) Error() error { return nil }
// Healthy is a no-op.
func (NilHealthcheck) Healthy() {}
// Unhealthy is a no-op.
func (NilHealthcheck) Unhealthy(error) {}
// StandardHealthcheck is the standard implementation of a Healthcheck and
// stores the status and a function to call to update the status. // stores the status and a function to call to update the status.
type StandardHealthcheck struct { type Healthcheck struct {
err error err error
f func(Healthcheck) f func(*Healthcheck)
} }
// Check runs the healthcheck function to update the healthcheck's status. // Check runs the healthcheck function to update the healthcheck's status.
func (h *StandardHealthcheck) Check() { func (h *Healthcheck) Check() {
h.f(h) h.f(h)
} }
// Error returns the healthcheck's status, which will be nil if it is healthy. // Error returns the healthcheck's status, which will be nil if it is healthy.
func (h *StandardHealthcheck) Error() error { func (h *Healthcheck) Error() error {
return h.err return h.err
} }
// Healthy marks the healthcheck as healthy. // Healthy marks the healthcheck as healthy.
func (h *StandardHealthcheck) Healthy() { func (h *Healthcheck) Healthy() {
h.err = nil h.err = nil
} }
// Unhealthy marks the healthcheck as unhealthy. The error is stored and // Unhealthy marks the healthcheck as unhealthy. The error is stored and
// may be retrieved by the Error method. // may be retrieved by the Error method.
func (h *StandardHealthcheck) Unhealthy(err error) { func (h *Healthcheck) Unhealthy(err error) {
h.err = err h.err = err
} }

View file

@ -1,7 +1,16 @@
package metrics package metrics
type HistogramSnapshot interface { type HistogramSnapshot interface {
SampleSnapshot Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Size() int
StdDev() float64
Sum() int64
Variance() float64
} }
// Histogram calculates distribution statistics from a series of int64 values. // Histogram calculates distribution statistics from a series of int64 values.
@ -31,10 +40,7 @@ func GetOrRegisterHistogramLazy(name string, r Registry, s func() Sample) Histog
// NewHistogram constructs a new StandardHistogram from a Sample. // NewHistogram constructs a new StandardHistogram from a Sample.
func NewHistogram(s Sample) Histogram { func NewHistogram(s Sample) Histogram {
if !Enabled { return &StandardHistogram{s}
return NilHistogram{}
}
return &StandardHistogram{sample: s}
} }
// NewRegisteredHistogram constructs and registers a new StandardHistogram from // NewRegisteredHistogram constructs and registers a new StandardHistogram from
@ -48,13 +54,6 @@ func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
return c return c
} }
// NilHistogram is a no-op Histogram.
type NilHistogram struct{}
func (NilHistogram) Clear() {}
func (NilHistogram) Snapshot() HistogramSnapshot { return (*emptySnapshot)(nil) }
func (NilHistogram) Update(v int64) {}
// StandardHistogram is the standard implementation of a Histogram and uses a // StandardHistogram is the standard implementation of a Histogram and uses a
// Sample to bound its memory use. // Sample to bound its memory use.
type StandardHistogram struct { type StandardHistogram struct {

View file

@ -1,48 +0,0 @@
// Copyright 2023 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package metrics
// compile-time checks that interfaces are implemented.
var (
_ SampleSnapshot = (*emptySnapshot)(nil)
_ HistogramSnapshot = (*emptySnapshot)(nil)
_ CounterSnapshot = (*emptySnapshot)(nil)
_ GaugeSnapshot = (*emptySnapshot)(nil)
_ MeterSnapshot = (*emptySnapshot)(nil)
_ EWMASnapshot = (*emptySnapshot)(nil)
_ TimerSnapshot = (*emptySnapshot)(nil)
)
type emptySnapshot struct{}
func (*emptySnapshot) Count() int64 { return 0 }
func (*emptySnapshot) Max() int64 { return 0 }
func (*emptySnapshot) Mean() float64 { return 0.0 }
func (*emptySnapshot) Min() int64 { return 0 }
func (*emptySnapshot) Percentile(p float64) float64 { return 0.0 }
func (*emptySnapshot) Percentiles(ps []float64) []float64 { return make([]float64, len(ps)) }
func (*emptySnapshot) Size() int { return 0 }
func (*emptySnapshot) StdDev() float64 { return 0.0 }
func (*emptySnapshot) Sum() int64 { return 0 }
func (*emptySnapshot) Values() []int64 { return []int64{} }
func (*emptySnapshot) Variance() float64 { return 0.0 }
func (*emptySnapshot) Value() int64 { return 0 }
func (*emptySnapshot) Rate() float64 { return 0.0 }
func (*emptySnapshot) Rate1() float64 { return 0.0 }
func (*emptySnapshot) Rate5() float64 { return 0.0 }
func (*emptySnapshot) Rate15() float64 { return 0.0 }
func (*emptySnapshot) RateMean() float64 { return 0.0 }

View file

@ -8,31 +8,31 @@ import (
func readMeter(namespace, name string, i interface{}) (string, map[string]interface{}) { func readMeter(namespace, name string, i interface{}) (string, map[string]interface{}) {
switch metric := i.(type) { switch metric := i.(type) {
case metrics.Counter: case *metrics.Counter:
measurement := fmt.Sprintf("%s%s.count", namespace, name) measurement := fmt.Sprintf("%s%s.count", namespace, name)
fields := map[string]interface{}{ fields := map[string]interface{}{
"value": metric.Snapshot().Count(), "value": metric.Snapshot().Count(),
} }
return measurement, fields return measurement, fields
case metrics.CounterFloat64: case *metrics.CounterFloat64:
measurement := fmt.Sprintf("%s%s.count", namespace, name) measurement := fmt.Sprintf("%s%s.count", namespace, name)
fields := map[string]interface{}{ fields := map[string]interface{}{
"value": metric.Snapshot().Count(), "value": metric.Snapshot().Count(),
} }
return measurement, fields return measurement, fields
case metrics.Gauge: case *metrics.Gauge:
measurement := fmt.Sprintf("%s%s.gauge", namespace, name) measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
fields := map[string]interface{}{ fields := map[string]interface{}{
"value": metric.Snapshot().Value(), "value": metric.Snapshot().Value(),
} }
return measurement, fields return measurement, fields
case metrics.GaugeFloat64: case *metrics.GaugeFloat64:
measurement := fmt.Sprintf("%s%s.gauge", namespace, name) measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
fields := map[string]interface{}{ fields := map[string]interface{}{
"value": metric.Snapshot().Value(), "value": metric.Snapshot().Value(),
} }
return measurement, fields return measurement, fields
case metrics.GaugeInfo: case *metrics.GaugeInfo:
ms := metric.Snapshot() ms := metric.Snapshot()
measurement := fmt.Sprintf("%s%s.gauge", namespace, name) measurement := fmt.Sprintf("%s%s.gauge", namespace, name)
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -62,7 +62,7 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf
"p9999": ps[6], "p9999": ps[6],
} }
return measurement, fields return measurement, fields
case metrics.Meter: case *metrics.Meter:
ms := metric.Snapshot() ms := metric.Snapshot()
measurement := fmt.Sprintf("%s%s.meter", namespace, name) measurement := fmt.Sprintf("%s%s.meter", namespace, name)
fields := map[string]interface{}{ fields := map[string]interface{}{
@ -73,7 +73,7 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf
"mean": ms.RateMean(), "mean": ms.RateMean(),
} }
return measurement, fields return measurement, fields
case metrics.Timer: case *metrics.Timer:
ms := metric.Snapshot() ms := metric.Snapshot()
ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}) ps := ms.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999})
@ -97,7 +97,7 @@ func readMeter(namespace, name string, i interface{}) (string, map[string]interf
"meanrate": ms.RateMean(), "meanrate": ms.RateMean(),
} }
return measurement, fields return measurement, fields
case metrics.ResettingTimer: case *metrics.ResettingTimer:
ms := metric.Snapshot() ms := metric.Snapshot()
if ms.Count() == 0 { if ms.Count() == 0 {
break break

View file

@ -33,7 +33,7 @@ import (
) )
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
metrics.Enabled = true metrics.Enable()
os.Exit(m.Run()) os.Exit(m.Run())
} }

View file

@ -1,5 +1,5 @@
package metrics package metrics
func init() { func init() {
Enabled = true metricsEnabled = true
} }

View file

@ -21,25 +21,21 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
for range time.Tick(freq) { for range time.Tick(freq) {
r.Each(func(name string, i interface{}) { r.Each(func(name string, i interface{}) {
switch metric := i.(type) { switch metric := i.(type) {
case Counter: case *Counter:
l.Printf("counter %s\n", name) l.Printf("counter %s\n", name)
l.Printf(" count: %9d\n", metric.Snapshot().Count()) l.Printf(" count: %9d\n", metric.Snapshot().Count())
case CounterFloat64: case *CounterFloat64:
l.Printf("counter %s\n", name) l.Printf("counter %s\n", name)
l.Printf(" count: %f\n", metric.Snapshot().Count()) l.Printf(" count: %f\n", metric.Snapshot().Count())
case Gauge: case *Gauge:
l.Printf("gauge %s\n", name) l.Printf("gauge %s\n", name)
l.Printf(" value: %9d\n", metric.Snapshot().Value()) l.Printf(" value: %9d\n", metric.Snapshot().Value())
case GaugeFloat64: case *GaugeFloat64:
l.Printf("gauge %s\n", name) l.Printf("gauge %s\n", name)
l.Printf(" value: %f\n", metric.Snapshot().Value()) l.Printf(" value: %f\n", metric.Snapshot().Value())
case GaugeInfo: case *GaugeInfo:
l.Printf("gauge %s\n", name) l.Printf("gauge %s\n", name)
l.Printf(" value: %s\n", metric.Snapshot().Value()) l.Printf(" value: %s\n", metric.Snapshot().Value())
case Healthcheck:
metric.Check()
l.Printf("healthcheck %s\n", name)
l.Printf(" error: %v\n", metric.Error())
case Histogram: case Histogram:
h := metric.Snapshot() h := metric.Snapshot()
ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
@ -54,7 +50,7 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
l.Printf(" 95%%: %12.2f\n", ps[2]) l.Printf(" 95%%: %12.2f\n", ps[2])
l.Printf(" 99%%: %12.2f\n", ps[3]) l.Printf(" 99%%: %12.2f\n", ps[3])
l.Printf(" 99.9%%: %12.2f\n", ps[4]) l.Printf(" 99.9%%: %12.2f\n", ps[4])
case Meter: case *Meter:
m := metric.Snapshot() m := metric.Snapshot()
l.Printf("meter %s\n", name) l.Printf("meter %s\n", name)
l.Printf(" count: %9d\n", m.Count()) l.Printf(" count: %9d\n", m.Count())
@ -62,7 +58,7 @@ func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
l.Printf(" mean rate: %12.2f\n", m.RateMean()) l.Printf(" mean rate: %12.2f\n", m.RateMean())
case Timer: case *Timer:
t := metric.Snapshot() t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
l.Printf("timer %s\n", name) l.Printf("timer %s\n", name)

View file

@ -7,114 +7,78 @@ import (
"time" "time"
) )
type MeterSnapshot interface {
Count() int64
Rate1() float64
Rate5() float64
Rate15() float64
RateMean() float64
}
// Meters count events to produce exponentially-weighted moving average rates
// at one-, five-, and fifteen-minutes and a mean rate.
type Meter interface {
Mark(int64)
Snapshot() MeterSnapshot
Stop()
}
// GetOrRegisterMeter returns an existing Meter or constructs and registers a // GetOrRegisterMeter returns an existing Meter or constructs and registers a
// new StandardMeter. // new Meter.
// Be sure to unregister the meter from the registry once it is of no use to // Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection. // allow for garbage collection.
func GetOrRegisterMeter(name string, r Registry) Meter { func GetOrRegisterMeter(name string, r Registry) *Meter {
if nil == r { if r == nil {
r = DefaultRegistry r = DefaultRegistry
} }
return r.GetOrRegister(name, NewMeter).(Meter) return r.GetOrRegister(name, NewMeter).(*Meter)
} }
// NewMeter constructs a new StandardMeter and launches a goroutine. // NewMeter constructs a new Meter and launches a goroutine.
// Be sure to call Stop() once the meter is of no use to allow for garbage collection. // Be sure to call Stop() once the meter is of no use to allow for garbage collection.
func NewMeter() Meter { func NewMeter() *Meter {
if !Enabled { m := newMeter()
return NilMeter{} arbiter.add(m)
}
m := newStandardMeter()
arbiter.Lock()
defer arbiter.Unlock()
arbiter.meters[m] = struct{}{}
if !arbiter.started {
arbiter.started = true
go arbiter.tick()
}
return m return m
} }
// NewInactiveMeter returns a meter but does not start any goroutines. This // NewInactiveMeter returns a meter but does not start any goroutines. This
// method is mainly intended for testing. // method is mainly intended for testing.
func NewInactiveMeter() Meter { func NewInactiveMeter() *Meter {
if !Enabled { return newMeter()
return NilMeter{}
}
m := newStandardMeter()
return m
} }
// NewRegisteredMeter constructs and registers a new StandardMeter // NewRegisteredMeter constructs and registers a new Meter
// and launches a goroutine. // and launches a goroutine.
// Be sure to unregister the meter from the registry once it is of no use to // Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection. // allow for garbage collection.
func NewRegisteredMeter(name string, r Registry) Meter { func NewRegisteredMeter(name string, r Registry) *Meter {
return GetOrRegisterMeter(name, r) return GetOrRegisterMeter(name, r)
} }
// meterSnapshot is a read-only copy of the meter's internal values. // MeterSnapshot is a read-only copy of the meter's internal values.
type meterSnapshot struct { type MeterSnapshot struct {
count int64 count int64
rate1, rate5, rate15, rateMean float64 rate1, rate5, rate15, rateMean float64
} }
// Count returns the count of events at the time the snapshot was taken. // Count returns the count of events at the time the snapshot was taken.
func (m *meterSnapshot) Count() int64 { return m.count } func (m *MeterSnapshot) Count() int64 { return m.count }
// Rate1 returns the one-minute moving average rate of events per second at the // Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken. // time the snapshot was taken.
func (m *meterSnapshot) Rate1() float64 { return m.rate1 } func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
// Rate5 returns the five-minute moving average rate of events per second at // Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken. // the time the snapshot was taken.
func (m *meterSnapshot) Rate5() float64 { return m.rate5 } func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
// Rate15 returns the fifteen-minute moving average rate of events per second // Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken. // at the time the snapshot was taken.
func (m *meterSnapshot) Rate15() float64 { return m.rate15 } func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
// RateMean returns the meter's mean rate of events per second at the time the // RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken. // snapshot was taken.
func (m *meterSnapshot) RateMean() float64 { return m.rateMean } func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
// NilMeter is a no-op Meter. // Meter count events to produce exponentially-weighted moving average rates
type NilMeter struct{} // at one-, five-, and fifteen-minutes and a mean rate.
type Meter struct {
func (NilMeter) Count() int64 { return 0 }
func (NilMeter) Mark(n int64) {}
func (NilMeter) Snapshot() MeterSnapshot { return (*emptySnapshot)(nil) }
func (NilMeter) Stop() {}
// StandardMeter is the standard implementation of a Meter.
type StandardMeter struct {
count atomic.Int64 count atomic.Int64
uncounted atomic.Int64 // not yet added to the EWMAs uncounted atomic.Int64 // not yet added to the EWMAs
rateMean atomic.Uint64 rateMean atomic.Uint64
a1, a5, a15 EWMA a1, a5, a15 *EWMA
startTime time.Time startTime time.Time
stopped atomic.Bool stopped atomic.Bool
} }
func newStandardMeter() *StandardMeter { func newMeter() *Meter {
return &StandardMeter{ return &Meter{
a1: NewEWMA1(), a1: NewEWMA1(),
a5: NewEWMA5(), a5: NewEWMA5(),
a15: NewEWMA15(), a15: NewEWMA15(),
@ -123,22 +87,20 @@ func newStandardMeter() *StandardMeter {
} }
// Stop stops the meter, Mark() will be a no-op if you use it after being stopped. // Stop stops the meter, Mark() will be a no-op if you use it after being stopped.
func (m *StandardMeter) Stop() { func (m *Meter) Stop() {
if stopped := m.stopped.Swap(true); !stopped { if stopped := m.stopped.Swap(true); !stopped {
arbiter.Lock() arbiter.remove(m)
delete(arbiter.meters, m)
arbiter.Unlock()
} }
} }
// Mark records the occurrence of n events. // Mark records the occurrence of n events.
func (m *StandardMeter) Mark(n int64) { func (m *Meter) Mark(n int64) {
m.uncounted.Add(n) m.uncounted.Add(n)
} }
// Snapshot returns a read-only copy of the meter. // Snapshot returns a read-only copy of the meter.
func (m *StandardMeter) Snapshot() MeterSnapshot { func (m *Meter) Snapshot() *MeterSnapshot {
return &meterSnapshot{ return &MeterSnapshot{
count: m.count.Load() + m.uncounted.Load(), count: m.count.Load() + m.uncounted.Load(),
rate1: m.a1.Snapshot().Rate(), rate1: m.a1.Snapshot().Rate(),
rate5: m.a5.Snapshot().Rate(), rate5: m.a5.Snapshot().Rate(),
@ -147,7 +109,7 @@ func (m *StandardMeter) Snapshot() MeterSnapshot {
} }
} }
func (m *StandardMeter) tick() { func (m *Meter) tick() {
// Take the uncounted values, add to count // Take the uncounted values, add to count
n := m.uncounted.Swap(0) n := m.uncounted.Swap(0)
count := m.count.Add(n) count := m.count.Add(n)
@ -157,33 +119,51 @@ func (m *StandardMeter) tick() {
m.a5.Update(n) m.a5.Update(n)
m.a15.Update(n) m.a15.Update(n)
// And trigger them to calculate the rates // And trigger them to calculate the rates
m.a1.Tick() m.a1.tick()
m.a5.Tick() m.a5.tick()
m.a15.Tick() m.a15.tick()
} }
// meterArbiter ticks meters every 5s from a single goroutine. var arbiter = meterTicker{meters: make(map[*Meter]struct{})}
// meterTicker ticks meters every 5s from a single goroutine.
// meters are references in a set for future stopping. // meters are references in a set for future stopping.
type meterArbiter struct { type meterTicker struct {
sync.RWMutex mu sync.RWMutex
started bool started bool
meters map[*StandardMeter]struct{} meters map[*Meter]struct{}
ticker *time.Ticker
} }
var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})} // add adds another *Meter ot the arbiter, and starts the arbiter ticker.
func (ma *meterTicker) add(m *Meter) {
// tick meters on the scheduled interval ma.mu.Lock()
func (ma *meterArbiter) tick() { defer ma.mu.Unlock()
for range ma.ticker.C { ma.meters[m] = struct{}{}
ma.tickMeters() if !ma.started {
ma.started = true
go ma.loop()
} }
} }
func (ma *meterArbiter) tickMeters() { // remove removes a meter from the set of ticked meters.
ma.RLock() func (ma *meterTicker) remove(m *Meter) {
defer ma.RUnlock() ma.mu.Lock()
for meter := range ma.meters { delete(ma.meters, m)
meter.tick() ma.mu.Unlock()
}
// loop ticks meters on a 5 second interval.
func (ma *meterTicker) loop() {
ticker := time.NewTicker(5 * time.Second)
for range ticker.C {
if !metricsEnabled {
continue
}
ma.mu.RLock()
for meter := range ma.meters {
meter.tick()
}
ma.mu.RUnlock()
} }
} }

View file

@ -28,18 +28,12 @@ func TestGetOrRegisterMeter(t *testing.T) {
} }
func TestMeterDecay(t *testing.T) { func TestMeterDecay(t *testing.T) {
ma := meterArbiter{ m := newMeter()
ticker: time.NewTicker(time.Millisecond),
meters: make(map[*StandardMeter]struct{}),
}
defer ma.ticker.Stop()
m := newStandardMeter()
ma.meters[m] = struct{}{}
m.Mark(1) m.Mark(1)
ma.tickMeters() m.tick()
rateMean := m.Snapshot().RateMean() rateMean := m.Snapshot().RateMean()
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
ma.tickMeters() m.tick()
if m.Snapshot().RateMean() >= rateMean { if m.Snapshot().RateMean() >= rateMean {
t.Error("m.RateMean() didn't decrease") t.Error("m.RateMean() didn't decrease")
} }

View file

@ -6,52 +6,29 @@
package metrics package metrics
import ( import (
"os"
"runtime/metrics" "runtime/metrics"
"runtime/pprof" "runtime/pprof"
"strconv"
"strings"
"syscall"
"time" "time"
"github.com/ethereum/go-ethereum/log"
) )
// Enabled is checked by the constructor functions for all of the var (
// standard metrics. If it is true, the metric returned is a stub. metricsEnabled = false
)
// Enabled is checked by functions that are deemed 'expensive', e.g. if a
// meter-type does locking and/or non-trivial math operations during update.
func Enabled() bool {
return metricsEnabled
}
// Enable enables the metrics system.
// The Enabled-flag is expected to be set, once, during startup, but toggling off and on
// is not supported.
// //
// This global kill-switch helps quantify the observer effect and makes // Enable is not safe to call concurrently. You need to call this as early as possible in
// for less cluttered pprof profiles. // the program, before any metrics collection will happen.
var Enabled = false func Enable() {
metricsEnabled = true
// enablerFlags is the CLI flag names to use to enable metrics collections.
var enablerFlags = []string{"metrics"}
// enablerEnvVars is the env var names to use to enable metrics collections.
var enablerEnvVars = []string{"GETH_METRICS"}
// init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
// and peek into the command line args for the metrics flag.
func init() {
for _, enabler := range enablerEnvVars {
if val, found := syscall.Getenv(enabler); found && !Enabled {
if enable, _ := strconv.ParseBool(val); enable { // ignore error, flag parser will choke on it later
log.Info("Enabling metrics collection")
Enabled = true
}
}
}
for _, arg := range os.Args {
flag := strings.TrimLeft(arg, "-")
for _, enabler := range enablerFlags {
if !Enabled && flag == enabler {
log.Info("Enabling metrics collection")
Enabled = true
}
}
}
} }
var threadCreateProfile = pprof.Lookup("threadcreate") var threadCreateProfile = pprof.Lookup("threadcreate")
@ -128,7 +105,7 @@ func readRuntimeStats(v *runtimeStats) {
// CollectProcessMetrics periodically collects various metrics about the running process. // CollectProcessMetrics periodically collects various metrics about the running process.
func CollectProcessMetrics(refresh time.Duration) { func CollectProcessMetrics(refresh time.Duration) {
// Short circuit if the metrics system is disabled // Short circuit if the metrics system is disabled
if !Enabled { if !metricsEnabled {
return return
} }

View file

@ -7,8 +7,6 @@ import (
"time" "time"
) )
const FANOUT = 128
func TestReadRuntimeValues(t *testing.T) { func TestReadRuntimeValues(t *testing.T) {
var v runtimeStats var v runtimeStats
readRuntimeStats(&v) readRuntimeStats(&v)
@ -16,60 +14,23 @@ func TestReadRuntimeValues(t *testing.T) {
} }
func BenchmarkMetrics(b *testing.B) { func BenchmarkMetrics(b *testing.B) {
r := NewRegistry() var (
c := NewRegisteredCounter("counter", r) r = NewRegistry()
cf := NewRegisteredCounterFloat64("counterfloat64", r) c = NewRegisteredCounter("counter", r)
g := NewRegisteredGauge("gauge", r) cf = NewRegisteredCounterFloat64("counterfloat64", r)
gf := NewRegisteredGaugeFloat64("gaugefloat64", r) g = NewRegisteredGauge("gauge", r)
h := NewRegisteredHistogram("histogram", r, NewUniformSample(100)) gf = NewRegisteredGaugeFloat64("gaugefloat64", r)
m := NewRegisteredMeter("meter", r) h = NewRegisteredHistogram("histogram", r, NewUniformSample(100))
t := NewRegisteredTimer("timer", r) m = NewRegisteredMeter("meter", r)
t = NewRegisteredTimer("timer", r)
)
RegisterDebugGCStats(r) RegisterDebugGCStats(r)
b.ResetTimer() b.ResetTimer()
ch := make(chan bool) var wg sync.WaitGroup
wg.Add(128)
wgD := &sync.WaitGroup{} for i := 0; i < 128; i++ {
/*
wgD.Add(1)
go func() { go func() {
defer wgD.Done()
//log.Println("go CaptureDebugGCStats")
for {
select {
case <-ch:
//log.Println("done CaptureDebugGCStats")
return
default:
CaptureDebugGCStatsOnce(r)
}
}
}()
//*/
wgW := &sync.WaitGroup{}
/*
wgW.Add(1)
go func() {
defer wgW.Done()
//log.Println("go Write")
for {
select {
case <-ch:
//log.Println("done Write")
return
default:
WriteOnce(r, io.Discard)
}
}
}()
//*/
wg := &sync.WaitGroup{}
wg.Add(FANOUT)
for i := 0; i < FANOUT; i++ {
go func(i int) {
defer wg.Done() defer wg.Done()
//log.Println("go", i)
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
c.Inc(1) c.Inc(1)
cf.Inc(1.0) cf.Inc(1.0)
@ -79,13 +40,9 @@ func BenchmarkMetrics(b *testing.B) {
m.Mark(1) m.Mark(1)
t.Update(1) t.Update(1)
} }
//log.Println("done", i) }()
}(i)
} }
wg.Wait() wg.Wait()
close(ch)
wgD.Wait()
wgW.Wait()
} }
func Example() { func Example() {

View file

@ -64,15 +64,15 @@ func (c *OpenTSDBConfig) writeRegistry(w io.Writer, now int64, shortHostname str
c.Registry.Each(func(name string, i interface{}) { c.Registry.Each(func(name string, i interface{}) {
switch metric := i.(type) { switch metric := i.(type) {
case Counter: case *Counter:
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname) fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname)
case CounterFloat64: case *CounterFloat64:
fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname) fmt.Fprintf(w, "put %s.%s.count %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Count(), shortHostname)
case Gauge: case *Gauge:
fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname) fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname)
case GaugeFloat64: case *GaugeFloat64:
fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname) fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Snapshot().Value(), shortHostname)
case GaugeInfo: case *GaugeInfo:
fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Snapshot().Value().String(), shortHostname) fmt.Fprintf(w, "put %s.%s.value %d %s host=%s\n", c.Prefix, name, now, metric.Snapshot().Value().String(), shortHostname)
case Histogram: case Histogram:
h := metric.Snapshot() h := metric.Snapshot()
@ -87,14 +87,14 @@ func (c *OpenTSDBConfig) writeRegistry(w io.Writer, now int64, shortHostname str
fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname) fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname) fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname) fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
case Meter: case *Meter:
m := metric.Snapshot() m := metric.Snapshot()
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
case Timer: case *Timer:
t := metric.Snapshot() t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)

View file

@ -51,23 +51,23 @@ func newCollector() *collector {
// metric type is not supported/known. // metric type is not supported/known.
func (c *collector) Add(name string, i any) error { func (c *collector) Add(name string, i any) error {
switch m := i.(type) { switch m := i.(type) {
case metrics.Counter: case *metrics.Counter:
c.addCounter(name, m.Snapshot()) c.addCounter(name, m.Snapshot())
case metrics.CounterFloat64: case *metrics.CounterFloat64:
c.addCounterFloat64(name, m.Snapshot()) c.addCounterFloat64(name, m.Snapshot())
case metrics.Gauge: case *metrics.Gauge:
c.addGauge(name, m.Snapshot()) c.addGauge(name, m.Snapshot())
case metrics.GaugeFloat64: case *metrics.GaugeFloat64:
c.addGaugeFloat64(name, m.Snapshot()) c.addGaugeFloat64(name, m.Snapshot())
case metrics.GaugeInfo: case *metrics.GaugeInfo:
c.addGaugeInfo(name, m.Snapshot()) c.addGaugeInfo(name, m.Snapshot())
case metrics.Histogram: case metrics.Histogram:
c.addHistogram(name, m.Snapshot()) c.addHistogram(name, m.Snapshot())
case metrics.Meter: case *metrics.Meter:
c.addMeter(name, m.Snapshot()) c.addMeter(name, m.Snapshot())
case metrics.Timer: case *metrics.Timer:
c.addTimer(name, m.Snapshot()) c.addTimer(name, m.Snapshot())
case metrics.ResettingTimer: case *metrics.ResettingTimer:
c.addResettingTimer(name, m.Snapshot()) c.addResettingTimer(name, m.Snapshot())
default: default:
return fmt.Errorf("unknown prometheus metric type %T", i) return fmt.Errorf("unknown prometheus metric type %T", i)
@ -106,11 +106,11 @@ func (c *collector) addHistogram(name string, m metrics.HistogramSnapshot) {
c.buff.WriteRune('\n') c.buff.WriteRune('\n')
} }
func (c *collector) addMeter(name string, m metrics.MeterSnapshot) { func (c *collector) addMeter(name string, m *metrics.MeterSnapshot) {
c.writeGaugeCounter(name, m.Count()) c.writeGaugeCounter(name, m.Count())
} }
func (c *collector) addTimer(name string, m metrics.TimerSnapshot) { func (c *collector) addTimer(name string, m *metrics.TimerSnapshot) {
pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999} pv := []float64{0.5, 0.75, 0.95, 0.99, 0.999, 0.9999}
ps := m.Percentiles(pv) ps := m.Percentiles(pv)
c.writeSummaryCounter(name, m.Count()) c.writeSummaryCounter(name, m.Count())
@ -121,7 +121,7 @@ func (c *collector) addTimer(name string, m metrics.TimerSnapshot) {
c.buff.WriteRune('\n') c.buff.WriteRune('\n')
} }
func (c *collector) addResettingTimer(name string, m metrics.ResettingTimerSnapshot) { func (c *collector) addResettingTimer(name string, m *metrics.ResettingTimerSnapshot) {
if m.Count() <= 0 { if m.Count() <= 0 {
return return
} }

View file

@ -27,7 +27,7 @@ import (
) )
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
metrics.Enabled = true metrics.Enable()
os.Exit(m.Run()) os.Exit(m.Run())
} }

View file

@ -1,6 +1,7 @@
package metrics package metrics
import ( import (
"errors"
"fmt" "fmt"
"reflect" "reflect"
"sort" "sort"
@ -8,14 +9,10 @@ import (
"sync" "sync"
) )
// DuplicateMetric is the error returned by Registry. Register when a metric // ErrDuplicateMetric is the error returned by Registry.Register when a metric
// already exists. If you mean to Register that metric you must first // already exists. If you mean to Register that metric you must first
// Unregister the existing metric. // Unregister the existing metric.
type DuplicateMetric string var ErrDuplicateMetric = errors.New("duplicate metric")
func (err DuplicateMetric) Error() string {
return fmt.Sprintf("duplicate metric: %s", string(err))
}
// A Registry holds references to a set of metrics by name and can iterate // A Registry holds references to a set of metrics by name and can iterate
// over them, calling callback functions provided by the user. // over them, calling callback functions provided by the user.
@ -114,13 +111,13 @@ func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{}
return item return item
} }
// Register the given metric under the given name. Returns a DuplicateMetric // Register the given metric under the given name. Returns a ErrDuplicateMetric
// if a metric by the given name is already registered. // if a metric by the given name is already registered.
func (r *StandardRegistry) Register(name string, i interface{}) error { func (r *StandardRegistry) Register(name string, i interface{}) error {
// fast path // fast path
_, ok := r.metrics.Load(name) _, ok := r.metrics.Load(name)
if ok { if ok {
return DuplicateMetric(name) return fmt.Errorf("%w: %v", ErrDuplicateMetric, name)
} }
if v := reflect.ValueOf(i); v.Kind() == reflect.Func { if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
@ -128,7 +125,7 @@ func (r *StandardRegistry) Register(name string, i interface{}) error {
} }
_, loaded, _ := r.loadOrRegister(name, i) _, loaded, _ := r.loadOrRegister(name, i)
if loaded { if loaded {
return DuplicateMetric(name) return fmt.Errorf("%w: %v", ErrDuplicateMetric, name)
} }
return nil return nil
} }
@ -136,7 +133,7 @@ func (r *StandardRegistry) Register(name string, i interface{}) error {
// RunHealthchecks run all registered healthchecks. // RunHealthchecks run all registered healthchecks.
func (r *StandardRegistry) RunHealthchecks() { func (r *StandardRegistry) RunHealthchecks() {
r.metrics.Range(func(key, value any) bool { r.metrics.Range(func(key, value any) bool {
if h, ok := value.(Healthcheck); ok { if h, ok := value.(*Healthcheck); ok {
h.Check() h.Check()
} }
return true return true
@ -149,15 +146,15 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
r.Each(func(name string, i interface{}) { r.Each(func(name string, i interface{}) {
values := make(map[string]interface{}) values := make(map[string]interface{})
switch metric := i.(type) { switch metric := i.(type) {
case Counter: case *Counter:
values["count"] = metric.Snapshot().Count() values["count"] = metric.Snapshot().Count()
case CounterFloat64: case *CounterFloat64:
values["count"] = metric.Snapshot().Count() values["count"] = metric.Snapshot().Count()
case Gauge: case *Gauge:
values["value"] = metric.Snapshot().Value() values["value"] = metric.Snapshot().Value()
case GaugeFloat64: case *GaugeFloat64:
values["value"] = metric.Snapshot().Value() values["value"] = metric.Snapshot().Value()
case Healthcheck: case *Healthcheck:
values["error"] = nil values["error"] = nil
metric.Check() metric.Check()
if err := metric.Error(); nil != err { if err := metric.Error(); nil != err {
@ -176,14 +173,14 @@ func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
values["95%"] = ps[2] values["95%"] = ps[2]
values["99%"] = ps[3] values["99%"] = ps[3]
values["99.9%"] = ps[4] values["99.9%"] = ps[4]
case Meter: case *Meter:
m := metric.Snapshot() m := metric.Snapshot()
values["count"] = m.Count() values["count"] = m.Count()
values["1m.rate"] = m.Rate1() values["1m.rate"] = m.Rate1()
values["5m.rate"] = m.Rate5() values["5m.rate"] = m.Rate5()
values["15m.rate"] = m.Rate15() values["15m.rate"] = m.Rate15()
values["mean.rate"] = m.RateMean() values["mean.rate"] = m.RateMean()
case Timer: case *Timer:
t := metric.Snapshot() t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
values["count"] = t.Count() values["count"] = t.Count()
@ -214,7 +211,7 @@ func (r *StandardRegistry) Unregister(name string) {
func (r *StandardRegistry) loadOrRegister(name string, i interface{}) (interface{}, bool, bool) { func (r *StandardRegistry) loadOrRegister(name string, i interface{}) (interface{}, bool, bool) {
switch i.(type) { switch i.(type) {
case Counter, CounterFloat64, Gauge, GaugeFloat64, GaugeInfo, Healthcheck, Histogram, Meter, Timer, ResettingTimer: case *Counter, *CounterFloat64, *Gauge, *GaugeFloat64, *GaugeInfo, *Healthcheck, Histogram, *Meter, *Timer, *ResettingTimer:
default: default:
return nil, false, false return nil, false, false
} }
@ -326,9 +323,7 @@ func (r *PrefixedRegistry) Unregister(name string) {
} }
var ( var (
DefaultRegistry = NewRegistry() DefaultRegistry = NewRegistry()
EphemeralRegistry = NewRegistry()
AccountingRegistry = NewRegistry() // registry used in swarm
) )
// Each call the given function for each registered metric. // Each call the given function for each registered metric.
@ -347,7 +342,7 @@ func GetOrRegister(name string, i interface{}) interface{} {
return DefaultRegistry.GetOrRegister(name, i) return DefaultRegistry.GetOrRegister(name, i)
} }
// Register the given metric under the given name. Returns a DuplicateMetric // Register the given metric under the given name. Returns a ErrDuplicateMetric
// if a metric by the given name is already registered. // if a metric by the given name is already registered.
func Register(name string, i interface{}) error { func Register(name string, i interface{}) error {
return DefaultRegistry.Register(name, i) return DefaultRegistry.Register(name, i)

View file

@ -47,7 +47,7 @@ func TestRegistry(t *testing.T) {
if name != "foo" { if name != "foo" {
t.Fatal(name) t.Fatal(name)
} }
if _, ok := iface.(Counter); !ok { if _, ok := iface.(*Counter); !ok {
t.Fatal(iface) t.Fatal(iface)
} }
}) })
@ -73,7 +73,7 @@ func TestRegistryDuplicate(t *testing.T) {
i := 0 i := 0
r.Each(func(name string, iface interface{}) { r.Each(func(name string, iface interface{}) {
i++ i++
if _, ok := iface.(Counter); !ok { if _, ok := iface.(*Counter); !ok {
t.Fatal(iface) t.Fatal(iface)
} }
}) })
@ -85,11 +85,11 @@ func TestRegistryDuplicate(t *testing.T) {
func TestRegistryGet(t *testing.T) { func TestRegistryGet(t *testing.T) {
r := NewRegistry() r := NewRegistry()
r.Register("foo", NewCounter()) r.Register("foo", NewCounter())
if count := r.Get("foo").(Counter).Snapshot().Count(); count != 0 { if count := r.Get("foo").(*Counter).Snapshot().Count(); count != 0 {
t.Fatal(count) t.Fatal(count)
} }
r.Get("foo").(Counter).Inc(1) r.Get("foo").(*Counter).Inc(1)
if count := r.Get("foo").(Counter).Snapshot().Count(); count != 1 { if count := r.Get("foo").(*Counter).Snapshot().Count(); count != 1 {
t.Fatal(count) t.Fatal(count)
} }
} }
@ -100,7 +100,7 @@ func TestRegistryGetOrRegister(t *testing.T) {
// First metric wins with GetOrRegister // First metric wins with GetOrRegister
_ = r.GetOrRegister("foo", NewCounter()) _ = r.GetOrRegister("foo", NewCounter())
m := r.GetOrRegister("foo", NewGauge()) m := r.GetOrRegister("foo", NewGauge())
if _, ok := m.(Counter); !ok { if _, ok := m.(*Counter); !ok {
t.Fatal(m) t.Fatal(m)
} }
@ -110,7 +110,7 @@ func TestRegistryGetOrRegister(t *testing.T) {
if name != "foo" { if name != "foo" {
t.Fatal(name) t.Fatal(name)
} }
if _, ok := iface.(Counter); !ok { if _, ok := iface.(*Counter); !ok {
t.Fatal(iface) t.Fatal(iface)
} }
}) })
@ -125,7 +125,7 @@ func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) {
// First metric wins with GetOrRegister // First metric wins with GetOrRegister
_ = r.GetOrRegister("foo", NewCounter) _ = r.GetOrRegister("foo", NewCounter)
m := r.GetOrRegister("foo", NewGauge) m := r.GetOrRegister("foo", NewGauge)
if _, ok := m.(Counter); !ok { if _, ok := m.(*Counter); !ok {
t.Fatal(m) t.Fatal(m)
} }
@ -135,7 +135,7 @@ func TestRegistryGetOrRegisterWithLazyInstantiation(t *testing.T) {
if name != "foo" { if name != "foo" {
t.Fatal(name) t.Fatal(name)
} }
if _, ok := iface.(Counter); !ok { if _, ok := iface.(*Counter); !ok {
t.Fatal(iface) t.Fatal(iface)
} }
}) })

View file

@ -17,7 +17,7 @@ type resettingSample struct {
} }
// Snapshot returns a read-only copy of the sample with the original reset. // Snapshot returns a read-only copy of the sample with the original reset.
func (rs *resettingSample) Snapshot() SampleSnapshot { func (rs *resettingSample) Snapshot() *sampleSnapshot {
s := rs.Sample.Snapshot() s := rs.Sample.Snapshot()
rs.Sample.Clear() rs.Sample.Clear()
return s return s

View file

@ -5,36 +5,17 @@ import (
"time" "time"
) )
// Initial slice capacity for the values stored in a ResettingTimer
const InitialResettingTimerSliceCap = 10
type ResettingTimerSnapshot interface {
Count() int
Mean() float64
Max() int64
Min() int64
Percentiles([]float64) []float64
}
// ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval.
type ResettingTimer interface {
Snapshot() ResettingTimerSnapshot
Time(func())
Update(time.Duration)
UpdateSince(time.Time)
}
// GetOrRegisterResettingTimer returns an existing ResettingTimer or constructs and registers a // GetOrRegisterResettingTimer returns an existing ResettingTimer or constructs and registers a
// new StandardResettingTimer. // new ResettingTimer.
func GetOrRegisterResettingTimer(name string, r Registry) ResettingTimer { func GetOrRegisterResettingTimer(name string, r Registry) *ResettingTimer {
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
} }
return r.GetOrRegister(name, NewResettingTimer).(ResettingTimer) return r.GetOrRegister(name, NewResettingTimer).(*ResettingTimer)
} }
// NewRegisteredResettingTimer constructs and registers a new StandardResettingTimer. // NewRegisteredResettingTimer constructs and registers a new ResettingTimer.
func NewRegisteredResettingTimer(name string, r Registry) ResettingTimer { func NewRegisteredResettingTimer(name string, r Registry) *ResettingTimer {
c := NewResettingTimer() c := NewResettingTimer()
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
@ -43,33 +24,15 @@ func NewRegisteredResettingTimer(name string, r Registry) ResettingTimer {
return c return c
} }
// NewResettingTimer constructs a new StandardResettingTimer // NewResettingTimer constructs a new ResettingTimer
func NewResettingTimer() ResettingTimer { func NewResettingTimer() *ResettingTimer {
if !Enabled { return &ResettingTimer{
return NilResettingTimer{} values: make([]int64, 0, 10),
}
return &StandardResettingTimer{
values: make([]int64, 0, InitialResettingTimerSliceCap),
} }
} }
// NilResettingTimer is a no-op ResettingTimer. // ResettingTimer is used for storing aggregated values for timers, which are reset on every flush interval.
type NilResettingTimer struct{} type ResettingTimer struct {
func (NilResettingTimer) Values() []int64 { return nil }
func (n NilResettingTimer) Snapshot() ResettingTimerSnapshot { return n }
func (NilResettingTimer) Time(f func()) { f() }
func (NilResettingTimer) Update(time.Duration) {}
func (NilResettingTimer) Percentiles([]float64) []float64 { return nil }
func (NilResettingTimer) Mean() float64 { return 0.0 }
func (NilResettingTimer) Max() int64 { return 0 }
func (NilResettingTimer) Min() int64 { return 0 }
func (NilResettingTimer) UpdateSince(time.Time) {}
func (NilResettingTimer) Count() int { return 0 }
// StandardResettingTimer is the standard implementation of a ResettingTimer.
// and Meter.
type StandardResettingTimer struct {
values []int64 values []int64
sum int64 // sum is a running count of the total sum, used later to calculate mean sum int64 // sum is a running count of the total sum, used later to calculate mean
@ -77,28 +40,31 @@ type StandardResettingTimer struct {
} }
// Snapshot resets the timer and returns a read-only copy of its contents. // Snapshot resets the timer and returns a read-only copy of its contents.
func (t *StandardResettingTimer) Snapshot() ResettingTimerSnapshot { func (t *ResettingTimer) Snapshot() *ResettingTimerSnapshot {
t.mutex.Lock() t.mutex.Lock()
defer t.mutex.Unlock() defer t.mutex.Unlock()
snapshot := &resettingTimerSnapshot{} snapshot := &ResettingTimerSnapshot{}
if len(t.values) > 0 { if len(t.values) > 0 {
snapshot.mean = float64(t.sum) / float64(len(t.values)) snapshot.mean = float64(t.sum) / float64(len(t.values))
snapshot.values = t.values snapshot.values = t.values
t.values = make([]int64, 0, InitialResettingTimerSliceCap) t.values = make([]int64, 0, 10)
} }
t.sum = 0 t.sum = 0
return snapshot return snapshot
} }
// Record the duration of the execution of the given function. // Record the duration of the execution of the given function.
func (t *StandardResettingTimer) Time(f func()) { func (t *ResettingTimer) Time(f func()) {
ts := time.Now() ts := time.Now()
f() f()
t.Update(time.Since(ts)) t.Update(time.Since(ts))
} }
// Record the duration of an event. // Record the duration of an event.
func (t *StandardResettingTimer) Update(d time.Duration) { func (t *ResettingTimer) Update(d time.Duration) {
if !metricsEnabled {
return
}
t.mutex.Lock() t.mutex.Lock()
defer t.mutex.Unlock() defer t.mutex.Unlock()
t.values = append(t.values, int64(d)) t.values = append(t.values, int64(d))
@ -106,12 +72,12 @@ func (t *StandardResettingTimer) Update(d time.Duration) {
} }
// Record the duration of an event that started at a time and ends now. // Record the duration of an event that started at a time and ends now.
func (t *StandardResettingTimer) UpdateSince(ts time.Time) { func (t *ResettingTimer) UpdateSince(ts time.Time) {
t.Update(time.Since(ts)) t.Update(time.Since(ts))
} }
// resettingTimerSnapshot is a point-in-time copy of another ResettingTimer. // ResettingTimerSnapshot is a point-in-time copy of another ResettingTimer.
type resettingTimerSnapshot struct { type ResettingTimerSnapshot struct {
values []int64 values []int64
mean float64 mean float64
max int64 max int64
@ -121,20 +87,20 @@ type resettingTimerSnapshot struct {
} }
// Count return the length of the values from snapshot. // Count return the length of the values from snapshot.
func (t *resettingTimerSnapshot) Count() int { func (t *ResettingTimerSnapshot) Count() int {
return len(t.values) return len(t.values)
} }
// Percentiles returns the boundaries for the input percentiles. // Percentiles returns the boundaries for the input percentiles.
// note: this method is not thread safe // note: this method is not thread safe
func (t *resettingTimerSnapshot) Percentiles(percentiles []float64) []float64 { func (t *ResettingTimerSnapshot) Percentiles(percentiles []float64) []float64 {
t.calc(percentiles) t.calc(percentiles)
return t.thresholdBoundaries return t.thresholdBoundaries
} }
// Mean returns the mean of the snapshotted values // Mean returns the mean of the snapshotted values
// note: this method is not thread safe // note: this method is not thread safe
func (t *resettingTimerSnapshot) Mean() float64 { func (t *ResettingTimerSnapshot) Mean() float64 {
if !t.calculated { if !t.calculated {
t.calc(nil) t.calc(nil)
} }
@ -144,7 +110,7 @@ func (t *resettingTimerSnapshot) Mean() float64 {
// Max returns the max of the snapshotted values // Max returns the max of the snapshotted values
// note: this method is not thread safe // note: this method is not thread safe
func (t *resettingTimerSnapshot) Max() int64 { func (t *ResettingTimerSnapshot) Max() int64 {
if !t.calculated { if !t.calculated {
t.calc(nil) t.calc(nil)
} }
@ -153,14 +119,14 @@ func (t *resettingTimerSnapshot) Max() int64 {
// Min returns the min of the snapshotted values // Min returns the min of the snapshotted values
// note: this method is not thread safe // note: this method is not thread safe
func (t *resettingTimerSnapshot) Min() int64 { func (t *ResettingTimerSnapshot) Min() int64 {
if !t.calculated { if !t.calculated {
t.calc(nil) t.calc(nil)
} }
return t.min return t.min
} }
func (t *resettingTimerSnapshot) calc(percentiles []float64) { func (t *ResettingTimerSnapshot) calc(percentiles []float64) {
scores := CalculatePercentiles(t.values, percentiles) scores := CalculatePercentiles(t.values, percentiles)
t.thresholdBoundaries = scores t.thresholdBoundaries = scores
if len(t.values) == 0 { if len(t.values) == 0 {

View file

@ -10,178 +10,21 @@ import (
const rescaleThreshold = time.Hour const rescaleThreshold = time.Hour
type SampleSnapshot interface { // Sample maintains a statistically-significant selection of values from
Count() int64
Max() int64
Mean() float64
Min() int64
Percentile(float64) float64
Percentiles([]float64) []float64
Size() int
StdDev() float64
Sum() int64
Variance() float64
}
// Samples maintain a statistically-significant selection of values from
// a stream. // a stream.
type Sample interface { type Sample interface {
Snapshot() SampleSnapshot Snapshot() *sampleSnapshot
Clear() Clear()
Update(int64) Update(int64)
} }
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying var (
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time _ Sample = (*ExpDecaySample)(nil)
// Decay Model for Streaming Systems". _ Sample = (*UniformSample)(nil)
// _ Sample = (*resettingSample)(nil)
// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf> )
type ExpDecaySample struct {
alpha float64
count int64
mutex sync.Mutex
reservoirSize int
t0, t1 time.Time
values *expDecaySampleHeap
rand *rand.Rand
}
// NewExpDecaySample constructs a new exponentially-decaying sample with the // sampleSnapshot is a read-only copy of a Sample.
// given reservoir size and alpha.
func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
if !Enabled {
return NilSample{}
}
s := &ExpDecaySample{
alpha: alpha,
reservoirSize: reservoirSize,
t0: time.Now(),
values: newExpDecaySampleHeap(reservoirSize),
}
s.t1 = s.t0.Add(rescaleThreshold)
return s
}
// SetRand sets the random source (useful in tests)
func (s *ExpDecaySample) SetRand(prng *rand.Rand) Sample {
s.rand = prng
return s
}
// Clear clears all samples.
func (s *ExpDecaySample) Clear() {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count = 0
s.t0 = time.Now()
s.t1 = s.t0.Add(rescaleThreshold)
s.values.Clear()
}
// Snapshot returns a read-only copy of the sample.
func (s *ExpDecaySample) Snapshot() SampleSnapshot {
s.mutex.Lock()
defer s.mutex.Unlock()
var (
samples = s.values.Values()
values = make([]int64, len(samples))
max int64 = math.MinInt64
min int64 = math.MaxInt64
sum int64
)
for i, item := range samples {
v := item.v
values[i] = v
sum += v
if v > max {
max = v
}
if v < min {
min = v
}
}
return newSampleSnapshotPrecalculated(s.count, values, min, max, sum)
}
// Update samples a new value.
func (s *ExpDecaySample) Update(v int64) {
s.update(time.Now(), v)
}
// update samples a new value at a particular timestamp. This is a method all
// its own to facilitate testing.
func (s *ExpDecaySample) update(t time.Time, v int64) {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count++
if s.values.Size() == s.reservoirSize {
s.values.Pop()
}
var f64 float64
if s.rand != nil {
f64 = s.rand.Float64()
} else {
f64 = rand.Float64()
}
s.values.Push(expDecaySample{
k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / f64,
v: v,
})
if t.After(s.t1) {
values := s.values.Values()
t0 := s.t0
s.values.Clear()
s.t0 = t
s.t1 = s.t0.Add(rescaleThreshold)
for _, v := range values {
v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
s.values.Push(v)
}
}
}
// NilSample is a no-op Sample.
type NilSample struct{}
func (NilSample) Clear() {}
func (NilSample) Snapshot() SampleSnapshot { return (*emptySnapshot)(nil) }
func (NilSample) Update(v int64) {}
// SamplePercentile returns an arbitrary percentile of the slice of int64.
func SamplePercentile(values []int64, p float64) float64 {
return CalculatePercentiles(values, []float64{p})[0]
}
// CalculatePercentiles returns a slice of arbitrary percentiles of the slice of
// int64. This method returns interpolated results, so e.g. if there are only two
// values, [0, 10], a 50% percentile will land between them.
//
// Note: As a side-effect, this method will also sort the slice of values.
// Note2: The input format for percentiles is NOT percent! To express 50%, use 0.5, not 50.
func CalculatePercentiles(values []int64, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
if size == 0 {
return scores
}
slices.Sort(values)
for i, p := range ps {
pos := p * float64(size+1)
if pos < 1.0 {
scores[i] = float64(values[0])
} else if pos >= float64(size) {
scores[i] = float64(values[size-1])
} else {
lower := float64(values[int(pos)-1])
upper := float64(values[int(pos)])
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
}
}
return scores
}
// sampleSnapshot is a read-only copy of another Sample.
type sampleSnapshot struct { type sampleSnapshot struct {
count int64 count int64
values []int64 values []int64
@ -259,9 +102,6 @@ func (s *sampleSnapshot) Percentiles(ps []float64) []float64 {
// Size returns the size of the sample at the time the snapshot was taken. // Size returns the size of the sample at the time the snapshot was taken.
func (s *sampleSnapshot) Size() int { return len(s.values) } func (s *sampleSnapshot) Size() int { return len(s.values) }
// Snapshot returns the snapshot.
func (s *sampleSnapshot) Snapshot() SampleSnapshot { return s }
// StdDev returns the standard deviation of values at the time the snapshot was // StdDev returns the standard deviation of values at the time the snapshot was
// taken. // taken.
func (s *sampleSnapshot) StdDev() float64 { func (s *sampleSnapshot) StdDev() float64 {
@ -276,9 +116,7 @@ func (s *sampleSnapshot) Sum() int64 { return s.sum }
// Values returns a copy of the values in the sample. // Values returns a copy of the values in the sample.
func (s *sampleSnapshot) Values() []int64 { func (s *sampleSnapshot) Values() []int64 {
values := make([]int64, len(s.values)) return slices.Clone(s.values)
copy(values, s.values)
return values
} }
// Variance returns the variance of values at the time the snapshot was taken. // Variance returns the variance of values at the time the snapshot was taken.
@ -289,6 +127,149 @@ func (s *sampleSnapshot) Variance() float64 {
return s.variance return s.variance
} }
// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
// Decay Model for Streaming Systems".
//
// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>
type ExpDecaySample struct {
alpha float64
count int64
mutex sync.Mutex
reservoirSize int
t0, t1 time.Time
values *expDecaySampleHeap
rand *rand.Rand
}
// NewExpDecaySample constructs a new exponentially-decaying sample with the
// given reservoir size and alpha.
func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
s := &ExpDecaySample{
alpha: alpha,
reservoirSize: reservoirSize,
t0: time.Now(),
values: newExpDecaySampleHeap(reservoirSize),
}
s.t1 = s.t0.Add(rescaleThreshold)
return s
}
// SetRand sets the random source (useful in tests)
func (s *ExpDecaySample) SetRand(prng *rand.Rand) Sample {
s.rand = prng
return s
}
// Clear clears all samples.
func (s *ExpDecaySample) Clear() {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count = 0
s.t0 = time.Now()
s.t1 = s.t0.Add(rescaleThreshold)
s.values.Clear()
}
// Snapshot returns a read-only copy of the sample.
func (s *ExpDecaySample) Snapshot() *sampleSnapshot {
s.mutex.Lock()
defer s.mutex.Unlock()
var (
samples = s.values.Values()
values = make([]int64, len(samples))
max int64 = math.MinInt64
min int64 = math.MaxInt64
sum int64
)
for i, item := range samples {
v := item.v
values[i] = v
sum += v
if v > max {
max = v
}
if v < min {
min = v
}
}
return newSampleSnapshotPrecalculated(s.count, values, min, max, sum)
}
// Update samples a new value.
func (s *ExpDecaySample) Update(v int64) {
if !metricsEnabled {
return
}
s.update(time.Now(), v)
}
// update samples a new value at a particular timestamp. This is a method all
// its own to facilitate testing.
func (s *ExpDecaySample) update(t time.Time, v int64) {
s.mutex.Lock()
defer s.mutex.Unlock()
s.count++
if s.values.Size() == s.reservoirSize {
s.values.Pop()
}
var f64 float64
if s.rand != nil {
f64 = s.rand.Float64()
} else {
f64 = rand.Float64()
}
s.values.Push(expDecaySample{
k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / f64,
v: v,
})
if t.After(s.t1) {
values := s.values.Values()
t0 := s.t0
s.values.Clear()
s.t0 = t
s.t1 = s.t0.Add(rescaleThreshold)
for _, v := range values {
v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
s.values.Push(v)
}
}
}
// SamplePercentile returns an arbitrary percentile of the slice of int64.
func SamplePercentile(values []int64, p float64) float64 {
return CalculatePercentiles(values, []float64{p})[0]
}
// CalculatePercentiles returns a slice of arbitrary percentiles of the slice of
// int64. This method returns interpolated results, so e.g. if there are only two
// values, [0, 10], a 50% percentile will land between them.
//
// Note: As a side-effect, this method will also sort the slice of values.
// Note2: The input format for percentiles is NOT percent! To express 50%, use 0.5, not 50.
func CalculatePercentiles(values []int64, ps []float64) []float64 {
scores := make([]float64, len(ps))
size := len(values)
if size == 0 {
return scores
}
slices.Sort(values)
for i, p := range ps {
pos := p * float64(size+1)
if pos < 1.0 {
scores[i] = float64(values[0])
} else if pos >= float64(size) {
scores[i] = float64(values[size-1])
} else {
lower := float64(values[int(pos)-1])
upper := float64(values[int(pos)])
scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
}
}
return scores
}
// SampleVariance returns the variance of the slice of int64. // SampleVariance returns the variance of the slice of int64.
func SampleVariance(mean float64, values []int64) float64 { func SampleVariance(mean float64, values []int64) float64 {
if len(values) == 0 { if len(values) == 0 {
@ -302,7 +283,7 @@ func SampleVariance(mean float64, values []int64) float64 {
return sum / float64(len(values)) return sum / float64(len(values))
} }
// A uniform sample using Vitter's Algorithm R. // UniformSample implements a uniform sample using Vitter's Algorithm R.
// //
// <http://www.cs.umd.edu/~samir/498/vitter.pdf> // <http://www.cs.umd.edu/~samir/498/vitter.pdf>
type UniformSample struct { type UniformSample struct {
@ -316,9 +297,6 @@ type UniformSample struct {
// NewUniformSample constructs a new uniform sample with the given reservoir // NewUniformSample constructs a new uniform sample with the given reservoir
// size. // size.
func NewUniformSample(reservoirSize int) Sample { func NewUniformSample(reservoirSize int) Sample {
if !Enabled {
return NilSample{}
}
return &UniformSample{ return &UniformSample{
reservoirSize: reservoirSize, reservoirSize: reservoirSize,
values: make([]int64, 0, reservoirSize), values: make([]int64, 0, reservoirSize),
@ -336,14 +314,13 @@ func (s *UniformSample) Clear() {
s.mutex.Lock() s.mutex.Lock()
defer s.mutex.Unlock() defer s.mutex.Unlock()
s.count = 0 s.count = 0
s.values = make([]int64, 0, s.reservoirSize) clear(s.values)
} }
// Snapshot returns a read-only copy of the sample. // Snapshot returns a read-only copy of the sample.
func (s *UniformSample) Snapshot() SampleSnapshot { func (s *UniformSample) Snapshot() *sampleSnapshot {
s.mutex.Lock() s.mutex.Lock()
values := make([]int64, len(s.values)) values := slices.Clone(s.values)
copy(values, s.values)
count := s.count count := s.count
s.mutex.Unlock() s.mutex.Unlock()
return newSampleSnapshot(count, values) return newSampleSnapshot(count, values)
@ -351,21 +328,24 @@ func (s *UniformSample) Snapshot() SampleSnapshot {
// Update samples a new value. // Update samples a new value.
func (s *UniformSample) Update(v int64) { func (s *UniformSample) Update(v int64) {
if !metricsEnabled {
return
}
s.mutex.Lock() s.mutex.Lock()
defer s.mutex.Unlock() defer s.mutex.Unlock()
s.count++ s.count++
if len(s.values) < s.reservoirSize { if len(s.values) < s.reservoirSize {
s.values = append(s.values, v) s.values = append(s.values, v)
return
}
var r int64
if s.rand != nil {
r = s.rand.Int63n(s.count)
} else { } else {
var r int64 r = rand.Int63n(s.count)
if s.rand != nil { }
r = s.rand.Int63n(s.count) if r < int64(len(s.values)) {
} else { s.values[int(r)] = v
r = rand.Int63n(s.count)
}
if r < int64(len(s.values)) {
s.values[int(r)] = v
}
} }
} }

View file

@ -86,7 +86,7 @@ func TestExpDecaySample(t *testing.T) {
if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want { if have, want := snap.Size(), min(tc.updates, tc.reservoirSize); have != want {
t.Errorf("unexpected size: have %d want %d", have, want) t.Errorf("unexpected size: have %d want %d", have, want)
} }
values := snap.(*sampleSnapshot).values values := snap.values
if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want { if have, want := len(values), min(tc.updates, tc.reservoirSize); have != want {
t.Errorf("unexpected values length: have %d want %d", have, want) t.Errorf("unexpected values length: have %d want %d", have, want)
} }
@ -111,8 +111,7 @@ func TestExpDecaySampleNanosecondRegression(t *testing.T) {
for i := 0; i < 1000; i++ { for i := 0; i < 1000; i++ {
sw.Update(20) sw.Update(20)
} }
s := sw.Snapshot() v := sw.Snapshot().values
v := s.(*sampleSnapshot).values
avg := float64(0) avg := float64(0)
for i := 0; i < len(v); i++ { for i := 0; i < len(v); i++ {
avg += float64(v[i]) avg += float64(v[i])
@ -166,7 +165,7 @@ func TestUniformSample(t *testing.T) {
if size := s.Size(); size != 100 { if size := s.Size(); size != 100 {
t.Errorf("s.Size(): 100 != %v\n", size) t.Errorf("s.Size(): 100 != %v\n", size)
} }
values := s.(*sampleSnapshot).values values := s.values
if l := len(values); l != 100 { if l := len(values); l != 100 {
t.Errorf("len(s.Values()): 100 != %v\n", l) t.Errorf("len(s.Values()): 100 != %v\n", l)
@ -184,8 +183,7 @@ func TestUniformSampleIncludesTail(t *testing.T) {
for i := 0; i < max; i++ { for i := 0; i < max; i++ {
sw.Update(int64(i)) sw.Update(int64(i))
} }
s := sw.Snapshot() v := sw.Snapshot().values
v := s.(*sampleSnapshot).values
sum := 0 sum := 0
exp := (max - 1) * max / 2 exp := (max - 1) * max / 2
for i := 0; i < len(v); i++ { for i := 0; i < len(v); i++ {
@ -220,7 +218,7 @@ func benchmarkSample(b *testing.B, s Sample) {
} }
} }
func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) { func testExpDecaySampleStatistics(t *testing.T, s *sampleSnapshot) {
if sum := s.Sum(); sum != 496598 { if sum := s.Sum(); sum != 496598 {
t.Errorf("s.Sum(): 496598 != %v\n", sum) t.Errorf("s.Sum(): 496598 != %v\n", sum)
} }
@ -251,7 +249,7 @@ func testExpDecaySampleStatistics(t *testing.T, s SampleSnapshot) {
} }
} }
func testUniformSampleStatistics(t *testing.T, s SampleSnapshot) { func testUniformSampleStatistics(t *testing.T, s *sampleSnapshot) {
if count := s.Count(); count != 10000 { if count := s.Count(); count != 10000 {
t.Errorf("s.Count(): 10000 != %v\n", count) t.Errorf("s.Count(): 10000 != %v\n", count)
} }

View file

@ -15,17 +15,17 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
for range time.Tick(d) { for range time.Tick(d) {
r.Each(func(name string, i interface{}) { r.Each(func(name string, i interface{}) {
switch metric := i.(type) { switch metric := i.(type) {
case Counter: case *Counter:
w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Snapshot().Count())) w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Snapshot().Count()))
case CounterFloat64: case *CounterFloat64:
w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Snapshot().Count())) w.Info(fmt.Sprintf("counter %s: count: %f", name, metric.Snapshot().Count()))
case Gauge: case *Gauge:
w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Snapshot().Value())) w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Snapshot().Value()))
case GaugeFloat64: case *GaugeFloat64:
w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Snapshot().Value())) w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Snapshot().Value()))
case GaugeInfo: case *GaugeInfo:
w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Snapshot().Value())) w.Info(fmt.Sprintf("gauge %s: value: %s", name, metric.Snapshot().Value()))
case Healthcheck: case *Healthcheck:
metric.Check() metric.Check()
w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
case Histogram: case Histogram:
@ -45,7 +45,7 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
ps[3], ps[3],
ps[4], ps[4],
)) ))
case Meter: case *Meter:
m := metric.Snapshot() m := metric.Snapshot()
w.Info(fmt.Sprintf( w.Info(fmt.Sprintf(
"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
@ -56,7 +56,7 @@ func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
m.Rate15(), m.Rate15(),
m.RateMean(), m.RateMean(),
)) ))
case Timer: case *Timer:
t := metric.Snapshot() t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
w.Info(fmt.Sprintf( w.Info(fmt.Sprintf(

View file

@ -5,47 +5,30 @@ import (
"time" "time"
) )
type TimerSnapshot interface {
HistogramSnapshot
MeterSnapshot
}
// Timer capture the duration and rate of events.
type Timer interface {
Snapshot() TimerSnapshot
Stop()
Time(func())
UpdateSince(time.Time)
Update(time.Duration)
}
// GetOrRegisterTimer returns an existing Timer or constructs and registers a // GetOrRegisterTimer returns an existing Timer or constructs and registers a
// new StandardTimer. // new Timer.
// Be sure to unregister the meter from the registry once it is of no use to // Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection. // allow for garbage collection.
func GetOrRegisterTimer(name string, r Registry) Timer { func GetOrRegisterTimer(name string, r Registry) *Timer {
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
} }
return r.GetOrRegister(name, NewTimer).(Timer) return r.GetOrRegister(name, NewTimer).(*Timer)
} }
// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. // NewCustomTimer constructs a new Timer from a Histogram and a Meter.
// Be sure to call Stop() once the timer is of no use to allow for garbage collection. // Be sure to call Stop() once the timer is of no use to allow for garbage collection.
func NewCustomTimer(h Histogram, m Meter) Timer { func NewCustomTimer(h Histogram, m *Meter) *Timer {
if !Enabled { return &Timer{
return NilTimer{}
}
return &StandardTimer{
histogram: h, histogram: h,
meter: m, meter: m,
} }
} }
// NewRegisteredTimer constructs and registers a new StandardTimer. // NewRegisteredTimer constructs and registers a new Timer.
// Be sure to unregister the meter from the registry once it is of no use to // Be sure to unregister the meter from the registry once it is of no use to
// allow for garbage collection. // allow for garbage collection.
func NewRegisteredTimer(name string, r Registry) Timer { func NewRegisteredTimer(name string, r Registry) *Timer {
c := NewTimer() c := NewTimer()
if nil == r { if nil == r {
r = DefaultRegistry r = DefaultRegistry
@ -54,60 +37,47 @@ func NewRegisteredTimer(name string, r Registry) Timer {
return c return c
} }
// NewTimer constructs a new StandardTimer using an exponentially-decaying // NewTimer constructs a new Timer using an exponentially-decaying
// sample with the same reservoir size and alpha as UNIX load averages. // sample with the same reservoir size and alpha as UNIX load averages.
// Be sure to call Stop() once the timer is of no use to allow for garbage collection. // Be sure to call Stop() once the timer is of no use to allow for garbage collection.
func NewTimer() Timer { func NewTimer() *Timer {
if !Enabled { return &Timer{
return NilTimer{}
}
return &StandardTimer{
histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
meter: NewMeter(), meter: NewMeter(),
} }
} }
// NilTimer is a no-op Timer. // Timer captures the duration and rate of events, using a Histogram and a Meter.
type NilTimer struct{} type Timer struct {
func (NilTimer) Snapshot() TimerSnapshot { return (*emptySnapshot)(nil) }
func (NilTimer) Stop() {}
func (NilTimer) Time(f func()) { f() }
func (NilTimer) Update(time.Duration) {}
func (NilTimer) UpdateSince(time.Time) {}
// StandardTimer is the standard implementation of a Timer and uses a Histogram
// and Meter.
type StandardTimer struct {
histogram Histogram histogram Histogram
meter Meter meter *Meter
mutex sync.Mutex mutex sync.Mutex
} }
// Snapshot returns a read-only copy of the timer. // Snapshot returns a read-only copy of the timer.
func (t *StandardTimer) Snapshot() TimerSnapshot { func (t *Timer) Snapshot() *TimerSnapshot {
t.mutex.Lock() t.mutex.Lock()
defer t.mutex.Unlock() defer t.mutex.Unlock()
return &timerSnapshot{ return &TimerSnapshot{
histogram: t.histogram.Snapshot(), histogram: t.histogram.Snapshot(),
meter: t.meter.Snapshot(), meter: t.meter.Snapshot(),
} }
} }
// Stop stops the meter. // Stop stops the meter.
func (t *StandardTimer) Stop() { func (t *Timer) Stop() {
t.meter.Stop() t.meter.Stop()
} }
// Time record the duration of the execution of the given function. // Time record the duration of the execution of the given function.
func (t *StandardTimer) Time(f func()) { func (t *Timer) Time(f func()) {
ts := time.Now() ts := time.Now()
f() f()
t.Update(time.Since(ts)) t.Update(time.Since(ts))
} }
// Update the duration of an event, in nanoseconds. // Update the duration of an event, in nanoseconds.
func (t *StandardTimer) Update(d time.Duration) { func (t *Timer) Update(d time.Duration) {
t.mutex.Lock() t.mutex.Lock()
defer t.mutex.Unlock() defer t.mutex.Unlock()
t.histogram.Update(d.Nanoseconds()) t.histogram.Update(d.Nanoseconds())
@ -116,67 +86,67 @@ func (t *StandardTimer) Update(d time.Duration) {
// UpdateSince update the duration of an event that started at a time and ends now. // UpdateSince update the duration of an event that started at a time and ends now.
// The record uses nanoseconds. // The record uses nanoseconds.
func (t *StandardTimer) UpdateSince(ts time.Time) { func (t *Timer) UpdateSince(ts time.Time) {
t.Update(time.Since(ts)) t.Update(time.Since(ts))
} }
// timerSnapshot is a read-only copy of another Timer. // TimerSnapshot is a read-only copy of another Timer.
type timerSnapshot struct { type TimerSnapshot struct {
histogram HistogramSnapshot histogram HistogramSnapshot
meter MeterSnapshot meter *MeterSnapshot
} }
// Count returns the number of events recorded at the time the snapshot was // Count returns the number of events recorded at the time the snapshot was
// taken. // taken.
func (t *timerSnapshot) Count() int64 { return t.histogram.Count() } func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
// Max returns the maximum value at the time the snapshot was taken. // Max returns the maximum value at the time the snapshot was taken.
func (t *timerSnapshot) Max() int64 { return t.histogram.Max() } func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
// Size returns the size of the sample at the time the snapshot was taken. // Size returns the size of the sample at the time the snapshot was taken.
func (t *timerSnapshot) Size() int { return t.histogram.Size() } func (t *TimerSnapshot) Size() int { return t.histogram.Size() }
// Mean returns the mean value at the time the snapshot was taken. // Mean returns the mean value at the time the snapshot was taken.
func (t *timerSnapshot) Mean() float64 { return t.histogram.Mean() } func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
// Min returns the minimum value at the time the snapshot was taken. // Min returns the minimum value at the time the snapshot was taken.
func (t *timerSnapshot) Min() int64 { return t.histogram.Min() } func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
// Percentile returns an arbitrary percentile of sampled values at the time the // Percentile returns an arbitrary percentile of sampled values at the time the
// snapshot was taken. // snapshot was taken.
func (t *timerSnapshot) Percentile(p float64) float64 { func (t *TimerSnapshot) Percentile(p float64) float64 {
return t.histogram.Percentile(p) return t.histogram.Percentile(p)
} }
// Percentiles returns a slice of arbitrary percentiles of sampled values at // Percentiles returns a slice of arbitrary percentiles of sampled values at
// the time the snapshot was taken. // the time the snapshot was taken.
func (t *timerSnapshot) Percentiles(ps []float64) []float64 { func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
return t.histogram.Percentiles(ps) return t.histogram.Percentiles(ps)
} }
// Rate1 returns the one-minute moving average rate of events per second at the // Rate1 returns the one-minute moving average rate of events per second at the
// time the snapshot was taken. // time the snapshot was taken.
func (t *timerSnapshot) Rate1() float64 { return t.meter.Rate1() } func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
// Rate5 returns the five-minute moving average rate of events per second at // Rate5 returns the five-minute moving average rate of events per second at
// the time the snapshot was taken. // the time the snapshot was taken.
func (t *timerSnapshot) Rate5() float64 { return t.meter.Rate5() } func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
// Rate15 returns the fifteen-minute moving average rate of events per second // Rate15 returns the fifteen-minute moving average rate of events per second
// at the time the snapshot was taken. // at the time the snapshot was taken.
func (t *timerSnapshot) Rate15() float64 { return t.meter.Rate15() } func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
// RateMean returns the meter's mean rate of events per second at the time the // RateMean returns the meter's mean rate of events per second at the time the
// snapshot was taken. // snapshot was taken.
func (t *timerSnapshot) RateMean() float64 { return t.meter.RateMean() } func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
// StdDev returns the standard deviation of the values at the time the snapshot // StdDev returns the standard deviation of the values at the time the snapshot
// was taken. // was taken.
func (t *timerSnapshot) StdDev() float64 { return t.histogram.StdDev() } func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
// Sum returns the sum at the time the snapshot was taken. // Sum returns the sum at the time the snapshot was taken.
func (t *timerSnapshot) Sum() int64 { return t.histogram.Sum() } func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
// Variance returns the variance of the values at the time the snapshot was // Variance returns the variance of the values at the time the snapshot was
// taken. // taken.
func (t *timerSnapshot) Variance() float64 { return t.histogram.Variance() } func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }

View file

@ -26,22 +26,22 @@ func WriteOnce(r Registry, w io.Writer) {
slices.SortFunc(namedMetrics, namedMetric.cmp) slices.SortFunc(namedMetrics, namedMetric.cmp)
for _, namedMetric := range namedMetrics { for _, namedMetric := range namedMetrics {
switch metric := namedMetric.m.(type) { switch metric := namedMetric.m.(type) {
case Counter: case *Counter:
fmt.Fprintf(w, "counter %s\n", namedMetric.name) fmt.Fprintf(w, "counter %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %9d\n", metric.Snapshot().Count()) fmt.Fprintf(w, " count: %9d\n", metric.Snapshot().Count())
case CounterFloat64: case *CounterFloat64:
fmt.Fprintf(w, "counter %s\n", namedMetric.name) fmt.Fprintf(w, "counter %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %f\n", metric.Snapshot().Count()) fmt.Fprintf(w, " count: %f\n", metric.Snapshot().Count())
case Gauge: case *Gauge:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name) fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %9d\n", metric.Snapshot().Value()) fmt.Fprintf(w, " value: %9d\n", metric.Snapshot().Value())
case GaugeFloat64: case *GaugeFloat64:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name) fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %f\n", metric.Snapshot().Value()) fmt.Fprintf(w, " value: %f\n", metric.Snapshot().Value())
case GaugeInfo: case *GaugeInfo:
fmt.Fprintf(w, "gauge %s\n", namedMetric.name) fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
fmt.Fprintf(w, " value: %s\n", metric.Snapshot().Value().String()) fmt.Fprintf(w, " value: %s\n", metric.Snapshot().Value().String())
case Healthcheck: case *Healthcheck:
metric.Check() metric.Check()
fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name) fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
fmt.Fprintf(w, " error: %v\n", metric.Error()) fmt.Fprintf(w, " error: %v\n", metric.Error())
@ -59,7 +59,7 @@ func WriteOnce(r Registry, w io.Writer) {
fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2]) fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3]) fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4]) fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
case Meter: case *Meter:
m := metric.Snapshot() m := metric.Snapshot()
fmt.Fprintf(w, "meter %s\n", namedMetric.name) fmt.Fprintf(w, "meter %s\n", namedMetric.name)
fmt.Fprintf(w, " count: %9d\n", m.Count()) fmt.Fprintf(w, " count: %9d\n", m.Count())
@ -67,7 +67,7 @@ func WriteOnce(r Registry, w io.Writer) {
fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5()) fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15()) fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean()) fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
case Timer: case *Timer:
t := metric.Snapshot() t := metric.Snapshot()
ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
fmt.Fprintf(w, "timer %s\n", namedMetric.name) fmt.Fprintf(w, "timer %s\n", namedMetric.name)

View file

@ -34,7 +34,7 @@ const (
) )
var ( var (
bucketsCounter []metrics.Counter bucketsCounter []*metrics.Counter
ingressTrafficMeter = metrics.NewRegisteredMeter(ingressMeterName, nil) ingressTrafficMeter = metrics.NewRegisteredMeter(ingressMeterName, nil)
egressTrafficMeter = metrics.NewRegisteredMeter(egressMeterName, nil) egressTrafficMeter = metrics.NewRegisteredMeter(egressMeterName, nil)
) )
@ -53,7 +53,7 @@ type meteredUdpConn struct {
func newMeteredConn(conn UDPConn) UDPConn { func newMeteredConn(conn UDPConn) UDPConn {
// Short circuit if metrics are disabled // Short circuit if metrics are disabled
if !metrics.Enabled { if !metrics.Enabled() {
return conn return conn
} }
return &meteredUdpConn{udpConn: conn} return &meteredUdpConn{udpConn: conn}

View file

@ -570,7 +570,7 @@ func (tab *Table) nodeAdded(b *bucket, n *tableNode) {
if tab.nodeAddedHook != nil { if tab.nodeAddedHook != nil {
tab.nodeAddedHook(b, n) tab.nodeAddedHook(b, n)
} }
if metrics.Enabled { if metrics.Enabled() {
bucketsCounter[b.index].Inc(1) bucketsCounter[b.index].Inc(1)
} }
} }
@ -580,7 +580,7 @@ func (tab *Table) nodeRemoved(b *bucket, n *tableNode) {
if tab.nodeRemovedHook != nil { if tab.nodeRemovedHook != nil {
tab.nodeRemovedHook(b, n) tab.nodeRemovedHook(b, n)
} }
if metrics.Enabled { if metrics.Enabled() {
bucketsCounter[b.index].Dec(1) bucketsCounter[b.index].Dec(1)
} }
} }

View file

@ -37,19 +37,19 @@ const (
) )
var ( var (
activePeerGauge metrics.Gauge = metrics.NilGauge{} activePeerGauge = metrics.NewRegisteredGauge("p2p/peers", nil)
activeInboundPeerGauge metrics.Gauge = metrics.NilGauge{} activeInboundPeerGauge = metrics.NewRegisteredGauge("p2p/peers/inbound", nil)
activeOutboundPeerGauge metrics.Gauge = metrics.NilGauge{} activeOutboundPeerGauge = metrics.NewRegisteredGauge("p2p/peers/outbound", nil)
ingressTrafficMeter = metrics.NewRegisteredMeter("p2p/ingress", nil) ingressTrafficMeter = metrics.NewRegisteredMeter("p2p/ingress", nil)
egressTrafficMeter = metrics.NewRegisteredMeter("p2p/egress", nil) egressTrafficMeter = metrics.NewRegisteredMeter("p2p/egress", nil)
// general ingress/egress connection meters // general ingress/egress connection meters
serveMeter metrics.Meter = metrics.NilMeter{} serveMeter = metrics.NewRegisteredMeter("p2p/serves", nil)
serveSuccessMeter metrics.Meter = metrics.NilMeter{} serveSuccessMeter = metrics.NewRegisteredMeter("p2p/serves/success", nil)
dialMeter metrics.Meter = metrics.NilMeter{} dialMeter = metrics.NewRegisteredMeter("p2p/dials", nil)
dialSuccessMeter metrics.Meter = metrics.NilMeter{} dialSuccessMeter = metrics.NewRegisteredMeter("p2p/dials/success", nil)
dialConnectionError metrics.Meter = metrics.NilMeter{} dialConnectionError = metrics.NewRegisteredMeter("p2p/dials/error/connection", nil)
// handshake error meters // handshake error meters
dialTooManyPeers = metrics.NewRegisteredMeter("p2p/dials/error/saturated", nil) dialTooManyPeers = metrics.NewRegisteredMeter("p2p/dials/error/saturated", nil)
@ -61,25 +61,10 @@ var (
dialProtoHandshakeError = metrics.NewRegisteredMeter("p2p/dials/error/rlpx/proto", nil) dialProtoHandshakeError = metrics.NewRegisteredMeter("p2p/dials/error/rlpx/proto", nil)
) )
func init() {
if !metrics.Enabled {
return
}
activePeerGauge = metrics.NewRegisteredGauge("p2p/peers", nil)
activeInboundPeerGauge = metrics.NewRegisteredGauge("p2p/peers/inbound", nil)
activeOutboundPeerGauge = metrics.NewRegisteredGauge("p2p/peers/outbound", nil)
serveMeter = metrics.NewRegisteredMeter("p2p/serves", nil)
serveSuccessMeter = metrics.NewRegisteredMeter("p2p/serves/success", nil)
dialMeter = metrics.NewRegisteredMeter("p2p/dials", nil)
dialSuccessMeter = metrics.NewRegisteredMeter("p2p/dials/success", nil)
dialConnectionError = metrics.NewRegisteredMeter("p2p/dials/error/connection", nil)
}
// markDialError matches errors that occur while setting up a dial connection // markDialError matches errors that occur while setting up a dial connection
// to the corresponding meter. // to the corresponding meter.
func markDialError(err error) { func markDialError(err error) {
if !metrics.Enabled { if !metrics.Enabled() {
return return
} }
if err2 := errors.Unwrap(err); err2 != nil { if err2 := errors.Unwrap(err); err2 != nil {
@ -113,7 +98,7 @@ type meteredConn struct {
// connection meter and also increases the metered peer count. If the metrics // connection meter and also increases the metered peer count. If the metrics
// system is disabled, function returns the original connection. // system is disabled, function returns the original connection.
func newMeteredConn(conn net.Conn) net.Conn { func newMeteredConn(conn net.Conn) net.Conn {
if !metrics.Enabled { if !metrics.Enabled() {
return conn return conn
} }
return &meteredConn{Conn: conn} return &meteredConn{Conn: conn}

View file

@ -357,7 +357,7 @@ func (p *Peer) handle(msg Msg) error {
if err != nil { if err != nil {
return fmt.Errorf("msg code out of range: %v", msg.Code) return fmt.Errorf("msg code out of range: %v", msg.Code)
} }
if metrics.Enabled { if metrics.Enabled() {
m := fmt.Sprintf("%s/%s/%d/%#02x", ingressMeterName, proto.Name, proto.Version, msg.Code-proto.offset) m := fmt.Sprintf("%s/%s/%d/%#02x", ingressMeterName, proto.Name, proto.Version, msg.Code-proto.offset)
metrics.GetOrRegisterMeter(m, nil).Mark(int64(msg.meterSize)) metrics.GetOrRegisterMeter(m, nil).Mark(int64(msg.meterSize))
metrics.GetOrRegisterMeter(m+"/packets", nil).Mark(1) metrics.GetOrRegisterMeter(m+"/packets", nil).Mark(1)

View file

@ -84,7 +84,7 @@ func New(protocol string, timeout time.Duration) *Tracker {
// Track adds a network request to the tracker to wait for a response to arrive // Track adds a network request to the tracker to wait for a response to arrive
// or until the request it cancelled or times out. // or until the request it cancelled or times out.
func (t *Tracker) Track(peer string, version uint, reqCode uint64, resCode uint64, id uint64) { func (t *Tracker) Track(peer string, version uint, reqCode uint64, resCode uint64, id uint64) {
if !metrics.Enabled { if !metrics.Enabled() {
return return
} }
t.lock.Lock() t.lock.Lock()
@ -163,7 +163,7 @@ func (t *Tracker) schedule() {
// Fulfil fills a pending request, if any is available, reporting on various metrics. // Fulfil fills a pending request, if any is available, reporting on various metrics.
func (t *Tracker) Fulfil(peer string, version uint, code uint64, id uint64) { func (t *Tracker) Fulfil(peer string, version uint, code uint64, id uint64) {
if !metrics.Enabled { if !metrics.Enabled() {
return return
} }
t.lock.Lock() t.lock.Lock()

View file

@ -98,7 +98,7 @@ func (t *rlpxTransport) WriteMsg(msg Msg) error {
// Set metrics. // Set metrics.
msg.meterSize = size msg.meterSize = size
if metrics.Enabled && msg.meterCap.Name != "" { // don't meter non-subprotocol messages if metrics.Enabled() && msg.meterCap.Name != "" { // don't meter non-subprotocol messages
m := fmt.Sprintf("%s/%s/%d/%#02x", egressMeterName, msg.meterCap.Name, msg.meterCap.Version, msg.meterCode) m := fmt.Sprintf("%s/%s/%d/%#02x", egressMeterName, msg.meterCap.Name, msg.meterCap.Version, msg.meterCode)
metrics.GetOrRegisterMeter(m, nil).Mark(int64(msg.meterSize)) metrics.GetOrRegisterMeter(m, nil).Mark(int64(msg.meterSize))
metrics.GetOrRegisterMeter(m+"/packets", nil).Mark(1) metrics.GetOrRegisterMeter(m+"/packets", nil).Mark(1)

View file

@ -43,7 +43,7 @@ func (c *counter) add(size int) {
} }
// report uploads the cached statistics to meters. // report uploads the cached statistics to meters.
func (c *counter) report(count metrics.Meter, size metrics.Meter) { func (c *counter) report(count, size *metrics.Meter) {
count.Mark(int64(c.n)) count.Mark(int64(c.n))
size.Mark(int64(c.size)) size.Mark(int64(c.size))
} }