mirror of
https://github.com/ethereum/go-ethereum.git
synced 2026-04-27 20:12:23 +00:00
After profiling geth startup times for #16874 it turned out that most of the time was spent resizing accounts slice for ordered insertion. This change uses map to track all accounts instead of slice to improve performance. geth startup time using keystore with one million accounts reduced from 23 minutes to 2.5 minutes. Benchmarks show relatively-small overhead increase for small keystores and large decrease for huge: ``` goos: linux goarch: amd64 pkg: github.com/ethereum/go-ethereum/accounts/keystore │ HEAD~1 │ HEAD │ │ sec/op │ sec/op vs base │ Add/preload=10-8 1.030µ ± 5% 1.447µ ± 7% +40.51% (p=0.000 n=10) Add/preload=100-8 1.109µ ± 4% 1.463µ ± 3% +31.88% (p=0.000 n=10) Add/preload=1000-8 1.860µ ± 3% 1.477µ ± 5% -20.57% (p=0.000 n=10) Add/preload=1000000-8 5177.640µ ± 2% 1.654µ ± 11% -99.97% (p=0.000 n=10) Find/preload=10/by_address-8 23.70n ± 1% 23.88n ± 3% ~ (p=0.271 n=10) Find/preload=10/by_path-8 50.43n ± 2% 39.88n ± 5% -20.94% (p=0.000 n=10) Find/preload=10/ambiguous-8 323.6n ± 1% 1049.0n ± 3% +224.17% (p=0.000 n=10) Find/preload=100/by_address-8 23.69n ± 1% 23.63n ± 6% ~ (p=0.739 n=10) Find/preload=100/by_path-8 362.70n ± 1% 37.84n ± 3% -89.57% (p=0.000 n=10) Find/preload=100/ambiguous-8 2.683µ ± 2% 19.235µ ± 2% +617.05% (p=0.000 n=10) Find/preload=1000/by_address-8 26.45n ± 1% 27.73n ± 2% +4.82% (p=0.000 n=10) Find/preload=1000/by_path-8 3211.00n ± 3% 38.22n ± 8% -98.81% (p=0.000 n=10) Find/preload=1000/ambiguous-8 26.14µ ± 2% 263.59µ ± 1% +908.41% (p=0.000 n=10) Find/preload=1000000/by_address-8 26.47n ± 4% 26.41n ± 1% ~ (p=0.566 n=10) Find/preload=1000000/by_path-8 3683325.50n ± 4% 44.09n ± 45% -100.00% (p=0.000 n=10) Find/preload=1000000/ambiguous-8 39.68m ± 14% 819.48m ± 7% +1965.01% (p=0.000 n=10) geomean 2.346µ 791.4n -66.27% │ HEAD~1 │ HEAD │ │ B/op │ B/op vs base │ Add/preload=10-8 643.0 ± 0% 662.0 ± 0% +2.95% (p=0.000 n=10) Add/preload=100-8 643.0 ± 0% 662.0 ± 0% +2.95% (p=0.000 n=10) Add/preload=1000-8 584.0 ± 5% 662.0 ± 0% +13.36% (p=0.000 
n=10) Add/preload=1000000-8 88.00 ± 0% 662.00 ± 17% +652.27% (p=0.000 n=10) Find/preload=10/by_address-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=10/by_path-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=10/ambiguous-8 624.0 ± 0% 1200.0 ± 0% +92.31% (p=0.000 n=10) Find/preload=100/by_address-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=100/by_path-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=100/ambiguous-8 6.047Ki ± 0% 12.047Ki ± 0% +99.22% (p=0.000 n=10) Find/preload=1000/by_address-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=1000/by_path-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=1000/ambiguous-8 56.05Ki ± 0% 112.05Ki ± 0% +99.92% (p=0.000 n=10) Find/preload=1000000/by_address-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=1000000/by_path-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=1000000/ambiguous-8 53.41Mi ± 0% 106.81Mi ± 0% +100.00% (p=0.000 n=10) geomean ² +36.09% ² ¹ all samples are equal ² summaries must be >0 to compute geomean │ HEAD~1 │ HEAD │ │ allocs/op │ allocs/op vs base │ Add/preload=10-8 3.000 ± 0% 4.000 ± 0% +33.33% (p=0.000 n=10) Add/preload=100-8 3.000 ± 0% 4.000 ± 0% +33.33% (p=0.000 n=10) Add/preload=1000-8 3.000 ± 0% 4.000 ± 0% +33.33% (p=0.000 n=10) Add/preload=1000000-8 2.000 ± 0% 4.000 ± 0% +100.00% (p=0.000 n=10) Find/preload=10/by_address-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=10/by_path-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=10/ambiguous-8 2.000 ± 0% 3.000 ± 0% +50.00% (p=0.000 n=10) Find/preload=100/by_address-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=100/by_path-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=100/ambiguous-8 2.000 ± 0% 3.000 ± 0% +50.00% (p=0.000 n=10) Find/preload=1000/by_address-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=1000/by_path-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=1000/ambiguous-8 2.000 ± 0% 3.000 ± 0% +50.00% (p=0.000 n=10) 
Find/preload=1000000/by_address-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=1000000/by_path-8 0.000 ± 0% 0.000 ± 0% ~ (p=1.000 n=10) ¹ Find/preload=1000000/ambiguous-8 2.000 ± 0% 3.000 ± 0% +50.00% (p=0.000 n=10) geomean ² +21.97% ² ¹ all samples are equal ² summaries must be >0 to compute geomean ``` Updates #16874
326 lines
8.6 KiB
Go
// Copyright 2017 The go-ethereum Authors
|
|
// This file is part of the go-ethereum library.
|
|
//
|
|
// The go-ethereum library is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU Lesser General Public License as published by
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
// (at your option) any later version.
|
|
//
|
|
// The go-ethereum library is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
// GNU Lesser General Public License for more details.
|
|
//
|
|
// You should have received a copy of the GNU Lesser General Public License
|
|
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
package keystore
|
|
|
|
import (
|
|
"bufio"
|
|
"encoding/json"
|
|
"fmt"
|
|
"os"
|
|
"path/filepath"
|
|
"slices"
|
|
"sort"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
mapset "github.com/deckarep/golang-set/v2"
|
|
"github.com/ethereum/go-ethereum/accounts"
|
|
"github.com/ethereum/go-ethereum/common"
|
|
"github.com/ethereum/go-ethereum/log"
|
|
)
|
|
|
|
// Minimum amount of time between cache reloads. This limit applies if the platform does
// not support change notifications. It also applies if the keystore directory does not
// exist yet; in that case the code will attempt to create a watcher at most this often.
const minReloadInterval = 2 * time.Second
|
|
|
|
// byURL defines the sorting order for accounts: it compares two accounts by
// their URL (scheme first, then path), returning <0, 0 or >0 in the manner
// expected by slices.SortFunc.
func byURL(a, b accounts.Account) int {
	return a.URL.Cmp(b.URL)
}
|
|
|
|
// AmbiguousAddrError is returned when an address matches multiple files.
type AmbiguousAddrError struct {
	Addr    common.Address     // the address that was looked up
	Matches []accounts.Account // all cached accounts sharing that address
}
|
|
|
|
func (err *AmbiguousAddrError) Error() string {
|
|
files := ""
|
|
for i, a := range err.Matches {
|
|
files += a.URL.Path
|
|
if i < len(err.Matches)-1 {
|
|
files += ", "
|
|
}
|
|
}
|
|
return fmt.Sprintf("multiple keys match address (%s)", files)
|
|
}
|
|
|
|
// accountCache is a live index of all accounts in the keystore.
type accountCache struct {
	keydir   string   // keystore directory being indexed
	watcher  *watcher // filesystem watcher keeping the cache up to date
	mu       sync.Mutex
	// byURL and byAddr are the two lookup indexes over the same set of
	// accounts; both are guarded by mu. An address mapping to more than
	// one account indicates ambiguity (multiple key files, same address).
	byURL    map[accounts.URL][]accounts.Account
	byAddr   map[common.Address][]accounts.Account
	throttle *time.Timer   // rate-limits reloads when no watcher is running
	notify   chan struct{} // signalled (non-blocking, capacity 1) after cache changes
	fileC    fileCache     // tracks key-file metadata to detect on-disk changes
}
|
|
|
|
func newAccountCache(keydir string) (*accountCache, chan struct{}) {
|
|
ac := &accountCache{
|
|
keydir: keydir,
|
|
byURL: make(map[accounts.URL][]accounts.Account),
|
|
byAddr: make(map[common.Address][]accounts.Account),
|
|
notify: make(chan struct{}, 1),
|
|
fileC: fileCache{all: mapset.NewThreadUnsafeSet[string]()},
|
|
}
|
|
ac.watcher = newWatcher(ac)
|
|
return ac, ac.notify
|
|
}
|
|
|
|
func (ac *accountCache) accounts() []accounts.Account {
|
|
ac.maybeReload()
|
|
ac.mu.Lock()
|
|
defer ac.mu.Unlock()
|
|
cpy := make([]accounts.Account, 0, len(ac.byURL))
|
|
for _, accs := range ac.byURL {
|
|
cpy = append(cpy, accs...)
|
|
}
|
|
sort.SliceStable(cpy, func(i, j int) bool { return cpy[i].URL.Cmp(cpy[j].URL) < 0 })
|
|
return cpy
|
|
}
|
|
|
|
func (ac *accountCache) hasAddress(addr common.Address) bool {
|
|
ac.maybeReload()
|
|
ac.mu.Lock()
|
|
defer ac.mu.Unlock()
|
|
return len(ac.byAddr[addr]) > 0
|
|
}
|
|
|
|
func (ac *accountCache) add(newAccount accounts.Account) {
|
|
ac.mu.Lock()
|
|
defer ac.mu.Unlock()
|
|
|
|
if accs, ok := ac.byURL[newAccount.URL]; ok && slices.Contains(accs, newAccount) {
|
|
return
|
|
}
|
|
// newAccount is not in the cache.
|
|
ac.byURL[newAccount.URL] = append(ac.byURL[newAccount.URL], newAccount)
|
|
ac.byAddr[newAccount.Address] = append(ac.byAddr[newAccount.Address], newAccount)
|
|
}
|
|
|
|
// note: removed needs to be unique here (i.e. both File and Address must be set).
|
|
func (ac *accountCache) delete(removed accounts.Account) {
|
|
ac.mu.Lock()
|
|
defer ac.mu.Unlock()
|
|
|
|
if bu := removeAccount(ac.byURL[removed.URL], removed); len(bu) == 0 {
|
|
delete(ac.byURL, removed.URL)
|
|
} else {
|
|
ac.byURL[removed.URL] = bu
|
|
}
|
|
|
|
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
|
|
delete(ac.byAddr, removed.Address)
|
|
} else {
|
|
ac.byAddr[removed.Address] = ba
|
|
}
|
|
}
|
|
|
|
// deleteByFile removes an account referenced by the given path.
|
|
func (ac *accountCache) deleteByFile(path string) {
|
|
ac.mu.Lock()
|
|
defer ac.mu.Unlock()
|
|
url := accounts.URL{Scheme: KeyStoreScheme, Path: path}
|
|
if accs, ok := ac.byURL[url]; ok {
|
|
removed := accs[0]
|
|
if len(accs) == 1 {
|
|
delete(ac.byURL, url)
|
|
} else {
|
|
ac.byURL[url] = accs[1:]
|
|
}
|
|
if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
|
|
delete(ac.byAddr, removed.Address)
|
|
} else {
|
|
ac.byAddr[removed.Address] = ba
|
|
}
|
|
}
|
|
}
|
|
|
|
// watcherStarted returns true if the watcher loop started running (even if it
// has since also ended).
func (ac *accountCache) watcherStarted() bool {
	ac.mu.Lock()
	defer ac.mu.Unlock()
	// running is true while the loop is active; runEnded remains true after
	// it exits, so the disjunction covers "ever started".
	return ac.watcher.running || ac.watcher.runEnded
}
|
|
|
|
func removeAccount(slice []accounts.Account, elem accounts.Account) []accounts.Account {
|
|
for i := range slice {
|
|
if slice[i] == elem {
|
|
return append(slice[:i], slice[i+1:]...)
|
|
}
|
|
}
|
|
return slice
|
|
}
|
|
|
|
// find returns the cached account for address if there is a unique match.
// The exact matching rules are explained by the documentation of accounts.Account.
// Callers must hold ac.mu.
func (ac *accountCache) find(a accounts.Account) (accounts.Account, error) {
	if a.URL.Path != "" {
		// If only the basename is specified, complete the path.
		if !strings.ContainsRune(a.URL.Path, filepath.Separator) {
			a.URL.Path = filepath.Join(ac.keydir, a.URL.Path)
		}
	}
	// Limit search to address candidates if possible.
	var matches []accounts.Account
	if (a.Address != common.Address{}) {
		matches = ac.byAddr[a.Address]
		if a.URL.Path != "" {
			// Both address and path given: require an exact URL match
			// among the address candidates.
			for i := range matches {
				if matches[i].URL == a.URL {
					return matches[i], nil
				}
			}
		}
	} else {
		if a.URL.Path != "" {
			// Path-only lookup: the URL index resolves it directly.
			if accs, ok := ac.byURL[a.URL]; ok {
				return accs[0], nil
			}
			return accounts.Account{}, ErrNoMatch
		}
		// Neither address nor path given: every cached account is a candidate.
		matches = make([]accounts.Account, 0, len(ac.byURL))
		for _, accs := range ac.byURL {
			matches = append(matches, accs...)
		}
	}
	switch len(matches) {
	case 1:
		return matches[0], nil
	case 0:
		return accounts.Account{}, ErrNoMatch
	default:
		// Multiple candidates: report them all, sorted by URL so the
		// error message is deterministic.
		err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]accounts.Account, len(matches))}
		copy(err.Matches, matches)
		slices.SortStableFunc(err.Matches, byURL)
		return accounts.Account{}, err
	}
}
|
|
|
|
// maybeReload rescans the keystore directory if no filesystem watcher is
// keeping the cache current, rate-limited to once per minReloadInterval.
func (ac *accountCache) maybeReload() {
	ac.mu.Lock()

	if ac.watcher.running {
		ac.mu.Unlock()
		return // A watcher is running and will keep the cache up-to-date.
	}
	if ac.throttle == nil {
		// First reload: a zero timer fires immediately, so the select
		// below would not block a future call.
		ac.throttle = time.NewTimer(0)
	} else {
		select {
		case <-ac.throttle.C:
		default:
			ac.mu.Unlock()
			return // The cache was reloaded recently.
		}
	}
	// No watcher running, start it.
	ac.watcher.start()
	ac.throttle.Reset(minReloadInterval)
	// Unlock before scanning: scanAccounts re-acquires ac.mu via add/deleteByFile.
	ac.mu.Unlock()
	ac.scanAccounts()
}
|
|
|
|
func (ac *accountCache) close() {
|
|
ac.mu.Lock()
|
|
ac.watcher.close()
|
|
if ac.throttle != nil {
|
|
ac.throttle.Stop()
|
|
}
|
|
if ac.notify != nil {
|
|
close(ac.notify)
|
|
ac.notify = nil
|
|
}
|
|
ac.mu.Unlock()
|
|
}
|
|
|
|
// scanAccounts checks if any changes have occurred on the filesystem, and
// updates the account cache accordingly
func (ac *accountCache) scanAccounts() error {
	// Scan the entire folder metadata for file changes
	creates, deletes, updates, err := ac.fileC.scan(ac.keydir)
	if err != nil {
		log.Debug("Failed to reload keystore contents", "err", err)
		return err
	}
	// Nothing changed on disk; leave the cache untouched.
	if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 {
		return nil
	}
	// Create a helper method to scan the contents of the key files
	var (
		buf = new(bufio.Reader) // reused for every file to avoid per-file allocation
		key struct {
			Address string `json:"address"`
		}
	)
	// readAccount parses the address out of a single key file, returning
	// nil (after logging) when the file cannot be opened or decoded.
	readAccount := func(path string) *accounts.Account {
		fd, err := os.Open(path)
		if err != nil {
			log.Trace("Failed to open keystore file", "path", path, "err", err)
			return nil
		}
		defer fd.Close()
		buf.Reset(fd)
		// Parse the address.
		key.Address = "" // clear so a failed decode can't reuse the previous file's address
		err = json.NewDecoder(buf).Decode(&key)
		addr := common.HexToAddress(key.Address)
		switch {
		case err != nil:
			log.Debug("Failed to decode keystore key", "path", path, "err", err)
		case addr == common.Address{}:
			log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
		default:
			return &accounts.Account{
				Address: addr,
				URL:     accounts.URL{Scheme: KeyStoreScheme, Path: path},
			}
		}
		return nil
	}
	// Process all the file diffs
	start := time.Now()

	for _, path := range creates.ToSlice() {
		if a := readAccount(path); a != nil {
			ac.add(*a)
		}
	}
	for _, path := range deletes.ToSlice() {
		ac.deleteByFile(path)
	}
	// An updated file is handled as delete-then-re-add of its account.
	for _, path := range updates.ToSlice() {
		ac.deleteByFile(path)
		if a := readAccount(path); a != nil {
			ac.add(*a)
		}
	}
	end := time.Now()

	// Wake any subscriber; non-blocking because the channel has capacity 1
	// and one pending signal is sufficient.
	select {
	case ac.notify <- struct{}{}:
	default:
	}
	log.Trace("Handled keystore changes", "time", end.Sub(start))
	return nil
}
|