server+htlcswitch: auto compact sphinx DB too

Oliver Gugger 2020-11-09 10:21:30 +01:00
parent a8ef4fc158
commit 32ee527f6b
3 changed files with 37 additions and 30 deletions

htlcswitch/decayedlog.go

@@ -51,7 +51,7 @@ type DecayedLog struct {
started int32 // To be used atomically.
stopped int32 // To be used atomically.
dbPath string
cfg *kvdb.BoltBackendConfig
db kvdb.Backend
@@ -64,16 +64,24 @@ type DecayedLog struct {
// NewDecayedLog creates a new DecayedLog, which caches recently seen hash
// shared secrets. Entries are evicted as their cltv expires using block epochs
// from the given notifier.
func NewDecayedLog(dbPath string,
func NewDecayedLog(dbPath, dbFileName string, boltCfg *kvdb.BoltConfig,
notifier chainntnfs.ChainNotifier) *DecayedLog {
cfg := &kvdb.BoltBackendConfig{
DBPath: dbPath,
DBFileName: dbFileName,
NoFreelistSync: true,
AutoCompact: boltCfg.AutoCompact,
AutoCompactMinAge: boltCfg.AutoCompactMinAge,
}
// Use default path for log database
if dbPath == "" {
dbPath = defaultDbDirectory
cfg.DBPath = defaultDbDirectory
}
return &DecayedLog{
dbPath: dbPath,
cfg: cfg,
notifier: notifier,
quit: make(chan struct{}),
}
@@ -89,9 +97,7 @@ func (d *DecayedLog) Start() error {
// Open the boltdb for use.
var err error
d.db, err = kvdb.Create(
kvdb.BoltBackendName, d.dbPath, true,
)
d.db, err = kvdb.GetBoltBackend(d.cfg)
if err != nil {
return fmt.Errorf("could not open boltdb: %v", err)
}
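For readers who haven't used the kvdb helpers, the following is a minimal, self-contained sketch of the backend call the decayed log now delegates to. The path and the compaction age are made-up example values and the surrounding main function exists only for illustration; the BoltBackendConfig fields and kvdb.GetBoltBackend are the ones shown in the hunk above.

package main

import (
	"fmt"
	"time"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

func main() {
	// Example values only; the field names mirror the config that
	// NewDecayedLog builds above.
	cfg := &kvdb.BoltBackendConfig{
		DBPath:            "/tmp/decayedlog-example",
		DBFileName:        "sphinxreplay.db",
		NoFreelistSync:    true,
		AutoCompact:       true,
		AutoCompactMinAge: 7 * 24 * time.Hour, // assumed illustrative value
	}

	// With AutoCompact set, the backend may compact the database file on
	// open before handing back the handle.
	db, err := kvdb.GetBoltBackend(cfg)
	if err != nil {
		fmt.Printf("could not open boltdb: %v\n", err)
		return
	}
	defer db.Close()

	fmt.Println("sphinx replay DB opened")
}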

htlcswitch/decayedlog_test.go

@@ -4,12 +4,12 @@ import (
"crypto/rand"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntest/mock"
)
@@ -19,17 +19,17 @@ const (
// tempDecayedLogPath creates a new temporary database path to back a single
// decayed log instance.
func tempDecayedLogPath(t *testing.T) string {
func tempDecayedLogPath(t *testing.T) (string, string) {
dir, err := ioutil.TempDir("", "decayedlog")
if err != nil {
t.Fatalf("unable to create temporary decayed log dir: %v", err)
}
return filepath.Join(dir, "sphinxreplay.db")
return dir, "sphinxreplay.db"
}
// startup sets up the DecayedLog and possibly the garbage collector.
func startup(dbPath string, notifier bool) (sphinx.ReplayLog,
func startup(dbPath, dbFileName string, notifier bool) (sphinx.ReplayLog,
*mock.ChainNotifier, *sphinx.HashPrefix, error) {
var log sphinx.ReplayLog
@@ -44,10 +44,12 @@ func startup(dbPath string, notifier bool) (sphinx.ReplayLog,
}
// Initialize the DecayedLog object
log = NewDecayedLog(dbPath, chainNotifier)
log = NewDecayedLog(
dbPath, dbFileName, &kvdb.BoltConfig{}, chainNotifier,
)
} else {
// Initialize the DecayedLog object
log = NewDecayedLog(dbPath, nil)
log = NewDecayedLog(dbPath, dbFileName, &kvdb.BoltConfig{}, nil)
}
// Open the channeldb (start the garbage collector)
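Put together, a test in this package would now set up and tear down the log roughly as sketched below. The test name is hypothetical and the body is trimmed; the point is that tempDecayedLogPath returns the directory and file name separately so the bolt backend can join them itself, while shutdown can still remove the whole temporary directory.

// Hypothetical smoke test illustrating the updated helper signatures; it is
// assumed to live in the same package as the helpers above.
func TestDecayedLogSmoke(t *testing.T) {
	t.Parallel()

	// Directory and file name are now returned separately.
	dbPath, dbFileName := tempDecayedLogPath(t)

	// Start without a notifier, i.e. with the garbage collector disabled.
	d, _, _, err := startup(dbPath, dbFileName, false)
	if err != nil {
		t.Fatalf("unable to start up DecayedLog: %v", err)
	}
	defer shutdown(dbPath, d)
}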
@@ -81,9 +83,9 @@ func shutdown(dir string, d sphinx.ReplayLog) {
func TestDecayedLogGarbageCollector(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, notifier, hashedSecret, err := startup(dbPath, true)
d, notifier, hashedSecret, err := startup(dbPath, dbFileName, true)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
@@ -142,9 +144,9 @@ func TestDecayedLogGarbageCollector(t *testing.T) {
func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, _, hashedSecret, err := startup(dbPath, true)
d, _, hashedSecret, err := startup(dbPath, dbFileName, true)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
@@ -164,7 +166,7 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
// Shut down DecayedLog and the garbage collector along with it.
d.Stop()
d2, notifier2, _, err := startup(dbPath, true)
d2, notifier2, _, err := startup(dbPath, dbFileName, true)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
@@ -198,9 +200,9 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
func TestDecayedLogInsertionAndDeletion(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, _, hashedSecret, err := startup(dbPath, false)
d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
@@ -236,9 +238,9 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) {
func TestDecayedLogStartAndStop(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, _, hashedSecret, err := startup(dbPath, false)
d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
@@ -253,7 +255,7 @@ func TestDecayedLogStartAndStop(t *testing.T) {
// Shutdown the DecayedLog's channeldb
d.Stop()
d2, _, hashedSecret2, err := startup(dbPath, false)
d2, _, hashedSecret2, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
@@ -280,7 +282,7 @@ func TestDecayedLogStartAndStop(t *testing.T) {
// Shutdown the DecayedLog's channeldb
d2.Stop()
d3, _, hashedSecret3, err := startup(dbPath, false)
d3, _, hashedSecret3, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
@@ -302,9 +304,9 @@ func TestDecayedLogStartAndStop(t *testing.T) {
func TestDecayedLogStorageAndRetrieval(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, _, hashedSecret, err := startup(dbPath, false)
d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}

server.go

@@ -10,7 +10,6 @@ import (
"math/big"
prand "math/rand"
"net"
"path/filepath"
"regexp"
"strconv"
"sync"
@@ -371,10 +370,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// Initialize the sphinx router, placing its persistent replay log in
// the same directory as the channel graph database. We don't need to
// replicate this data, so we'll store it locally.
sharedSecretPath := filepath.Join(
cfg.localDatabaseDir(), defaultSphinxDbName,
)
replayLog := htlcswitch.NewDecayedLog(sharedSecretPath, cc.ChainNotifier)
replayLog := htlcswitch.NewDecayedLog(
cfg.localDatabaseDir(), defaultSphinxDbName, cfg.DB.Bolt,
cc.ChainNotifier,
)
sphinxRouter := sphinx.NewRouter(
nodeKeyECDH, cfg.ActiveNetParams.Params, replayLog,
)
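As a usage illustration, this is roughly what the new call shape looks like when the bolt settings are spelled out explicitly instead of being taken from cfg.DB.Bolt. The literal path, the compaction age and the nil notifier (which simply disables the garbage collector) are assumptions for the example; the argument order matches the NewDecayedLog signature introduced in this commit.

package main

import (
	"log"
	"time"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/htlcswitch"
)

func main() {
	// Illustrative settings; in lnd itself these come from cfg.DB.Bolt.
	boltCfg := &kvdb.BoltConfig{
		AutoCompact:       true,
		AutoCompactMinAge: 7 * 24 * time.Hour, // assumed example value
	}

	// Directory, file name and bolt settings are passed separately now;
	// a nil notifier disables the garbage collector.
	replayLog := htlcswitch.NewDecayedLog(
		"/home/alice/.lnd/data/graph/mainnet", "sphinxreplay.db",
		boltCfg, nil,
	)
	if err := replayLog.Start(); err != nil {
		log.Fatalf("unable to start replay log: %v", err)
	}
	defer replayLog.Stop()
}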