Merge pull request #4667 from guggero/auto-compact

channeldb/kvdb/bolt: automatically compact database on startup
Olaoluwa Osuntokun 2020-11-16 17:50:14 -08:00 committed by GitHub
commit e4f0f2aa69
21 changed files with 659 additions and 94 deletions
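Once merged, the new behavior is opt-in through the db.bolt option group. As a quick sketch of what enabling it looks like in lnd.conf (the option names are the ones shown in the sample configuration further down in this diff; the 336h value is just an example):

[bolt]
db.bolt.auto-compact=true
db.bolt.auto-compact-min-age=336h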

@ -129,12 +129,6 @@ jobs:
cross-compile:
name: cross compilation
runs-on: ubuntu-latest
strategy:
matrix:
build_sys:
- windows-amd64
- freebsd-amd64
- solaris-amd64
steps:
- name: git checkout
uses: actions/checkout@v2
@ -155,8 +149,8 @@ jobs:
with:
go-version: '~${{ env.GO_VERSION }}'
- name: build release for architecture
run: make release sys=${{ matrix.build_sys }}
- name: build release for all architectures
run: make release
########################
# mobile compilation

@ -238,7 +238,13 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
modifier(&opts)
}
backend, err := kvdb.GetBoltBackend(dbPath, dbName, opts.NoFreelistSync)
backend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{
DBPath: dbPath,
DBFileName: dbName,
NoFreelistSync: opts.NoFreelistSync,
AutoCompact: opts.AutoCompact,
AutoCompactMinAge: opts.AutoCompactMinAge,
})
if err != nil {
return nil, err
}

@ -1,13 +1,32 @@
package kvdb
import (
"encoding/binary"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
_ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend.
)
const (
// DefaultTempDBFileName is the default name of the temporary bolt DB
// file that we'll use to atomically compact the primary DB file on
// startup.
DefaultTempDBFileName = "temp-dont-use.db"
// LastCompactionFileNameSuffix is the suffix we append to the file name
// of a database file to record the timestamp when the last compaction
// occurred.
LastCompactionFileNameSuffix = ".last-compacted"
)
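To make the naming concrete: for a database file channel.db (an illustrative name, not taken from this diff), a compaction run briefly leaves the following files side by side in the DB directory:

channel.db                  the primary DB file being compacted
channel.db.last-compacted   8-byte timestamp of the last successful compaction
temp-dont-use.db            temporary compaction target, swapped in or removed on exit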
var (
byteOrder = binary.BigEndian
)
// fileExists returns true if the file exists, and false otherwise.
func fileExists(path string) bool {
if _, err := os.Stat(path); err != nil {
@ -19,32 +38,187 @@ func fileExists(path string) bool {
return true
}
// GetBoltBackend opens (or creates, if it doesn't exist) a bbolt
// backed database and returns a kvdb.Backend wrapping it.
func GetBoltBackend(path, name string, noFreeListSync bool) (Backend, error) {
dbFilePath := filepath.Join(path, name)
var (
db Backend
err error
)
// BoltBackendConfig is a struct that holds settings specific to the bolt
// database backend.
type BoltBackendConfig struct {
// DBPath is the directory path in which the database file should be
// stored.
DBPath string
// DBFileName is the name of the database file.
DBFileName string
// NoFreelistSync, if true, prevents the database from syncing its
// freelist to disk, resulting in improved performance at the expense of
// increased startup time.
NoFreelistSync bool
// AutoCompact specifies if a Bolt based database backend should be
// automatically compacted on startup (if the minimum age of the
// database file is reached). This requires additional disk space for
// the compacted copy of the database but generally results in a
// smaller database file afterwards.
AutoCompact bool
// AutoCompactMinAge specifies the minimum time that must have passed
// since a bolt database file was last compacted for the compaction to
// be considered again.
AutoCompactMinAge time.Duration
}
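A caller-side usage sketch for the new config (the path and min age here are invented for illustration, and the caller needs to import time):

	backend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{
		DBPath:            "/home/user/.lnd/data/graph/mainnet",
		DBFileName:        "channel.db",
		NoFreelistSync:    true,
		AutoCompact:       true,
		AutoCompactMinAge: 7 * 24 * time.Hour,
	})
	if err != nil {
		return err
	}
	defer backend.Close()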
// GetBoltBackend opens (or creates, if it doesn't exist) a bbolt backed
// database and returns a kvdb.Backend wrapping it.
func GetBoltBackend(cfg *BoltBackendConfig) (Backend, error) {
dbFilePath := filepath.Join(cfg.DBPath, cfg.DBFileName)
// Is this a new database?
if !fileExists(dbFilePath) {
if !fileExists(path) {
if err := os.MkdirAll(path, 0700); err != nil {
if !fileExists(cfg.DBPath) {
if err := os.MkdirAll(cfg.DBPath, 0700); err != nil {
return nil, err
}
}
db, err = Create(BoltBackendName, dbFilePath, noFreeListSync)
} else {
db, err = Open(BoltBackendName, dbFilePath, noFreeListSync)
return Create(BoltBackendName, dbFilePath, cfg.NoFreelistSync)
}
// This is an existing database. We might want to compact it on startup
// to free up some space.
if cfg.AutoCompact {
if err := compactAndSwap(cfg); err != nil {
return nil, err
}
}
return Open(BoltBackendName, dbFilePath, cfg.NoFreelistSync)
}
// compactAndSwap will attempt to write a new temporary DB file to disk with
// the compacted database content, then atomically swap (via rename) the new
// file in place of the old one.
func compactAndSwap(cfg *BoltBackendConfig) error {
sourceName := cfg.DBFileName
// If the main DB file isn't set, then we can't proceed.
if sourceName == "" {
return fmt.Errorf("cannot compact DB with empty name")
}
sourceFilePath := filepath.Join(cfg.DBPath, sourceName)
tempDestFilePath := filepath.Join(cfg.DBPath, DefaultTempDBFileName)
// Let's find out how long ago the last compaction of the source file
// occurred and possibly skip compacting it again now.
lastCompactionDate, err := lastCompactionDate(sourceFilePath)
if err != nil {
return nil, err
return fmt.Errorf("cannot determine last compaction date of "+
"source DB file: %v", err)
}
compactAge := time.Since(lastCompactionDate)
if cfg.AutoCompactMinAge != 0 && compactAge <= cfg.AutoCompactMinAge {
log.Infof("Not compacting database file at %v, it was last "+
"compacted at %v (%v ago), min age is set to %v",
sourceFilePath, lastCompactionDate,
compactAge.Truncate(time.Second), cfg.AutoCompactMinAge)
return nil
}
return db, nil
log.Infof("Compacting database file at %v", sourceFilePath)
// If the old temporary DB file still exists, then we'll delete it
// before proceeding.
if _, err := os.Stat(tempDestFilePath); err == nil {
log.Infof("Found old temp DB @ %v, removing before swap",
tempDestFilePath)
err = os.Remove(tempDestFilePath)
if err != nil {
return fmt.Errorf("unable to remove old temp DB file: "+
"%v", err)
}
}
// Now that we know the staging area is clear, we'll create the new
// temporary DB file and close it before we write the new DB to it.
tempFile, err := os.Create(tempDestFilePath)
if err != nil {
return fmt.Errorf("unable to create temp DB file: %v", err)
}
if err := tempFile.Close(); err != nil {
return fmt.Errorf("unable to close file: %v", err)
}
// With the file created, we'll start the compaction and remove the
// temporary file altogether once this method exits.
defer func() {
// This will only succeed if the rename below fails. If the
// compaction is successful, the file won't exist on exit
// anymore, so there is no need to log an error here.
_ = os.Remove(tempDestFilePath)
}()
c := &compacter{
srcPath: sourceFilePath,
dstPath: tempDestFilePath,
}
initialSize, newSize, err := c.execute()
if err != nil {
return fmt.Errorf("error during compact: %v", err)
}
log.Infof("DB compaction of %v successful, %d -> %d bytes (gain=%.2fx)",
sourceFilePath, initialSize, newSize,
float64(initialSize)/float64(newSize))
// We try to store the current timestamp in a file with the suffix
// .last-compacted so we can figure out how long ago the last compaction
// was. But since this shouldn't fail the compaction process itself, we
// only log the error. The worst case, if this file cannot be written,
// is that we compact on every startup.
err = updateLastCompactionDate(sourceFilePath)
if err != nil {
log.Warnf("Could not update last compaction timestamp in "+
"%s%s: %v", sourceFilePath,
LastCompactionFileNameSuffix, err)
}
log.Infof("Swapping old DB file from %v to %v", tempDestFilePath,
sourceFilePath)
// Finally, we'll attempt to atomically rename the temporary file to
// the main DB file. If this succeeds, then we'll only have a
// single file on disk once this method exits.
return os.Rename(tempDestFilePath, sourceFilePath)
}
// lastCompactionDate returns the date the given database file was last
// compacted, or the Unix epoch if no compaction was recorded before. The
// compaction date is read from a file in the same directory and with the same
// name as the DB file, but with the suffix ".last-compacted".
func lastCompactionDate(dbFile string) (time.Time, error) {
zeroTime := time.Unix(0, 0)
tsFile := fmt.Sprintf("%s%s", dbFile, LastCompactionFileNameSuffix)
if !fileExists(tsFile) {
return zeroTime, nil
}
tsBytes, err := ioutil.ReadFile(tsFile)
if err != nil {
return zeroTime, err
}
tsNano := byteOrder.Uint64(tsBytes)
return time.Unix(0, int64(tsNano)), nil
}
// updateLastCompactionDate stores the current time as a timestamp in a file
// in the same directory and with the same name as the DB file, but with the
// suffix ".last-compacted".
func updateLastCompactionDate(dbFile string) error {
var tsBytes [8]byte
byteOrder.PutUint64(tsBytes[:], uint64(time.Now().UnixNano()))
tsFile := fmt.Sprintf("%s%s", dbFile, LastCompactionFileNameSuffix)
return ioutil.WriteFile(tsFile, tsBytes[:], 0600)
}
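The timestamp file format is deliberately minimal: just the Unix timestamp in nanoseconds as an 8-byte big-endian integer. A self-contained sketch of the same round trip, independent of the helpers above (the file name is illustrative):

	package main

	import (
		"encoding/binary"
		"fmt"
		"io/ioutil"
		"time"
	)

	func main() {
		// Write the current time as a big-endian uint64 of nanoseconds.
		var ts [8]byte
		binary.BigEndian.PutUint64(ts[:], uint64(time.Now().UnixNano()))
		if err := ioutil.WriteFile("channel.db.last-compacted", ts[:], 0600); err != nil {
			fmt.Println("write:", err)
			return
		}

		// Read it back and decode into a time.Time.
		b, err := ioutil.ReadFile("channel.db.last-compacted")
		if err != nil {
			fmt.Println("read:", err)
			return
		}
		fmt.Println(time.Unix(0, int64(binary.BigEndian.Uint64(b))))
	}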
// GetTestBackend opens (or creates, if it doesn't exist) a bbolt or etcd
@ -57,7 +231,11 @@ func GetTestBackend(path, name string) (Backend, func(), error) {
empty := func() {}
if TestBackend == BoltBackendName {
db, err := GetBoltBackend(path, name, true)
db, err := GetBoltBackend(&BoltBackendConfig{
DBPath: path,
DBFileName: name,
NoFreelistSync: true,
})
if err != nil {
return nil, nil, err
}

@ -0,0 +1,246 @@
// The code in this file is an adapted version of the bbolt compact command
// implemented in this file:
// https://github.com/etcd-io/bbolt/blob/master/cmd/bbolt/main.go
package kvdb
import (
"fmt"
"os"
"path"
"github.com/lightningnetwork/lnd/healthcheck"
"go.etcd.io/bbolt"
)
const (
// defaultResultFileSizeMultiplier is the default multiplier we apply to
// the current database size to calculate how big it could possibly get
// after compacting, in case the database is already at its optimal size
// and compaction causes it to grow. This should normally not be the
// case, but we really want to avoid running out of disk space during
// the compaction, so we apply a safety margin of 10%.
defaultResultFileSizeMultiplier = float64(1.1)
// defaultTxMaxSize is the default maximum number of bytes of key/value
// data that are copied in a single transaction before it is committed
// and a new one is started.
defaultTxMaxSize = 65536
// bucketFillSize is the fill percent setting that is used for each new
// bucket that is created in the compacted database. This setting is not
// persisted and is therefore only effective for the compaction itself.
// Because we only append data during the compaction, a fill percent of
// 100% is optimal for performance.
bucketFillSize = 1.0
)
type compacter struct {
srcPath string
dstPath string
txMaxSize int64
}
// execute opens the source and destination databases and then compacts the
// source into the destination, returning the sizes of both files as a result.
func (cmd *compacter) execute() (int64, int64, error) {
if cmd.txMaxSize == 0 {
cmd.txMaxSize = defaultTxMaxSize
}
// Ensure source file exists.
fi, err := os.Stat(cmd.srcPath)
if err != nil {
return 0, 0, fmt.Errorf("error determining source database "+
"size: %v", err)
}
initialSize := fi.Size()
marginSize := float64(initialSize) * defaultResultFileSizeMultiplier
// Before opening any of the databases, let's first make sure we have
// enough free space on the destination file system to create a full
// copy of the source DB (worst-case scenario if the compaction doesn't
// actually shrink the file size).
destFolder := path.Dir(cmd.dstPath)
freeSpace, err := healthcheck.AvailableDiskSpace(destFolder)
if err != nil {
return 0, 0, fmt.Errorf("error determining free disk space on "+
"%s: %v", destFolder, err)
}
log.Debugf("Free disk space on compaction destination file system: "+
"%d bytes", freeSpace)
if freeSpace < uint64(marginSize) {
return 0, 0, fmt.Errorf("could not start compaction, "+
"destination folder %s only has %d bytes of free disk "+
"space available while we need at least %d for worst-"+
"case compaction", destFolder, freeSpace, initialSize)
}
// Open the source database. We open it in read-only mode to avoid (and fix)
// possible freelist sync problems.
src, err := bbolt.Open(cmd.srcPath, 0444, &bbolt.Options{
ReadOnly: true,
})
if err != nil {
return 0, 0, fmt.Errorf("error opening source database: %v",
err)
}
defer func() {
if err := src.Close(); err != nil {
log.Errorf("Compact error: closing source DB: %v", err)
}
}()
// Open destination database.
dst, err := bbolt.Open(cmd.dstPath, fi.Mode(), nil)
if err != nil {
return 0, 0, fmt.Errorf("error opening destination database: "+
"%v", err)
}
defer func() {
if err := dst.Close(); err != nil {
log.Errorf("Compact error: closing dest DB: %v", err)
}
}()
// Run compaction.
if err := cmd.compact(dst, src); err != nil {
return 0, 0, fmt.Errorf("error running compaction: %v", err)
}
// Report stats on new size.
fi, err = os.Stat(cmd.dstPath)
if err != nil {
return 0, 0, fmt.Errorf("error determining destination "+
"database size: %v", err)
} else if fi.Size() == 0 {
return 0, 0, fmt.Errorf("zero db size")
}
return initialSize, fi.Size(), nil
}
// compact tries to create a compacted copy of the source database in a new
// destination database.
func (cmd *compacter) compact(dst, src *bbolt.DB) error {
// Commit regularly, or we'll run out of memory for large datasets if
// using one transaction.
var size int64
tx, err := dst.Begin(true)
if err != nil {
return err
}
defer func() {
_ = tx.Rollback()
}()
if err := cmd.walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
// On each key/value, check if we have exceeded tx size.
sz := int64(len(k) + len(v))
if size+sz > cmd.txMaxSize && cmd.txMaxSize != 0 {
// Commit previous transaction.
if err := tx.Commit(); err != nil {
return err
}
// Start new transaction.
tx, err = dst.Begin(true)
if err != nil {
return err
}
size = 0
}
size += sz
// Create bucket on the root transaction if this is the first
// level.
nk := len(keys)
if nk == 0 {
bkt, err := tx.CreateBucket(k)
if err != nil {
return err
}
if err := bkt.SetSequence(seq); err != nil {
return err
}
return nil
}
// Create buckets on subsequent levels, if necessary.
b := tx.Bucket(keys[0])
if nk > 1 {
for _, k := range keys[1:] {
b = b.Bucket(k)
}
}
// Fill the entire page for best compaction.
b.FillPercent = bucketFillSize
// If there is no value then this is a bucket call.
if v == nil {
bkt, err := b.CreateBucket(k)
if err != nil {
return err
}
if err := bkt.SetSequence(seq); err != nil {
return err
}
return nil
}
// Otherwise treat it as a key/value pair.
return b.Put(k, v)
}); err != nil {
return err
}
return tx.Commit()
}
// walkFunc is the type of the function called for keys (buckets and "normal"
// values) discovered by Walk. keys is the list of keys to descend to the bucket
// owning the discovered key/value pair k/v.
type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error
// walk recursively walks the bolt database db, calling walkFn for each key it
// finds.
func (cmd *compacter) walk(db *bbolt.DB, walkFn walkFunc) error {
return db.View(func(tx *bbolt.Tx) error {
return tx.ForEach(func(name []byte, b *bbolt.Bucket) error {
// This will log only the top-level buckets, to give the
// user some sense of progress.
log.Debugf("Compacting top level bucket %s", name)
return cmd.walkBucket(
b, nil, name, nil, b.Sequence(), walkFn,
)
})
})
}
// walkBucket recursively walks through a bucket.
func (cmd *compacter) walkBucket(b *bbolt.Bucket, keyPath [][]byte, k, v []byte,
seq uint64, fn walkFunc) error {
// Execute callback.
if err := fn(keyPath, k, v, seq); err != nil {
return err
}
// If this is not a bucket then stop.
if v != nil {
return nil
}
// Iterate over each child key/value.
keyPath = append(keyPath, k)
return b.ForEach(func(k, v []byte) error {
if v == nil {
bkt := b.Bucket(k)
return cmd.walkBucket(
bkt, keyPath, k, nil, bkt.Sequence(), fn,
)
}
return cmd.walkBucket(b, keyPath, k, v, b.Sequence(), fn)
})
}
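Because the compacter only needs the two file paths, it can also be driven directly; a hedged sketch (paths invented) that performs the copy step of compactAndSwap without the swap:

	c := &compacter{
		srcPath: "/tmp/channel.db",
		dstPath: "/tmp/channel-compacted.db",
	}
	initialSize, newSize, err := c.execute()
	if err != nil {
		return fmt.Errorf("error during compact: %v", err)
	}
	log.Infof("Compacted %d -> %d bytes", initialSize, newSize)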

@ -1,18 +1,31 @@
package kvdb
// BoltBackendName is the name of the backend that should be passed into
// kvdb.Create to initialize a new instance of kvdb.Backend backed by a live
// instance of bbolt.
const BoltBackendName = "bdb"
import "time"
// EtcdBackendName is the name of the backend that should be passed into
// kvdb.Create to initialize a new instance of kvdb.Backend backed by a live
// instance of etcd.
const EtcdBackendName = "etcd"
const (
// BoltBackendName is the name of the backend that should be passed into
// kvdb.Create to initialize a new instance of kvdb.Backend backed by a
// live instance of bbolt.
BoltBackendName = "bdb"
// EtcdBackendName is the name of the backend that should be passed into
// kvdb.Create to initialize a new instance of kvdb.Backend backed by a
// live instance of etcd.
EtcdBackendName = "etcd"
// DefaultBoltAutoCompactMinAge is the default minimum time that must
// have passed since a bolt database file was last compacted for the
// compaction to be considered again.
DefaultBoltAutoCompactMinAge = time.Hour * 24 * 7
)
// BoltConfig holds bolt configuration.
type BoltConfig struct {
SyncFreelist bool `long:"nofreelistsync" description:"Whether the databases used within lnd should sync their freelist to disk. This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."`
AutoCompact bool `long:"auto-compact" description:"Whether the databases used within lnd should automatically be compacted on every startup (and if the database has the configured minimum age). This is disabled by default because it requires additional disk space to be available during the compaction that is freed afterwards. In general compaction leads to smaller database files."`
AutoCompactMinAge time.Duration `long:"auto-compact-min-age" description:"How long ago the last compaction of a database file must be for it to be considered for auto compaction again. Can be set to 0 to compact on every startup."`
}
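Given the long tags above and the db.bolt group used in the sample configuration further down, the equivalent command-line invocation would presumably look like this (flag spelling inferred from the config group, not verified against the CLI help):

	lnd --db.bolt.auto-compact --db.bolt.auto-compact-min-age=336h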
// EtcdConfig holds etcd configuration.

channeldb/kvdb/log.go (new file, 12 lines)

@ -0,0 +1,12 @@
package kvdb
import "github.com/btcsuite/btclog"
// log is a logger that is initialized as disabled. This means the package will
// not perform any logging by default until a logger is set.
var log = btclog.Disabled
// UseLogger uses a specified Logger to output package logging info.
func UseLogger(logger btclog.Logger) {
log = logger
}
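Because the package logger defaults to btclog.Disabled, callers have to wire one up explicitly before any compaction progress is visible; a minimal sketch (the subsystem tag and writer are arbitrary choices):

	import (
		"os"

		"github.com/btcsuite/btclog"
		"github.com/lightningnetwork/lnd/channeldb/kvdb"
	)

	func init() {
		backend := btclog.NewBackend(os.Stdout)
		kvdb.UseLogger(backend.Logger("KVDB"))
	}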

@ -3,6 +3,7 @@ package channeldb
import (
"github.com/btcsuite/btclog"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
mig "github.com/lightningnetwork/lnd/channeldb/migration"
"github.com/lightningnetwork/lnd/channeldb/migration12"
"github.com/lightningnetwork/lnd/channeldb/migration13"
@ -35,4 +36,5 @@ func UseLogger(logger btclog.Logger) {
migration12.UseLogger(logger)
migration13.UseLogger(logger)
migration16.UseLogger(logger)
kvdb.UseLogger(logger)
}

@ -1,6 +1,11 @@
package channeldb
import "github.com/lightningnetwork/lnd/clock"
import (
"time"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/clock"
)
const (
// DefaultRejectCacheSize is the default number of rejectCacheEntries to
@ -16,6 +21,8 @@ const (
// Options holds parameters for tuning and customizing a channeldb.DB.
type Options struct {
kvdb.BoltBackendConfig
// RejectCacheSize is the maximum number of rejectCacheEntries to hold
// in the rejection cache.
RejectCacheSize int
@ -24,11 +31,6 @@ type Options struct {
// channel cache.
ChannelCacheSize int
// NoFreelistSync, if true, prevents the database from syncing its
// freelist to disk, resulting in improved performance at the expense of
// increased startup time.
NoFreelistSync bool
// clock is the time source used by the database.
clock clock.Clock
@ -40,9 +42,13 @@ type Options struct {
// DefaultOptions returns an Options populated with default values.
func DefaultOptions() Options {
return Options{
BoltBackendConfig: kvdb.BoltBackendConfig{
NoFreelistSync: true,
AutoCompact: false,
AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge,
},
RejectCacheSize: DefaultRejectCacheSize,
ChannelCacheSize: DefaultChannelCacheSize,
NoFreelistSync: true,
clock: clock.NewDefaultClock(),
}
}
@ -71,6 +77,21 @@ func OptionSetSyncFreelist(b bool) OptionModifier {
}
}
// OptionAutoCompact turns on automatic database compaction on startup.
func OptionAutoCompact() OptionModifier {
return func(o *Options) {
o.AutoCompact = true
}
}
// OptionAutoCompactMinAge sets the minimum age for automatic database
// compaction.
func OptionAutoCompactMinAge(minAge time.Duration) OptionModifier {
return func(o *Options) {
o.AutoCompactMinAge = minAge
}
}
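Taken together with the Open function shown earlier, a caller can request compaction through the new modifiers; a usage sketch (path invented):

	db, err := channeldb.Open(
		"/home/user/.lnd/data/graph/mainnet",
		channeldb.OptionAutoCompact(),
		channeldb.OptionAutoCompactMinAge(kvdb.DefaultBoltAutoCompactMinAge),
	)
	if err != nil {
		return err
	}
	defer db.Close()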
// OptionClock sets a non-default clock dependency.
func OptionClock(clock clock.Clock) OptionModifier {
return func(o *Options) {

go.mod

@ -63,6 +63,7 @@ require (
github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02
github.com/urfave/cli v1.18.0
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50
go.uber.org/zap v1.14.1 // indirect
golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0

@ -4,8 +4,9 @@ package healthcheck
import "syscall"
// AvailableDiskSpace returns ratio of available disk space to total capacity.
func AvailableDiskSpace(path string) (float64, error) {
// AvailableDiskSpaceRatio returns ratio of available disk space to total
// capacity.
func AvailableDiskSpaceRatio(path string) (float64, error) {
s := syscall.Statfs_t{}
err := syscall.Statfs(path, &s)
if err != nil {
@ -16,3 +17,17 @@ func AvailableDiskSpace(path string) (float64, error) {
// free blocks.
return float64(s.Bfree) / float64(s.Blocks), nil
}
// AvailableDiskSpace returns the available disk space in bytes of the given
// file system.
func AvailableDiskSpace(path string) (uint64, error) {
s := syscall.Statfs_t{}
err := syscall.Statfs(path, &s)
if err != nil {
return 0, err
}
// Some OSes have s.Bavail defined as int64, others as uint64, so we
// need the explicit type conversion here.
return uint64(s.Bavail) * uint64(s.Bsize), nil // nolint:unconvert
}

@ -2,9 +2,9 @@ package healthcheck
import "golang.org/x/sys/unix"
// AvailableDiskSpace returns ratio of available disk space to total capacity
// for solaris.
func AvailableDiskSpace(path string) (float64, error) {
// AvailableDiskSpaceRatio returns ratio of available disk space to total
// capacity for netbsd.
func AvailableDiskSpaceRatio(path string) (float64, error) {
s := unix.Statvfs_t{}
err := unix.Statvfs(path, &s)
if err != nil {
@ -15,3 +15,15 @@ func AvailableDiskSpace(path string) (float64, error) {
// free blocks.
return float64(s.Bfree) / float64(s.Blocks), nil
}
// AvailableDiskSpace returns the available disk space in bytes of the given
// file system for netbsd.
func AvailableDiskSpace(path string) (uint64, error) {
s := unix.Statvfs_t{}
err := unix.Statvfs(path, &s)
if err != nil {
return 0, err
}
return s.Bavail * uint64(s.Bsize), nil
}

@ -2,9 +2,9 @@ package healthcheck
import "golang.org/x/sys/unix"
// AvailableDiskSpace returns ratio of available disk space to total capacity
// for solaris.
func AvailableDiskSpace(path string) (float64, error) {
// AvailableDiskSpaceRatio returns ratio of available disk space to total
// capacity for openbsd.
func AvailableDiskSpaceRatio(path string) (float64, error) {
s := unix.Statfs_t{}
err := unix.Statfs(path, &s)
if err != nil {
@ -15,3 +15,15 @@ func AvailableDiskSpace(path string) (float64, error) {
// free blocks.
return float64(s.F_bfree) / float64(s.F_blocks), nil
}
// AvailableDiskSpace returns the available disk space in bytes of the given
// file system for openbsd.
func AvailableDiskSpace(path string) (uint64, error) {
s := unix.Statfs_t{}
err := unix.Statfs(path, &s)
if err != nil {
return 0, err
}
return uint64(s.F_bavail) * uint64(s.F_bsize), nil
}

@ -2,9 +2,9 @@ package healthcheck
import "golang.org/x/sys/unix"
// AvailableDiskSpace returns ratio of available disk space to total capacity
// for solaris.
func AvailableDiskSpace(path string) (float64, error) {
// AvailableDiskSpaceRatio returns ratio of available disk space to total
// capacity for solaris.
func AvailableDiskSpaceRatio(path string) (float64, error) {
s := unix.Statvfs_t{}
err := unix.Statvfs(path, &s)
if err != nil {
@ -15,3 +15,15 @@ func AvailableDiskSpace(path string) (float64, error) {
// free blocks.
return float64(s.Bfree) / float64(s.Blocks), nil
}
// AvailableDiskSpace returns the available disk space in bytes of the given
// file system for solaris.
func AvailableDiskSpace(path string) (uint64, error) {
s := unix.Statvfs_t{}
err := unix.Statvfs(path, &s)
if err != nil {
return 0, err
}
return s.Bavail * uint64(s.Bsize), nil
}

@ -2,16 +2,30 @@ package healthcheck
import "golang.org/x/sys/windows"
// AvailableDiskSpace returns ratio of available disk space to total capacity
// for windows.
func AvailableDiskSpace(path string) (float64, error) {
// AvailableDiskSpaceRatio returns ratio of available disk space to total
// capacity for windows.
func AvailableDiskSpaceRatio(path string) (float64, error) {
var free, total, avail uint64
pathPtr, err := windows.UTF16PtrFromString(path)
if err != nil {
panic(err)
return 0, err
}
err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail)
if err != nil {
	return 0, err
}
return float64(avail) / float64(total), nil
}
// AvailableDiskSpace returns the available disk space in bytes of the given
// file system for windows.
func AvailableDiskSpace(path string) (uint64, error) {
var free, total, avail uint64
pathPtr, err := windows.UTF16PtrFromString(path)
if err != nil {
return 0, err
}
err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail)
if err != nil {
	return 0, err
}
return avail, nil
}

@ -51,7 +51,7 @@ type DecayedLog struct {
started int32 // To be used atomically.
stopped int32 // To be used atomically.
dbPath string
cfg *kvdb.BoltBackendConfig
db kvdb.Backend
@ -64,16 +64,24 @@ type DecayedLog struct {
// NewDecayedLog creates a new DecayedLog, which caches recently seen hash
// shared secrets. Entries are evicted as their cltv expires using block epochs
// from the given notifier.
func NewDecayedLog(dbPath string,
func NewDecayedLog(dbPath, dbFileName string, boltCfg *kvdb.BoltConfig,
notifier chainntnfs.ChainNotifier) *DecayedLog {
cfg := &kvdb.BoltBackendConfig{
DBPath: dbPath,
DBFileName: dbFileName,
NoFreelistSync: true,
AutoCompact: boltCfg.AutoCompact,
AutoCompactMinAge: boltCfg.AutoCompactMinAge,
}
// Use default path for log database
if dbPath == "" {
dbPath = defaultDbDirectory
cfg.DBPath = defaultDbDirectory
}
return &DecayedLog{
dbPath: dbPath,
cfg: cfg,
notifier: notifier,
quit: make(chan struct{}),
}
@ -89,9 +97,7 @@ func (d *DecayedLog) Start() error {
// Open the boltdb for use.
var err error
d.db, err = kvdb.Create(
kvdb.BoltBackendName, d.dbPath, true,
)
d.db, err = kvdb.GetBoltBackend(d.cfg)
if err != nil {
return fmt.Errorf("could not open boltdb: %v", err)
}
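With the new signature, constructing a replay log with compaction enabled looks roughly like this (directory invented, file name taken from the tests below):

	replayLog := NewDecayedLog(
		"/home/user/.lnd/data/graph/mainnet", "sphinxreplay.db",
		&kvdb.BoltConfig{
			AutoCompact:       true,
			AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge,
		},
		chainNotifier,
	)
	if err := replayLog.Start(); err != nil {
		return err
	}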

@ -4,12 +4,12 @@ import (
"crypto/rand"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntest/mock"
)
@ -19,17 +19,17 @@ const (
// tempDecayedLogPath creates a new temporary database path to back a single
// decayed log instance.
func tempDecayedLogPath(t *testing.T) string {
func tempDecayedLogPath(t *testing.T) (string, string) {
dir, err := ioutil.TempDir("", "decayedlog")
if err != nil {
t.Fatalf("unable to create temporary decayed log dir: %v", err)
}
return filepath.Join(dir, "sphinxreplay.db")
return dir, "sphinxreplay.db"
}
// startup sets up the DecayedLog and possibly the garbage collector.
func startup(dbPath string, notifier bool) (sphinx.ReplayLog,
func startup(dbPath, dbFileName string, notifier bool) (sphinx.ReplayLog,
*mock.ChainNotifier, *sphinx.HashPrefix, error) {
var log sphinx.ReplayLog
@ -44,10 +44,12 @@ func startup(dbPath string, notifier bool) (sphinx.ReplayLog,
}
// Initialize the DecayedLog object
log = NewDecayedLog(dbPath, chainNotifier)
log = NewDecayedLog(
dbPath, dbFileName, &kvdb.BoltConfig{}, chainNotifier,
)
} else {
// Initialize the DecayedLog object
log = NewDecayedLog(dbPath, nil)
log = NewDecayedLog(dbPath, dbFileName, &kvdb.BoltConfig{}, nil)
}
// Open the channeldb (start the garbage collector)
@ -81,9 +83,9 @@ func shutdown(dir string, d sphinx.ReplayLog) {
func TestDecayedLogGarbageCollector(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, notifier, hashedSecret, err := startup(dbPath, true)
d, notifier, hashedSecret, err := startup(dbPath, dbFileName, true)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
@ -142,9 +144,9 @@ func TestDecayedLogGarbageCollector(t *testing.T) {
func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, _, hashedSecret, err := startup(dbPath, true)
d, _, hashedSecret, err := startup(dbPath, dbFileName, true)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
@ -164,7 +166,7 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
// Shut down DecayedLog and the garbage collector along with it.
d.Stop()
d2, notifier2, _, err := startup(dbPath, true)
d2, notifier2, _, err := startup(dbPath, dbFileName, true)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
@ -198,9 +200,9 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) {
func TestDecayedLogInsertionAndDeletion(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, _, hashedSecret, err := startup(dbPath, false)
d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
@ -236,9 +238,9 @@ func TestDecayedLogInsertionAndDeletion(t *testing.T) {
func TestDecayedLogStartAndStop(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, _, hashedSecret, err := startup(dbPath, false)
d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}
@ -253,7 +255,7 @@ func TestDecayedLogStartAndStop(t *testing.T) {
// Shutdown the DecayedLog's channeldb
d.Stop()
d2, _, hashedSecret2, err := startup(dbPath, false)
d2, _, hashedSecret2, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
@ -280,7 +282,7 @@ func TestDecayedLogStartAndStop(t *testing.T) {
// Shutdown the DecayedLog's channeldb
d2.Stop()
d3, _, hashedSecret3, err := startup(dbPath, false)
d3, _, hashedSecret3, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to restart DecayedLog: %v", err)
}
@ -302,9 +304,9 @@ func TestDecayedLogStartAndStop(t *testing.T) {
func TestDecayedLogStorageAndRetrieval(t *testing.T) {
t.Parallel()
dbPath := tempDecayedLogPath(t)
dbPath, dbFileName := tempDecayedLogPath(t)
d, _, hashedSecret, err := startup(dbPath, false)
d, _, hashedSecret, err := startup(dbPath, dbFileName, false)
if err != nil {
t.Fatalf("Unable to start up DecayedLog: %v", err)
}

@ -26,7 +26,9 @@ type DB struct {
func DefaultDB() *DB {
return &DB{
Backend: BoltBackend,
Bolt: &kvdb.BoltConfig{},
Bolt: &kvdb.BoltConfig{
AutoCompactMinAge: kvdb.DefaultBoltAutoCompactMinAge,
},
}
}
@ -81,9 +83,13 @@ func (db *DB) GetBackends(ctx context.Context, dbPath string,
}
}
localDB, err = kvdb.GetBoltBackend(
dbPath, dbName, !db.Bolt.SyncFreelist,
)
localDB, err = kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{
DBPath: dbPath,
DBFileName: dbName,
NoFreelistSync: !db.Bolt.SyncFreelist,
AutoCompact: db.Bolt.AutoCompact,
AutoCompactMinAge: db.Bolt.AutoCompactMinAge,
})
if err != nil {
return nil, err
}

lnd.go

@ -1353,8 +1353,9 @@ func initializeDatabases(ctx context.Context,
"minutes...")
if cfg.DB.Backend == lncfg.BoltBackend {
ltndLog.Infof("Opening bbolt database, sync_freelist=%v",
cfg.DB.Bolt.SyncFreelist)
ltndLog.Infof("Opening bbolt database, sync_freelist=%v, "+
"auto_compact=%v", cfg.DB.Bolt.SyncFreelist,
cfg.DB.Bolt.AutoCompact)
}
startOpenTime := time.Now()

@ -1,8 +1,7 @@
VERSION_TAG = $(shell date +%Y%m%d)-01
VERSION_CHECK = @$(call print, "Building master with date version tag")
BUILD_SYSTEM = darwin-386 \
darwin-amd64 \
BUILD_SYSTEM = darwin-amd64 \
dragonfly-amd64 \
freebsd-386 \
freebsd-amd64 \

@ -955,3 +955,15 @@ litecoin.node=ltcd
[bolt]
; If true, prevents the database from syncing its freelist to disk.
; db.bolt.nofreelistsync=1
; Whether the databases used within lnd should automatically be compacted on
; every startup (and if the database has the configured minimum age). This is
; disabled by default because it requires additional disk space to be available
; during the compaction that is freed afterwards. In general compaction leads to
; smaller database files.
; db.bolt.auto-compact=true
; How long ago the last compaction of a database file must be for it to be
; considered for auto compaction again. Can be set to 0 to compact on every
; startup. (default: 168h)
; db.bolt.auto-compact-min-age=0

@ -10,7 +10,6 @@ import (
"math/big"
prand "math/rand"
"net"
"path/filepath"
"regexp"
"strconv"
"strings"
@ -372,10 +371,10 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
// Initialize the sphinx router, placing its persistent replay log in
// the same directory as the channel graph database. We don't need to
// replicate this data, so we'll store it locally.
sharedSecretPath := filepath.Join(
cfg.localDatabaseDir(), defaultSphinxDbName,
replayLog := htlcswitch.NewDecayedLog(
cfg.localDatabaseDir(), defaultSphinxDbName, cfg.DB.Bolt,
cc.ChainNotifier,
)
replayLog := htlcswitch.NewDecayedLog(sharedSecretPath, cc.ChainNotifier)
sphinxRouter := sphinx.NewRouter(
nodeKeyECDH, cfg.ActiveNetParams.Params, replayLog,
)
@ -1303,7 +1302,9 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
diskCheck := healthcheck.NewObservation(
"disk space",
func() error {
free, err := healthcheck.AvailableDiskSpace(cfg.LndDir)
free, err := healthcheck.AvailableDiskSpaceRatio(
cfg.LndDir,
)
if err != nil {
return err
}