Merge pull request #3833 from Roasbeef/kv-abstraction

channeldb/kvdb: introduce new KV-store database abstraction
Olaoluwa Osuntokun 2020-03-18 20:20:07 -07:00 committed by GitHub
commit 4897b34050
78 changed files with 1443 additions and 1201 deletions

@ -10,8 +10,8 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -51,7 +51,7 @@ func ChannelGraphFromDatabase(db *channeldb.ChannelGraph) ChannelGraph {
// channeldb.LightningNode. The wrapper methods implement the autopilot.Node
// interface.
type dbNode struct {
tx *bbolt.Tx
tx kvdb.ReadTx
node *channeldb.LightningNode
}
@ -84,7 +84,7 @@ func (d dbNode) Addrs() []net.Addr {
//
// NOTE: Part of the autopilot.Node interface.
func (d dbNode) ForEachChannel(cb func(ChannelEdge) error) error {
return d.node.ForEachChannel(d.tx, func(tx *bbolt.Tx,
return d.node.ForEachChannel(d.tx, func(tx kvdb.ReadTx,
ei *channeldb.ChannelEdgeInfo, ep, _ *channeldb.ChannelEdgePolicy) error {
// Skip channels for which no outgoing edge policy is available.
@ -121,7 +121,7 @@ func (d dbNode) ForEachChannel(cb func(ChannelEdge) error) error {
//
// NOTE: Part of the autopilot.ChannelGraph interface.
func (d *databaseChannelGraph) ForEachNode(cb func(Node) error) error {
return d.db.ForEachNode(nil, func(tx *bbolt.Tx, n *channeldb.LightningNode) error {
return d.db.ForEachNode(nil, func(tx kvdb.ReadTx, n *channeldb.LightningNode) error {
// We'll skip over any node that doesn't have any advertised
// addresses. As we won't be able to reach them to actually

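The autopilot wrappers now receive kvdb.ReadTx instead of a raw *bbolt.Tx, so graph consumers only see the read-only half of the new abstraction. A minimal sketch of what a callback handed such a transaction can do (the bucket and key names below are invented for illustration, not part of this PR):

package example

import (
	"errors"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// metaBucketName is a hypothetical top-level bucket used only for this sketch.
var metaBucketName = []byte("example-meta")

// readMetaValue shows the shape of read-only access: a kvdb.ReadTx only hands
// out kvdb.ReadBucket values, so a callback written against it cannot mutate
// the database by construction.
func readMetaValue(tx kvdb.ReadTx, key []byte) ([]byte, error) {
	metaBucket := tx.ReadBucket(metaBucketName)
	if metaBucket == nil {
		return nil, errors.New("meta bucket not found")
	}

	// Copy the value out, as the returned slice is only guaranteed to be
	// valid for the lifetime of the transaction.
	value := metaBucket.Get(key)
	return append([]byte(nil), value...), nil
}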
@ -13,11 +13,11 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/htlcswitch"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnwallet"
@ -1237,10 +1237,10 @@ func newRetributionStore(db *channeldb.DB) *retributionStore {
// Add adds a retribution state to the retributionStore, which is then persisted
// to disk.
func (rs *retributionStore) Add(ret *retributionInfo) error {
return rs.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(rs.db, func(tx kvdb.RwTx) error {
// If this is our first contract breach, the retributionBucket
// won't exist, in which case, we just create a new bucket.
retBucket, err := tx.CreateBucketIfNotExists(retributionBucket)
retBucket, err := tx.CreateTopLevelBucket(retributionBucket)
if err != nil {
return err
}
@ -1264,8 +1264,8 @@ func (rs *retributionStore) Add(ret *retributionInfo) error {
// startup and re-register for confirmation notifications.
func (rs *retributionStore) Finalize(chanPoint *wire.OutPoint,
finalTx *wire.MsgTx) error {
return rs.db.Update(func(tx *bbolt.Tx) error {
justiceBkt, err := tx.CreateBucketIfNotExists(justiceTxnBucket)
return kvdb.Update(rs.db, func(tx kvdb.RwTx) error {
justiceBkt, err := tx.CreateTopLevelBucket(justiceTxnBucket)
if err != nil {
return err
}
@ -1291,8 +1291,8 @@ func (rs *retributionStore) GetFinalizedTxn(
chanPoint *wire.OutPoint) (*wire.MsgTx, error) {
var finalTxBytes []byte
if err := rs.db.View(func(tx *bbolt.Tx) error {
justiceBkt := tx.Bucket(justiceTxnBucket)
if err := kvdb.View(rs.db, func(tx kvdb.ReadTx) error {
justiceBkt := tx.ReadBucket(justiceTxnBucket)
if justiceBkt == nil {
return nil
}
@ -1325,8 +1325,8 @@ func (rs *retributionStore) GetFinalizedTxn(
// that has already been breached.
func (rs *retributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
var found bool
err := rs.db.View(func(tx *bbolt.Tx) error {
retBucket := tx.Bucket(retributionBucket)
err := kvdb.View(rs.db, func(tx kvdb.ReadTx) error {
retBucket := tx.ReadBucket(retributionBucket)
if retBucket == nil {
return nil
}
@ -1350,8 +1350,8 @@ func (rs *retributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
// Remove removes a retribution state and finalized justice transaction by
// channel point from the retribution store.
func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) error {
return rs.db.Update(func(tx *bbolt.Tx) error {
retBucket := tx.Bucket(retributionBucket)
return kvdb.Update(rs.db, func(tx kvdb.RwTx) error {
retBucket := tx.ReadWriteBucket(retributionBucket)
// We return an error if the bucket is not already created,
// since normal operation of the breach arbiter should never try
@ -1377,7 +1377,7 @@ func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) error {
// If we have not finalized this channel breach, we can exit
// early.
justiceBkt := tx.Bucket(justiceTxnBucket)
justiceBkt := tx.ReadWriteBucket(justiceTxnBucket)
if justiceBkt == nil {
return nil
}
@ -1389,10 +1389,10 @@ func (rs *retributionStore) Remove(chanPoint *wire.OutPoint) error {
// ForAll iterates through all stored retributions and executes the passed
// callback function on each retribution.
func (rs *retributionStore) ForAll(cb func(*retributionInfo) error) error {
return rs.db.View(func(tx *bbolt.Tx) error {
return kvdb.View(rs.db, func(tx kvdb.ReadTx) error {
// If the bucket does not exist, then there are no pending
// retributions.
retBucket := tx.Bucket(retributionBucket)
retBucket := tx.ReadBucket(retributionBucket)
if retBucket == nil {
return nil
}

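The retribution store shows the pattern applied throughout this change: closures over *bbolt.Tx passed to db.View/db.Update become the package-level kvdb.View and kvdb.Update helpers, with bucket lookups going through the explicit read or read/write accessors. A minimal sketch of that round trip, assuming a hypothetical bucket and record layout:

package example

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// recordsBucket is a hypothetical top-level bucket for this sketch.
var recordsBucket = []byte("example-records")

// putRecord writes a key/value pair inside a read/write transaction.
func putRecord(db kvdb.Backend, key, value []byte) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		bucket, err := tx.CreateTopLevelBucket(recordsBucket)
		if err != nil {
			return err
		}
		return bucket.Put(key, value)
	})
}

// getRecord reads the pair back inside a read-only transaction.
func getRecord(db kvdb.Backend, key []byte) ([]byte, error) {
	var value []byte
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		bucket := tx.ReadBucket(recordsBucket)
		if bucket == nil {
			return nil
		}
		// Copy out, since the slice is only valid inside the tx.
		value = append([]byte(nil), bucket.Get(key)...)
		return nil
	})
	return value, err
}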
@ -4,8 +4,8 @@ import (
"bytes"
"errors"
bolt "github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
@ -95,13 +95,13 @@ func NewHeightHintCache(db *channeldb.DB) (*HeightHintCache, error) {
// initBuckets ensures that the primary buckets used by the height hint cache
// are initialized so that we can assume their existence after startup.
func (c *HeightHintCache) initBuckets() error {
return c.db.Update(func(tx *bolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(spendHintBucket)
return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error {
_, err := tx.CreateTopLevelBucket(spendHintBucket)
if err != nil {
return err
}
_, err = tx.CreateBucketIfNotExists(confirmHintBucket)
_, err = tx.CreateTopLevelBucket(confirmHintBucket)
return err
})
}
@ -117,8 +117,8 @@ func (c *HeightHintCache) CommitSpendHint(height uint32,
Log.Tracef("Updating spend hint to height %d for %v", height,
spendRequests)
return c.db.Batch(func(tx *bolt.Tx) error {
spendHints := tx.Bucket(spendHintBucket)
return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error {
spendHints := tx.ReadWriteBucket(spendHintBucket)
if spendHints == nil {
return ErrCorruptedHeightHintCache
}
@ -148,8 +148,8 @@ func (c *HeightHintCache) CommitSpendHint(height uint32,
// cache for the outpoint.
func (c *HeightHintCache) QuerySpendHint(spendRequest SpendRequest) (uint32, error) {
var hint uint32
err := c.db.View(func(tx *bolt.Tx) error {
spendHints := tx.Bucket(spendHintBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
spendHints := tx.ReadBucket(spendHintBucket)
if spendHints == nil {
return ErrCorruptedHeightHintCache
}
@ -180,8 +180,8 @@ func (c *HeightHintCache) PurgeSpendHint(spendRequests ...SpendRequest) error {
Log.Tracef("Removing spend hints for %v", spendRequests)
return c.db.Batch(func(tx *bolt.Tx) error {
spendHints := tx.Bucket(spendHintBucket)
return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error {
spendHints := tx.ReadWriteBucket(spendHintBucket)
if spendHints == nil {
return ErrCorruptedHeightHintCache
}
@ -211,8 +211,8 @@ func (c *HeightHintCache) CommitConfirmHint(height uint32,
Log.Tracef("Updating confirm hints to height %d for %v", height,
confRequests)
return c.db.Batch(func(tx *bolt.Tx) error {
confirmHints := tx.Bucket(confirmHintBucket)
return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error {
confirmHints := tx.ReadWriteBucket(confirmHintBucket)
if confirmHints == nil {
return ErrCorruptedHeightHintCache
}
@ -242,8 +242,8 @@ func (c *HeightHintCache) CommitConfirmHint(height uint32,
// the cache for the transaction hash.
func (c *HeightHintCache) QueryConfirmHint(confRequest ConfRequest) (uint32, error) {
var hint uint32
err := c.db.View(func(tx *bolt.Tx) error {
confirmHints := tx.Bucket(confirmHintBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
confirmHints := tx.ReadBucket(confirmHintBucket)
if confirmHints == nil {
return ErrCorruptedHeightHintCache
}
@ -275,8 +275,8 @@ func (c *HeightHintCache) PurgeConfirmHint(confRequests ...ConfRequest) error {
Log.Tracef("Removing confirm hints for %v", confRequests)
return c.db.Batch(func(tx *bolt.Tx) error {
confirmHints := tx.Bucket(confirmHintBucket)
return kvdb.Batch(c.db.Backend, func(tx kvdb.RwTx) error {
confirmHints := tx.ReadWriteBucket(confirmHintBucket)
if confirmHints == nil {
return ErrCorruptedHeightHintCache
}

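The height hint cache swaps bbolt's db.Batch for kvdb.Batch, which takes the backend explicitly and may coalesce concurrent calls into a single underlying transaction. A small sketch of a batched write under those assumptions (the bucket name is invented for illustration):

package example

import (
	"encoding/binary"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// hintBucket is a hypothetical bucket used only for this sketch.
var hintBucket = []byte("example-hints")

// commitHint persists a single height hint. Because kvdb.Batch may merge this
// closure with other concurrent writers and retry it, the closure should be
// idempotent and must not mutate state outside the transaction.
func commitHint(db kvdb.Backend, key []byte, height uint32) error {
	return kvdb.Batch(db, func(tx kvdb.RwTx) error {
		hints, err := tx.CreateTopLevelBucket(hintBucket)
		if err != nil {
			return err
		}

		var heightBytes [4]byte
		binary.BigEndian.PutUint32(heightBytes[:], height)

		return hints.Put(key, heightBytes[:])
	})
}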
@ -15,7 +15,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
@ -692,7 +692,7 @@ func (c *OpenChannel) RefreshShortChanID() error {
c.Lock()
defer c.Unlock()
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -718,27 +718,30 @@ func (c *OpenChannel) RefreshShortChanID() error {
// fetchChanBucket is a helper function that returns the bucket where a
// channel's data resides in, given the public key for the node, the outpoint,
// and the chainhash that the channel resides on.
func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey,
outPoint *wire.OutPoint, chainHash chainhash.Hash) (*bbolt.Bucket, error) {
func fetchChanBucket(tx kvdb.ReadTx, nodeKey *btcec.PublicKey,
outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.ReadBucket, error) {
// First fetch the top level bucket which stores all data related to
// current, active channels.
openChanBucket := tx.Bucket(openChannelBucket)
openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil {
return nil, ErrNoChanDBExists
}
// TODO(roasbeef): CreateTopLevelBucket on the interface isn't like
// CreateIfNotExists, will return error
// Within this top level bucket, fetch the bucket dedicated to storing
// open channel data specific to the remote node.
nodePub := nodeKey.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(nodePub)
nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
if nodeChanBucket == nil {
return nil, ErrNoActiveChannels
}
// We'll then recurse down an additional layer in order to fetch the
// bucket for this particular chain.
chainBucket := nodeChanBucket.Bucket(chainHash[:])
chainBucket := nodeChanBucket.NestedReadBucket(chainHash[:])
if chainBucket == nil {
return nil, ErrNoActiveChannels
}
@ -749,7 +752,7 @@ func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey,
if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
return nil, err
}
chanBucket := chainBucket.Bucket(chanPointBuf.Bytes())
chanBucket := chainBucket.NestedReadBucket(chanPointBuf.Bytes())
if chanBucket == nil {
return nil, ErrChannelNotFound
}
@ -757,12 +760,27 @@ func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey,
return chanBucket, nil
}
// fetchChanBucketRw is a helper function that returns the bucket where a
// channel's data resides in, given the public key for the node, the outpoint,
// and the chainhash that the channel resides on. This differs from
// fetchChanBucket in that it returns a writeable bucket.
func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey, // nolint:interfacer
outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket, error) {
readBucket, err := fetchChanBucket(tx, nodeKey, outPoint, chainHash)
if err != nil {
return nil, err
}
return readBucket.(kvdb.RwBucket), nil
}
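fetchChanBucketRw builds on the read-only lookup and recovers a writeable bucket through a type assertion; the code relies on the fact that, inside a kvdb.RwTx, the buckets handed out also satisfy the read/write interface. A sketch of the same pattern on a hypothetical bucket:

package example

import (
	"errors"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// itemsBucket is a hypothetical top-level bucket for this sketch.
var itemsBucket = []byte("example-items")

// fetchItemsBucket works for both read-only and read/write transactions, since
// kvdb.RwTx satisfies kvdb.ReadTx.
func fetchItemsBucket(tx kvdb.ReadTx) (kvdb.ReadBucket, error) {
	bucket := tx.ReadBucket(itemsBucket)
	if bucket == nil {
		return nil, errors.New("items bucket not found")
	}
	return bucket, nil
}

// fetchItemsBucketRw reuses the read-only lookup, then asserts the result back
// to a writeable bucket, mirroring how fetchChanBucketRw wraps fetchChanBucket
// above. The assertion is only safe because tx really is a kvdb.RwTx.
func fetchItemsBucketRw(tx kvdb.RwTx) (kvdb.RwBucket, error) {
	bucket, err := fetchItemsBucket(tx)
	if err != nil {
		return nil, err
	}
	return bucket.(kvdb.RwBucket), nil
}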
// fullSync syncs the contents of an OpenChannel while re-using an existing
// database transaction.
func (c *OpenChannel) fullSync(tx *bbolt.Tx) error {
func (c *OpenChannel) fullSync(tx kvdb.RwTx) error {
// First fetch the top level bucket which stores all data related to
// current, active channels.
openChanBucket, err := tx.CreateBucketIfNotExists(openChannelBucket)
openChanBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
if err != nil {
return err
}
@ -792,7 +810,7 @@ func (c *OpenChannel) fullSync(tx *bbolt.Tx) error {
chanPointBuf.Bytes(),
)
switch {
case err == bbolt.ErrBucketExists:
case err == kvdb.ErrBucketExists:
// If this channel already exists, then in order to avoid
// overriding it, we'll return an error back up to the caller.
return ErrChanAlreadyExists
@ -809,7 +827,7 @@ func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error {
c.Lock()
defer c.Unlock()
if err := c.Db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -825,7 +843,7 @@ func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error {
channel.IsPending = false
channel.ShortChannelID = openLoc
return putOpenChannel(chanBucket, channel)
return putOpenChannel(chanBucket.(kvdb.RwBucket), channel)
}); err != nil {
return err
}
@ -849,7 +867,7 @@ func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error {
return err
}
putCommitPoint := func(chanBucket *bbolt.Bucket) error {
putCommitPoint := func(chanBucket kvdb.RwBucket) error {
return chanBucket.Put(dataLossCommitPointKey, b.Bytes())
}
@ -861,7 +879,7 @@ func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error {
func (c *OpenChannel) DataLossCommitPoint() (*btcec.PublicKey, error) {
var commitPoint *btcec.PublicKey
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -990,7 +1008,7 @@ func (c *OpenChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) {
// active.
//
// NOTE: The primary mutex should already be held before this method is called.
func (c *OpenChannel) isBorked(chanBucket *bbolt.Bucket) (bool, error) {
func (c *OpenChannel) isBorked(chanBucket kvdb.ReadBucket) (bool, error) {
channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
if err != nil {
return false, err
@ -1042,14 +1060,14 @@ func (c *OpenChannel) markBroadcasted(status ChannelStatus, key []byte,
// If a closing tx is provided, we'll generate a closure to write the
// transaction in the appropriate bucket under the given key.
var putClosingTx func(*bbolt.Bucket) error
var putClosingTx func(kvdb.RwBucket) error
if closeTx != nil {
var b bytes.Buffer
if err := WriteElement(&b, closeTx); err != nil {
return err
}
putClosingTx = func(chanBucket *bbolt.Bucket) error {
putClosingTx = func(chanBucket kvdb.RwBucket) error {
return chanBucket.Put(key, b.Bytes())
}
}
@ -1083,7 +1101,7 @@ func (c *OpenChannel) BroadcastedCooperative() (*wire.MsgTx, error) {
func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) {
var closeTx *wire.MsgTx
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -1113,10 +1131,10 @@ func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) {
// list of closures that are given the chanBucket in order to atomically add
// extra information together with the new status.
func (c *OpenChannel) putChanStatus(status ChannelStatus,
fs ...func(*bbolt.Bucket) error) error {
fs ...func(kvdb.RwBucket) error) error {
if err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1159,8 +1177,8 @@ func (c *OpenChannel) putChanStatus(status ChannelStatus,
}
func (c *OpenChannel) clearChanStatus(status ChannelStatus) error {
if err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1189,7 +1207,7 @@ func (c *OpenChannel) clearChanStatus(status ChannelStatus) error {
// putOpenChannel serializes and stores the current state of the channel in its
// entirety.
func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func putOpenChannel(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
// First, we'll write out all the relatively static fields, that are
// decided upon initial channel creation.
if err := putChanInfo(chanBucket, channel); err != nil {
@ -1213,7 +1231,7 @@ func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
// fetchOpenChannel retrieves and deserializes (including decrypting sensitive
// fields) the complete channel currently active with the passed nodeID.
func fetchOpenChannel(chanBucket *bbolt.Bucket,
func fetchOpenChannel(chanBucket kvdb.ReadBucket,
chanPoint *wire.OutPoint) (*OpenChannel, error) {
channel := &OpenChannel{
@ -1260,20 +1278,20 @@ func (c *OpenChannel) SyncPending(addr net.Addr, pendingHeight uint32) error {
c.FundingBroadcastHeight = pendingHeight
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return syncNewChannel(tx, c, []net.Addr{addr})
})
}
// syncNewChannel will write the passed channel to disk, and also create a
// LinkNode (if needed) for the channel peer.
func syncNewChannel(tx *bbolt.Tx, c *OpenChannel, addrs []net.Addr) error {
func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) error {
// First, sync all the persistent channel state to disk.
if err := c.fullSync(tx); err != nil {
return err
}
nodeInfoBucket, err := tx.CreateBucketIfNotExists(nodeInfoBucket)
nodeInfoBucket, err := tx.CreateTopLevelBucket(nodeInfoBucket)
if err != nil {
return err
}
@ -1316,8 +1334,8 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
return ErrNoRestoredChannelMutation
}
err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1786,10 +1804,10 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
return ErrNoRestoredChannelMutation
}
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
// First, we'll grab the writable bucket where this channel's
// data resides.
chanBucket, err := fetchChanBucket(
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1854,7 +1872,7 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
// these pointers, causing the tip and the tail to point to the same entry.
func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) {
var cd *CommitDiff
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -1891,7 +1909,7 @@ func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) {
// updates that still need to be signed for.
func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, error) {
var updates []LogUpdate
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -1932,8 +1950,8 @@ func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) error {
c.RemoteNextRevocation = revKey
err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1969,8 +1987,8 @@ func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg) error {
var newRemoteCommit *ChannelCommitment
err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -2089,7 +2107,7 @@ func (c *OpenChannel) LoadFwdPkgs() ([]*FwdPkg, error) {
defer c.RUnlock()
var fwdPkgs []*FwdPkg
if err := c.Db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
var err error
fwdPkgs, err = c.Packager.LoadFwdPkgs(tx)
return err
@ -2107,7 +2125,7 @@ func (c *OpenChannel) AckAddHtlcs(addRefs ...AddRef) error {
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.AckAddHtlcs(tx, addRefs...)
})
}
@ -2120,7 +2138,7 @@ func (c *OpenChannel) AckSettleFails(settleFailRefs ...SettleFailRef) error {
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.AckSettleFails(tx, settleFailRefs...)
})
}
@ -2131,7 +2149,7 @@ func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) error {
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.SetFwdFilter(tx, height, fwdFilter)
})
}
@ -2144,7 +2162,7 @@ func (c *OpenChannel) RemoveFwdPkg(height uint64) error {
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.RemovePkg(tx, height)
})
}
@ -2165,7 +2183,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
}
var commit ChannelCommitment
if err := c.Db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -2173,7 +2191,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
return err
}
logBucket := chanBucket.Bucket(revocationLogBucket)
logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
if logBucket == nil {
return ErrNoPastDeltas
}
@ -2182,7 +2200,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
// this channel, we'll jump to the _last_ key in the bucket. As we
// store the update number on disk in a big-endian format,
// this will retrieve the latest entry.
cursor := logBucket.Cursor()
cursor := logBucket.ReadCursor()
_, tailLogEntry := cursor.Last()
logEntryReader := bytes.NewReader(tailLogEntry)
@ -2212,7 +2230,7 @@ func (c *OpenChannel) CommitmentHeight() (uint64, error) {
defer c.RUnlock()
var height uint64
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open
// channels.
chanBucket, err := fetchChanBucket(
@ -2247,7 +2265,7 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e
defer c.RUnlock()
var commit ChannelCommitment
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -2255,7 +2273,7 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e
return err
}
logBucket := chanBucket.Bucket(revocationLogBucket)
logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
if logBucket == nil {
return ErrNoPastDeltas
}
@ -2405,19 +2423,19 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
openChanBucket := tx.Bucket(openChannelBucket)
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
openChanBucket := tx.ReadWriteBucket(openChannelBucket)
if openChanBucket == nil {
return ErrNoChanDBExists
}
nodePub := c.IdentityPub.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(nodePub)
nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
if nodeChanBucket == nil {
return ErrNoActiveChannels
}
chainBucket := nodeChanBucket.Bucket(c.ChainHash[:])
chainBucket := nodeChanBucket.NestedReadWriteBucket(c.ChainHash[:])
if chainBucket == nil {
return ErrNoActiveChannels
}
@ -2428,7 +2446,9 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
return err
}
chanKey := chanPointBuf.Bytes()
chanBucket := chainBucket.Bucket(chanKey)
chanBucket := chainBucket.NestedReadWriteBucket(
chanKey,
)
if chanBucket == nil {
return ErrNoActiveChannels
}
@ -2445,28 +2465,28 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
// Now that the index to this channel has been deleted, purge
// the remaining channel metadata from the database.
err = deleteOpenChannel(chanBucket, chanPointBuf.Bytes())
err = deleteOpenChannel(chanBucket)
if err != nil {
return err
}
// With the base channel data deleted, attempt to delete the
// information stored within the revocation log.
logBucket := chanBucket.Bucket(revocationLogBucket)
logBucket := chanBucket.NestedReadWriteBucket(revocationLogBucket)
if logBucket != nil {
err = chanBucket.DeleteBucket(revocationLogBucket)
err = chanBucket.DeleteNestedBucket(revocationLogBucket)
if err != nil {
return err
}
}
err = chainBucket.DeleteBucket(chanPointBuf.Bytes())
err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes())
if err != nil {
return err
}
// Add channel state to the historical channel bucket.
historicalBucket, err := tx.CreateBucketIfNotExists(
historicalBucket, err := tx.CreateTopLevelBucket(
historicalChannelBucket,
)
if err != nil {
@ -2570,7 +2590,7 @@ func (c *OpenChannel) Snapshot() *ChannelSnapshot {
// latest fully committed state is returned. The first commitment returned is
// the local commitment, and the second returned is the remote commitment.
func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, error) {
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -2592,7 +2612,7 @@ func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitmen
// acting on a possible contract breach to ensure that the caller has the most
// up-to-date information required to deliver justice.
func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -2609,10 +2629,10 @@ func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
return c.RevocationStore, nil
}
func putChannelCloseSummary(tx *bbolt.Tx, chanID []byte,
func putChannelCloseSummary(tx kvdb.RwTx, chanID []byte,
summary *ChannelCloseSummary, lastChanState *OpenChannel) error {
closedChanBucket, err := tx.CreateBucketIfNotExists(closedChannelBucket)
closedChanBucket, err := tx.CreateTopLevelBucket(closedChannelBucket)
if err != nil {
return err
}
@ -2788,7 +2808,7 @@ func fundingTxPresent(channel *OpenChannel) bool {
!channel.hasChanStatus(ChanStatusRestored)
}
func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
var w bytes.Buffer
if err := WriteElements(&w,
channel.ChanType, channel.ChainHash, channel.FundingOutpoint,
@ -2835,7 +2855,7 @@ func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
// putOptionalUpfrontShutdownScript adds a shutdown script under the key
// provided if it has a non-zero length.
func putOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte,
func putOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte,
script []byte) error {
// If the script is empty, we do not need to add anything.
if len(script) == 0 {
@ -2853,7 +2873,7 @@ func putOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte,
// getOptionalUpfrontShutdownScript reads the shutdown script stored under the
// key provided if it is present. Upfront shutdown scripts are optional, so the
// function returns with no error if the key is not present.
func getOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte,
func getOptionalUpfrontShutdownScript(chanBucket kvdb.ReadBucket, key []byte,
script *lnwire.DeliveryAddress) error {
// Return early if the bucket does not exist; a shutdown script was not set.
@ -2885,7 +2905,7 @@ func serializeChanCommit(w io.Writer, c *ChannelCommitment) error {
return SerializeHtlcs(w, c.Htlcs...)
}
func putChanCommitment(chanBucket *bbolt.Bucket, c *ChannelCommitment,
func putChanCommitment(chanBucket kvdb.RwBucket, c *ChannelCommitment,
local bool) error {
var commitKey []byte
@ -2903,7 +2923,7 @@ func putChanCommitment(chanBucket *bbolt.Bucket, c *ChannelCommitment,
return chanBucket.Put(commitKey, b.Bytes())
}
func putChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
// If this is a restored channel, then we don't have any commitments to
// write.
if channel.hasChanStatus(ChanStatusRestored) {
@ -2922,7 +2942,7 @@ func putChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
)
}
func putChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
var b bytes.Buffer
err := WriteElements(
@ -2957,7 +2977,7 @@ func readChanConfig(b io.Reader, c *ChannelConfig) error {
)
}
func fetchChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func fetchChanInfo(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
infoBytes := chanBucket.Get(chanInfoKey)
if infoBytes == nil {
return ErrNoChanInfoFound
@ -3024,7 +3044,7 @@ func deserializeChanCommit(r io.Reader) (ChannelCommitment, error) {
return c, nil
}
func fetchChanCommitment(chanBucket *bbolt.Bucket, local bool) (ChannelCommitment, error) {
func fetchChanCommitment(chanBucket kvdb.ReadBucket, local bool) (ChannelCommitment, error) {
var commitKey []byte
if local {
commitKey = append(chanCommitmentKey, byte(0x00))
@ -3041,7 +3061,7 @@ func fetchChanCommitment(chanBucket *bbolt.Bucket, local bool) (ChannelCommitmen
return deserializeChanCommit(r)
}
func fetchChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func fetchChanCommitments(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
var err error
// If this is a restored channel, then we don't have any commitments to
@ -3062,7 +3082,7 @@ func fetchChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error
return nil
}
func fetchChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func fetchChanRevocationState(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
revBytes := chanBucket.Get(revocationStateKey)
if revBytes == nil {
return ErrNoRevocationsFound
@ -3088,7 +3108,7 @@ func fetchChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) er
return ReadElements(r, &channel.RemoteNextRevocation)
}
func deleteOpenChannel(chanBucket *bbolt.Bucket, chanPointBytes []byte) error {
func deleteOpenChannel(chanBucket kvdb.RwBucket) error {
if err := chanBucket.Delete(chanInfoKey); err != nil {
return err
@ -3122,7 +3142,7 @@ func makeLogKey(updateNum uint64) [8]byte {
return key
}
func appendChannelLogEntry(log *bbolt.Bucket,
func appendChannelLogEntry(log kvdb.RwBucket,
commit *ChannelCommitment) error {
var b bytes.Buffer
@ -3134,7 +3154,7 @@ func appendChannelLogEntry(log *bbolt.Bucket,
return log.Put(logEntrykey[:], b.Bytes())
}
func fetchChannelLogEntry(log *bbolt.Bucket,
func fetchChannelLogEntry(log kvdb.ReadBucket,
updateNum uint64) (ChannelCommitment, error) {
logEntrykey := makeLogKey(updateNum)

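Most of channel.go is this same mechanical substitution: Bucket becomes NestedReadBucket or NestedReadWriteBucket depending on whether the surrounding transaction is read-only, and Cursor becomes ReadCursor; the bucket hierarchy itself is untouched. A condensed sketch of walking a two-level hierarchy through the read-only interfaces (bucket names invented for illustration):

package example

import (
	"errors"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// topBucket is a hypothetical top-level bucket holding one nested bucket per
// node, the same shape fetchChanBucket descends above.
var topBucket = []byte("example-top")

// fetchNodeEntry descends the hierarchy using only read-only accessors.
func fetchNodeEntry(tx kvdb.ReadTx, nodeKey, entryKey []byte) ([]byte, error) {
	top := tx.ReadBucket(topBucket)
	if top == nil {
		return nil, errors.New("top-level bucket not found")
	}

	nodeBucket := top.NestedReadBucket(nodeKey)
	if nodeBucket == nil {
		return nil, errors.New("node bucket not found")
	}

	value := nodeBucket.Get(entryKey)
	if value == nil {
		return nil, errors.New("entry not found")
	}
	return append([]byte(nil), value...), nil
}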
@ -11,8 +11,8 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migration12"
"github.com/lightningnetwork/lnd/channeldb/migration13"
"github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
@ -28,7 +28,7 @@ const (
// migration is a function which takes a prior outdated version of the database
// instances and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database.
type migration func(tx *bbolt.Tx) error
type migration func(tx kvdb.RwTx) error
type version struct {
number uint32
@ -141,7 +141,7 @@ var (
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
*bbolt.DB
kvdb.Backend
dbPath string
graph *ChannelGraph
clock clock.Clock
@ -165,20 +165,15 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
// Specify bbolt freelist options to reduce heap pressure in case the
// freelist grows to be very large.
options := &bbolt.Options{
NoFreelistSync: opts.NoFreelistSync,
FreelistType: bbolt.FreelistMapType,
}
bdb, err := bbolt.Open(path, dbFilePermission, options)
bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync)
if err != nil {
return nil, err
}
chanDB := &DB{
DB: bdb,
dbPath: dbPath,
clock: opts.clock,
Backend: bdb,
dbPath: dbPath,
clock: opts.clock,
}
chanDB.graph = newChannelGraph(
chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
@ -202,41 +197,41 @@ func (d *DB) Path() string {
// database. The deletion is done in a single transaction, therefore this
// operation is fully atomic.
func (d *DB) Wipe() error {
return d.Update(func(tx *bbolt.Tx) error {
err := tx.DeleteBucket(openChannelBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
err := tx.DeleteTopLevelBucket(openChannelBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(closedChannelBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(closedChannelBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(invoiceBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(invoiceBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(nodeInfoBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(nodeInfoBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(nodeBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(nodeBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(edgeBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(edgeBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(edgeIndexBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(edgeIndexBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(graphMetaBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(graphMetaBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
@ -256,36 +251,36 @@ func createChannelDB(dbPath string) error {
}
path := filepath.Join(dbPath, dbName)
bdb, err := bbolt.Open(path, dbFilePermission, nil)
bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true)
if err != nil {
return err
}
err = bdb.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucket(openChannelBucket); err != nil {
err = kvdb.Update(bdb, func(tx kvdb.RwTx) error {
if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(forwardingLogBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(forwardingLogBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(fwdPackagesKey); err != nil {
if _, err := tx.CreateTopLevelBucket(fwdPackagesKey); err != nil {
return err
}
if _, err := tx.CreateBucket(invoiceBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(nodeInfoBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(nodeInfoBucket); err != nil {
return err
}
nodes, err := tx.CreateBucket(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -298,7 +293,7 @@ func createChannelDB(dbPath string) error {
return err
}
edges, err := tx.CreateBucket(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -315,7 +310,7 @@ func createChannelDB(dbPath string) error {
return err
}
graphMeta, err := tx.CreateBucket(graphMetaBucket)
graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil {
return err
}
@ -324,7 +319,7 @@ func createChannelDB(dbPath string) error {
return err
}
if _, err := tx.CreateBucket(metaBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil {
return err
}
@ -357,7 +352,7 @@ func fileExists(path string) bool {
// zero-length slice is returned.
func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
var channels []*OpenChannel
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
var err error
channels, err = d.fetchOpenChannels(tx, nodeID)
return err
@ -370,11 +365,11 @@ func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error)
// stored currently active/open channels associated with the target nodeID. In
// the case that no active channels are known to have been created with this
// node, then a zero-length slice is returned.
func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
func (d *DB) fetchOpenChannels(tx kvdb.ReadTx,
nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
// Get the bucket dedicated to storing the metadata for open channels.
openChanBucket := tx.Bucket(openChannelBucket)
openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil {
return nil, nil
}
@ -382,7 +377,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// Within this top level bucket, fetch the bucket dedicated to storing
// open channel data specific to the remote node.
pub := nodeID.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(pub)
nodeChanBucket := openChanBucket.NestedReadBucket(pub)
if nodeChanBucket == nil {
return nil, nil
}
@ -398,7 +393,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// If we've found a valid chainhash bucket, then we'll retrieve
// that so we can extract all the channels.
chainBucket := nodeChanBucket.Bucket(chainHash)
chainBucket := nodeChanBucket.NestedReadBucket(chainHash)
if chainBucket == nil {
return fmt.Errorf("unable to read bucket for chain=%x",
chainHash[:])
@ -423,7 +418,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// fetchNodeChannels retrieves all active channels from the target chainBucket
// which is under a node's dedicated channel bucket. This function is typically
// used to fetch all the active channels related to a particular node.
func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error) {
func (d *DB) fetchNodeChannels(chainBucket kvdb.ReadBucket) ([]*OpenChannel, error) {
var channels []*OpenChannel
@ -437,7 +432,7 @@ func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error
// Once we've found a valid channel bucket, we'll extract it
// from the node's chain bucket.
chanBucket := chainBucket.Bucket(chanPoint)
chanBucket := chainBucket.NestedReadBucket(chanPoint)
var outPoint wire.OutPoint
err := readOutpoint(bytes.NewReader(chanPoint), &outPoint)
@ -482,10 +477,10 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
// structure and skipping fully decoding each channel, we save a good
// bit of CPU as we don't need to do things like decompress public
// keys.
chanScan := func(tx *bbolt.Tx) error {
chanScan := func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open
// channels.
openChanBucket := tx.Bucket(openChannelBucket)
openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil {
return ErrNoActiveChannels
}
@ -500,7 +495,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
return nil
}
nodeChanBucket := openChanBucket.Bucket(nodePub)
nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
if nodeChanBucket == nil {
return nil
}
@ -514,7 +509,9 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
return nil
}
chainBucket := nodeChanBucket.Bucket(chainHash)
chainBucket := nodeChanBucket.NestedReadBucket(
chainHash,
)
if chainBucket == nil {
return fmt.Errorf("unable to read "+
"bucket for chain=%x", chainHash[:])
@ -522,7 +519,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
// Finally we reach the leaf bucket that stores
// all the chanPoints for this node.
chanBucket := chainBucket.Bucket(
chanBucket := chainBucket.NestedReadBucket(
targetChanPoint.Bytes(),
)
if chanBucket == nil {
@ -544,7 +541,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
})
}
err := d.View(chanScan)
err := kvdb.View(d, chanScan)
if err != nil {
return nil, err
}
@ -636,10 +633,10 @@ func waitingCloseFilter(waitingClose bool) fetchChannelsFilter {
func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error) {
var channels []*OpenChannel
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open
// channels.
openChanBucket := tx.Bucket(openChannelBucket)
openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil {
return ErrNoActiveChannels
}
@ -647,7 +644,7 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// Next, fetch the bucket dedicated to storing metadata related
// to all nodes. All keys within this bucket are the serialized
// public keys of all our direct counterparties.
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return fmt.Errorf("node bucket not created")
}
@ -655,7 +652,7 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// Finally for each node public key in the bucket, fetch all
// the channels related to this particular node.
return nodeMetaBucket.ForEach(func(k, v []byte) error {
nodeChanBucket := openChanBucket.Bucket(k)
nodeChanBucket := openChanBucket.NestedReadBucket(k)
if nodeChanBucket == nil {
return nil
}
@ -670,7 +667,9 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// If we've found a valid chainhash bucket,
// then we'll retrieve that so we can extract
// all the channels.
chainBucket := nodeChanBucket.Bucket(chainHash)
chainBucket := nodeChanBucket.NestedReadBucket(
chainHash,
)
if chainBucket == nil {
return fmt.Errorf("unable to read "+
"bucket for chain=%x", chainHash[:])
@ -726,8 +725,8 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
var chanSummaries []*ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil {
return ErrNoClosedChannels
}
@ -764,8 +763,8 @@ var ErrClosedChannelNotFound = errors.New("unable to find closed channel summary
// point of the channel in question.
func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, error) {
var chanSummary *ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil {
return ErrClosedChannelNotFound
}
@ -798,15 +797,15 @@ func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
*ChannelCloseSummary, error) {
var chanSummary *ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil {
return ErrClosedChannelNotFound
}
// The first 30 bytes of the channel ID and outpoint will be
// equal.
cursor := closeBucket.Cursor()
cursor := closeBucket.ReadCursor()
op, c := cursor.Seek(cid[:30])
// We scan over all possible candidates for this channel ID.
@ -846,7 +845,7 @@ func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
// the pending funds in a channel that has been forcibly closed have been
// swept.
func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
return d.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
var b bytes.Buffer
if err := writeOutpoint(&b, chanPoint); err != nil {
return err
@ -854,7 +853,7 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
chanID := b.Bytes()
closedChanBucket, err := tx.CreateBucketIfNotExists(
closedChanBucket, err := tx.CreateTopLevelBucket(
closedChannelBucket,
)
if err != nil {
@ -899,7 +898,7 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
// pruneLinkNode determines whether we should garbage collect a link node from
// the database due to no longer having any open channels with it. If there are
// any left, then this acts as a no-op.
func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error {
func (d *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error {
openChannels, err := d.fetchOpenChannels(tx, remotePub)
if err != nil {
return fmt.Errorf("unable to fetch open channels for peer %x: "+
@ -919,7 +918,7 @@ func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error {
// PruneLinkNodes attempts to prune all link nodes found within the database
// with whom we no longer have any open channels.
func (d *DB) PruneLinkNodes() error {
return d.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
linkNodes, err := d.fetchAllLinkNodes(tx)
if err != nil {
return err
@ -963,7 +962,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
defer chanGraph.cacheMu.Unlock()
var chansRestored []uint64
err := d.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
for _, channelShell := range channelShells {
channel := channelShell.Chan
@ -1000,7 +999,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
Capacity: channel.Capacity,
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -1074,7 +1073,7 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
graphNode LightningNode
)
dbErr := d.View(func(tx *bbolt.Tx) error {
dbErr := kvdb.View(d, func(tx kvdb.ReadTx) error {
var err error
linkNode, err = fetchLinkNode(tx, nodePub)
@ -1085,7 +1084,7 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
// We'll also query the graph for this peer to see if they have
// any addresses that we don't currently have stored within the
// link node database.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -1212,7 +1211,7 @@ func (d *DB) syncVersions(versions []version) error {
migrations, migrationVersions := getMigrationsToApply(
versions, meta.DbVersionNumber,
)
return d.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
for i, migration := range migrations {
if migration == nil {
continue
@ -1260,12 +1259,12 @@ func getMigrationsToApply(versions []version, version uint32) ([]migration, []ui
// fetchHistoricalChanBucket returns the channel bucket for a given outpoint
// from the historical channel bucket. If the bucket does not exist,
// ErrNoHistoricalBucket is returned.
func fetchHistoricalChanBucket(tx *bbolt.Tx,
outPoint *wire.OutPoint) (*bbolt.Bucket, error) {
func fetchHistoricalChanBucket(tx kvdb.ReadTx,
outPoint *wire.OutPoint) (kvdb.ReadBucket, error) {
// First fetch the top level bucket which stores all data related to
// historically stored channels.
historicalChanBucket := tx.Bucket(historicalChannelBucket)
historicalChanBucket := tx.ReadBucket(historicalChannelBucket)
if historicalChanBucket == nil {
return nil, ErrNoHistoricalBucket
}
@ -1276,7 +1275,7 @@ func fetchHistoricalChanBucket(tx *bbolt.Tx,
if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
return nil, err
}
chanBucket := historicalChanBucket.Bucket(chanPointBuf.Bytes())
chanBucket := historicalChanBucket.NestedReadBucket(chanPointBuf.Bytes())
if chanBucket == nil {
return nil, ErrChannelNotFound
}
@ -1288,7 +1287,7 @@ func fetchHistoricalChanBucket(tx *bbolt.Tx,
// bucket.
func (d *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) {
var channel *OpenChannel
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchHistoricalChanBucket(tx, outPoint)
if err != nil {
return err

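db.go now constructs the store through the backend-agnostic constructors rather than calling bbolt.Open directly: the backend is selected by name, with bolt the only implementation in this PR. A sketch of standing up a database against that API (the file name and the final no-freelist-sync argument are arbitrary choices for this sketch, matching the argument order used above):

package example

import (
	"path/filepath"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// createBoltBackend creates a fresh bolt-backed kvdb.Backend in the given
// directory; kvdb.Open would be used instead when the file already exists.
func createBoltBackend(dir string) (kvdb.Backend, error) {
	path := filepath.Join(dir, "example.db")

	db, err := kvdb.Create(kvdb.BoltBackendName, path, true)
	if err != nil {
		return nil, err
	}
	return db, nil
}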
@ -8,7 +8,7 @@ import (
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
@ -61,7 +61,7 @@ type duplicateHTLCAttemptInfo struct {
// fetchDuplicatePaymentStatus fetches the payment status of the payment. If the
// payment isn't found, it will default to "StatusUnknown".
func fetchDuplicatePaymentStatus(bucket *bbolt.Bucket) PaymentStatus {
func fetchDuplicatePaymentStatus(bucket kvdb.ReadBucket) PaymentStatus {
if bucket.Get(duplicatePaymentSettleInfoKey) != nil {
return StatusSucceeded
}
@ -129,7 +129,7 @@ func deserializeDuplicatePaymentCreationInfo(r io.Reader) (
return c, nil
}
func fetchDuplicatePayment(bucket *bbolt.Bucket) (*MPPayment, error) {
func fetchDuplicatePayment(bucket kvdb.ReadBucket) (*MPPayment, error) {
seqBytes := bucket.Get(duplicatePaymentSequenceKey)
if seqBytes == nil {
return nil, fmt.Errorf("sequence number not found")
@ -209,7 +209,7 @@ func fetchDuplicatePayment(bucket *bbolt.Bucket) (*MPPayment, error) {
return payment, nil
}
func fetchDuplicatePayments(paymentHashBucket *bbolt.Bucket) ([]*MPPayment,
func fetchDuplicatePayments(paymentHashBucket kvdb.ReadBucket) ([]*MPPayment,
error) {
var payments []*MPPayment
@ -217,13 +217,13 @@ func fetchDuplicatePayments(paymentHashBucket *bbolt.Bucket) ([]*MPPayment,
// For older versions of lnd, duplicate payments to a payment hash were
// possible. These will be found in a sub-bucket indexed by their
// sequence number if available.
dup := paymentHashBucket.Bucket(duplicatePaymentsBucket)
dup := paymentHashBucket.NestedReadBucket(duplicatePaymentsBucket)
if dup == nil {
return nil, nil
}
err := dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k)
subBucket := dup.NestedReadBucket(k)
if subBucket == nil {
// We expect one bucket for each duplicate to be found.
return fmt.Errorf("non bucket element" +

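The duplicate-payment reader above iterates sub-buckets by key: ForEach walks the parent bucket's keys and NestedReadBucket resolves each one to its nested bucket, with a nil result meaning the key holds a plain value rather than a bucket. A compact sketch of that idiom with invented names:

package example

import (
	"fmt"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// forEachSubBucket invokes cb for every nested bucket directly under parent,
// skipping plain key/value entries. This mirrors how fetchDuplicatePayments
// walks the per-sequence-number sub-buckets above.
func forEachSubBucket(parent kvdb.ReadBucket,
	cb func(key []byte, sub kvdb.ReadBucket) error) error {

	return parent.ForEach(func(k, _ []byte) error {
		sub := parent.NestedReadBucket(k)
		if sub == nil {
			// Not a bucket; nothing to do for plain values.
			return nil
		}
		if err := cb(k, sub); err != nil {
			return fmt.Errorf("sub-bucket %x: %v", k, err)
		}
		return nil
	})
}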
@ -6,7 +6,7 @@ import (
"sort"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -111,10 +111,10 @@ func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error {
var timestamp [8]byte
return f.db.Batch(func(tx *bbolt.Tx) error {
return kvdb.Batch(f.db.Backend, func(tx kvdb.RwTx) error {
// First, we'll fetch the bucket that stores our time series
// log.
logBucket, err := tx.CreateBucketIfNotExists(
logBucket, err := tx.CreateTopLevelBucket(
forwardingLogBucket,
)
if err != nil {
@ -204,10 +204,10 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e
recordsToSkip := q.IndexOffset
recordOffset := q.IndexOffset
err := f.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(f.db, func(tx kvdb.ReadTx) error {
// If the bucket wasn't found, then there aren't any events to
// be returned.
logBucket := tx.Bucket(forwardingLogBucket)
logBucket := tx.ReadBucket(forwardingLogBucket)
if logBucket == nil {
return ErrNoForwardingEvents
}
@ -223,7 +223,7 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e
// our seek through the log in order to satisfy the query.
// We'll continue until either we reach the end of the range,
// or reach our max number of events.
logCursor := logBucket.Cursor()
logCursor := logBucket.ReadCursor()
timestamp, events := logCursor.Seek(startTime[:])
for ; timestamp != nil && bytes.Compare(timestamp, endTime[:]) <= 0; timestamp, events = logCursor.Next() {
// If our current return payload exceeds the max number

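The forwarding log query keeps its cursor-based range scan and only obtains the cursor through ReadCursor. A trimmed sketch of a time-ordered scan, assuming keys are 8-byte big-endian timestamps as in the log above (the bucket name is invented):

package example

import (
	"bytes"
	"encoding/binary"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// eventsBucket is a hypothetical bucket used only for this sketch.
var eventsBucket = []byte("example-events")

// scanRange collects all values whose 8-byte big-endian keys fall within
// [start, end], mirroring the Seek/Next loop used by ForwardingLog.Query.
func scanRange(tx kvdb.ReadTx, start, end uint64) ([][]byte, error) {
	bucket := tx.ReadBucket(eventsBucket)
	if bucket == nil {
		return nil, nil
	}

	var startKey, endKey [8]byte
	binary.BigEndian.PutUint64(startKey[:], start)
	binary.BigEndian.PutUint64(endKey[:], end)

	var results [][]byte
	cursor := bucket.ReadCursor()
	for k, v := cursor.Seek(startKey[:]); k != nil &&
		bytes.Compare(k, endKey[:]) <= 0; k, v = cursor.Next() {

		// Copy each value out of the transaction before returning it.
		results = append(results, append([]byte(nil), v...))
	}
	return results, nil
}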
@ -7,7 +7,7 @@ import (
"fmt"
"io"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -318,7 +318,7 @@ type SettleFailRef struct {
type SettleFailAcker interface {
// AckSettleFails atomically updates the settle-fail filters in *other*
// channels' forwarding packages.
AckSettleFails(tx *bbolt.Tx, settleFailRefs ...SettleFailRef) error
AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error
}
// GlobalFwdPkgReader is an interface used to retrieve the forwarding packages
@ -326,7 +326,7 @@ type SettleFailAcker interface {
type GlobalFwdPkgReader interface {
// LoadChannelFwdPkgs loads all known forwarding packages for the given
// channel.
LoadChannelFwdPkgs(tx *bbolt.Tx,
LoadChannelFwdPkgs(tx kvdb.RwTx,
source lnwire.ShortChannelID) ([]*FwdPkg, error)
}
@ -357,14 +357,14 @@ func NewSwitchPackager() *SwitchPackager {
// AckSettleFails atomically updates the settle-fail filters in *other*
// channels' forwarding packages, to mark that the switch has received a settle
// or fail residing in the forwarding package of a link.
func (*SwitchPackager) AckSettleFails(tx *bbolt.Tx,
func (*SwitchPackager) AckSettleFails(tx kvdb.RwTx,
settleFailRefs ...SettleFailRef) error {
return ackSettleFails(tx, settleFailRefs)
}
// LoadChannelFwdPkgs loads all forwarding packages for a particular channel.
func (*SwitchPackager) LoadChannelFwdPkgs(tx *bbolt.Tx,
func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RwTx,
source lnwire.ShortChannelID) ([]*FwdPkg, error) {
return loadChannelFwdPkgs(tx, source)
@ -376,19 +376,19 @@ func (*SwitchPackager) LoadChannelFwdPkgs(tx *bbolt.Tx,
type FwdPackager interface {
// AddFwdPkg serializes and writes a FwdPkg for this channel at the
// remote commitment height included in the forwarding package.
AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error
AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error
// SetFwdFilter looks up the forwarding package at the remote `height`
// and sets the `fwdFilter`, marking the Adds for which:
// 1) We are not the exit node
// 2) Passed all validation
// 3) Should be forwarded to the switch immediately after a failure
SetFwdFilter(tx *bbolt.Tx, height uint64, fwdFilter *PkgFilter) error
SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *PkgFilter) error
// AckAddHtlcs atomically updates the add filters in this channel's
// forwarding packages to mark the resolution of an Add that was
// received from the remote party.
AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error
AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error
// SettleFailAcker allows a link to acknowledge settle/fail HTLCs
// belonging to other channels.
@ -396,11 +396,11 @@ type FwdPackager interface {
// LoadFwdPkgs loads all known forwarding packages owned by this
// channel.
LoadFwdPkgs(tx *bbolt.Tx) ([]*FwdPkg, error)
LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error)
// RemovePkg deletes a forwarding package owned by this channel at
// the provided remote `height`.
RemovePkg(tx *bbolt.Tx, height uint64) error
RemovePkg(tx kvdb.RwTx, height uint64) error
}
// ChannelPackager is used by a channel to manage the lifecycle of its forwarding
@ -420,8 +420,8 @@ func NewChannelPackager(source lnwire.ShortChannelID) *ChannelPackager {
}
// AddFwdPkg writes a newly locked in forwarding package to disk.
func (*ChannelPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error {
fwdPkgBkt, err := tx.CreateBucketIfNotExists(fwdPackagesKey)
func (*ChannelPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error {
fwdPkgBkt, err := tx.CreateTopLevelBucket(fwdPackagesKey)
if err != nil {
return err
}
@ -485,7 +485,7 @@ func (*ChannelPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error {
}
// putLogUpdate writes an htlc to the provided `bkt`, using `index` as the key.
func putLogUpdate(bkt *bbolt.Bucket, idx uint16, htlc *LogUpdate) error {
func putLogUpdate(bkt kvdb.RwBucket, idx uint16, htlc *LogUpdate) error {
var b bytes.Buffer
if err := htlc.Encode(&b); err != nil {
return err
@ -497,19 +497,19 @@ func putLogUpdate(bkt *bbolt.Bucket, idx uint16, htlc *LogUpdate) error {
// LoadFwdPkgs scans the forwarding log for any packages that haven't been
// processed, and returns their deserialized log updates in a map indexed by the
// remote commitment height at which the updates were locked in.
func (p *ChannelPackager) LoadFwdPkgs(tx *bbolt.Tx) ([]*FwdPkg, error) {
func (p *ChannelPackager) LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error) {
return loadChannelFwdPkgs(tx, p.source)
}
// loadChannelFwdPkgs loads all forwarding packages owned by `source`.
func loadChannelFwdPkgs(tx *bbolt.Tx, source lnwire.ShortChannelID) ([]*FwdPkg, error) {
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
func loadChannelFwdPkgs(tx kvdb.ReadTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) {
fwdPkgBkt := tx.ReadBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return nil, nil
}
sourceKey := makeLogKey(source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:])
sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
if sourceBkt == nil {
return nil, nil
}
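The read path above follows the new convention: tx.ReadBucket for the top-level bucket, NestedReadBucket for children, and a nil bucket at any level means "no data" rather than an error. A small sketch of that lookup convention with hypothetical bucket names:

```go
package kvdbsketch

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// lookupNested fetches a value from a nested bucket, treating missing
// buckets as "not found" instead of returning an error, matching the
// convention used by loadChannelFwdPkgs above. Both bucket names are
// hypothetical.
func lookupNested(db kvdb.Backend, subBucket, key []byte) ([]byte, error) {
	var value []byte
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		top := tx.ReadBucket([]byte("top-level"))
		if top == nil {
			return nil
		}
		nested := top.NestedReadBucket(subBucket)
		if nested == nil {
			return nil
		}

		// Copy the value out, since the slice returned by Get is
		// only valid for the lifetime of the transaction.
		if v := nested.Get(key); v != nil {
			value = append([]byte(nil), v...)
		}
		return nil
	})
	return value, err
}
```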
@ -543,23 +543,23 @@ func loadChannelFwdPkgs(tx *bbolt.Tx, source lnwire.ShortChannelID) ([]*FwdPkg,
// loadFwdPkg reads the packager's fwd pkg at a given height, and determines the
// appropriate FwdState.
func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID,
func loadFwdPkg(fwdPkgBkt kvdb.ReadBucket, source lnwire.ShortChannelID,
height uint64) (*FwdPkg, error) {
sourceKey := makeLogKey(source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:])
sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
if sourceBkt == nil {
return nil, ErrCorruptedFwdPkg
}
heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:])
heightBkt := sourceBkt.NestedReadBucket(heightKey[:])
if heightBkt == nil {
return nil, ErrCorruptedFwdPkg
}
// Load ADDs from disk.
addBkt := heightBkt.Bucket(addBucketKey)
addBkt := heightBkt.NestedReadBucket(addBucketKey)
if addBkt == nil {
return nil, ErrCorruptedFwdPkg
}
@ -582,7 +582,7 @@ func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID,
}
// Load SETTLE/FAILs from disk.
failSettleBkt := heightBkt.Bucket(failSettleBucketKey)
failSettleBkt := heightBkt.NestedReadBucket(failSettleBucketKey)
if failSettleBkt == nil {
return nil, ErrCorruptedFwdPkg
}
@ -649,7 +649,7 @@ func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID,
// loadHtlcs retrieves all serialized htlcs in a bucket, returning
// them in order of the indexes they were written under.
func loadHtlcs(bkt *bbolt.Bucket) ([]LogUpdate, error) {
func loadHtlcs(bkt kvdb.ReadBucket) ([]LogUpdate, error) {
var htlcs []LogUpdate
if err := bkt.ForEach(func(_, v []byte) error {
var htlc LogUpdate
@ -674,22 +674,22 @@ func loadHtlcs(bkt *bbolt.Bucket) ([]LogUpdate, error) {
// leaving this channel. After a restart, we skip validation of these Adds,
// since they are assumed to have already been validated, and make the switch or
// outgoing link responsible for handling replays.
func (p *ChannelPackager) SetFwdFilter(tx *bbolt.Tx, height uint64,
func (p *ChannelPackager) SetFwdFilter(tx kvdb.RwTx, height uint64,
fwdFilter *PkgFilter) error {
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg
}
source := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(source[:])
sourceBkt := fwdPkgBkt.NestedReadWriteBucket(source[:])
if sourceBkt == nil {
return ErrCorruptedFwdPkg
}
heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:])
heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil {
return ErrCorruptedFwdPkg
}
@ -713,18 +713,18 @@ func (p *ChannelPackager) SetFwdFilter(tx *bbolt.Tx, height uint64,
// AckAddHtlcs accepts a list of references to add htlcs, and updates the
// AckAddFilter of those forwarding packages to indicate that a settle or fail
// has been received in response to the add.
func (p *ChannelPackager) AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error {
func (p *ChannelPackager) AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error {
if len(addRefs) == 0 {
return nil
}
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg
}
sourceKey := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:])
sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceKey[:])
if sourceBkt == nil {
return ErrCorruptedFwdPkg
}
@ -753,11 +753,11 @@ func (p *ChannelPackager) AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error {
// ackAddHtlcsAtHeight updates the AddAckFilter of a single forwarding package
// with a list of indexes, writing the resulting filter back in its place.
func ackAddHtlcsAtHeight(sourceBkt *bbolt.Bucket, height uint64,
func ackAddHtlcsAtHeight(sourceBkt kvdb.RwBucket, height uint64,
indexes []uint16) error {
heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:])
heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil {
// If the height bucket isn't found, this could be because the
// forwarding package was already removed. We'll return nil to
@ -796,17 +796,17 @@ func ackAddHtlcsAtHeight(sourceBkt *bbolt.Bucket, height uint64,
// package. This should only be called after the source of the Add has locked in
// the settle/fail, or it becomes otherwise safe to forgo retransmitting the
// settle/fail after a restart.
func (p *ChannelPackager) AckSettleFails(tx *bbolt.Tx, settleFailRefs ...SettleFailRef) error {
func (p *ChannelPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error {
return ackSettleFails(tx, settleFailRefs)
}
// ackSettleFails persistently acknowledges a batch of settle fail references.
func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error {
func ackSettleFails(tx kvdb.RwTx, settleFailRefs []SettleFailRef) error {
if len(settleFailRefs) == 0 {
return nil
}
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg
}
@ -832,7 +832,7 @@ func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error {
// settle/fail htlcs.
for dest, destHeights := range destHeightDiffs {
destKey := makeLogKey(dest.ToUint64())
destBkt := fwdPkgBkt.Bucket(destKey[:])
destBkt := fwdPkgBkt.NestedReadWriteBucket(destKey[:])
if destBkt == nil {
// If the destination bucket is not found, this is
// likely the result of the destination channel being
@ -855,11 +855,11 @@ func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error {
// ackSettleFailsAtHeight given a destination bucket, acks the provided indexes
// at a particular height by updating the settle fail filter.
func ackSettleFailsAtHeight(destBkt *bbolt.Bucket, height uint64,
func ackSettleFailsAtHeight(destBkt kvdb.RwBucket, height uint64,
indexes []uint16) error {
heightKey := makeLogKey(height)
heightBkt := destBkt.Bucket(heightKey[:])
heightBkt := destBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil {
// If the height bucket isn't found, this could be because the
// forwarding package was already removed. We'll return nil to
@ -895,21 +895,21 @@ func ackSettleFailsAtHeight(destBkt *bbolt.Bucket, height uint64,
// RemovePkg deletes the forwarding package at the given height from the
// packager's source bucket.
func (p *ChannelPackager) RemovePkg(tx *bbolt.Tx, height uint64) error {
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
func (p *ChannelPackager) RemovePkg(tx kvdb.RwTx, height uint64) error {
fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return nil
}
sourceBytes := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceBytes[:])
sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceBytes[:])
if sourceBkt == nil {
return ErrCorruptedFwdPkg
}
heightKey := makeLogKey(height)
return sourceBkt.DeleteBucket(heightKey[:])
return sourceBkt.DeleteNestedBucket(heightKey[:])
}
// uint16Key writes the provided 16-bit unsigned integer to a 2-byte slice.
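The body of uint16Key is not shown in this hunk; a plausible implementation, assuming a big-endian encoding consistent with the 8-byte log keys built elsewhere in this file, would be:

```go
package kvdbsketch

import "encoding/binary"

// uint16Key is sketched here for illustration only: a 2-byte big-endian
// encoding, so that keys sort numerically under bolt's byte-wise key
// ordering, in line with the 8-byte log keys used in this file.
func uint16Key(i uint16) []byte {
	key := make([]byte, 2)
	binary.BigEndian.PutUint16(key, i)
	return key
}
```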

@ -8,8 +8,8 @@ import (
"testing"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -207,7 +207,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
// Next, create and write a new forwarding package with no htlcs.
fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, nil)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -226,7 +226,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
// Now, write the forwarding decision. In this case, it's just an empty
// fwd filter.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfilter: %v", err)
@ -244,7 +244,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -279,7 +279,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
nAdds := len(adds)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -300,7 +300,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
// added any adds to the fwdfilter, which would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfilter: %v", err)
@ -324,7 +324,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef)
}); err != nil {
t.Fatalf("unable to ack add htlc: %v", err)
@ -343,7 +343,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -381,7 +381,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -402,7 +402,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
// added any adds to the fwdfilter, which would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfilter: %v", err)
@ -428,7 +428,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef)
}); err != nil {
t.Fatalf("unable to ack settle fail: %v", err)
@ -448,7 +448,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -486,7 +486,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
nAdds := len(adds)
nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -507,7 +507,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
// added any adds to the fwdfilter, which would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfilter: %v", err)
@ -532,7 +532,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef)
}); err != nil {
t.Fatalf("unable to ack add htlc: %v", err)
@ -559,7 +559,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef)
}); err != nil {
t.Fatalf("unable to remove settle/fail htlc: %v", err)
@ -579,7 +579,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -619,7 +619,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
nAdds := len(adds)
nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -640,7 +640,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
// added any adds to the fwdfilter, which would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfilter: %v", err)
@ -669,7 +669,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef)
}); err != nil {
t.Fatalf("unable to remove settle/fail htlc: %v", err)
@ -696,7 +696,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef)
}); err != nil {
t.Fatalf("unable to ack add htlc: %v", err)
@ -716,7 +716,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -778,11 +778,11 @@ func assertSettleFailFilterIsFull(t *testing.T, fwdPkg *channeldb.FwdPkg, expect
// loadFwdPkgs is a helper method that reads all forwarding packages for a
// particular packager.
func loadFwdPkgs(t *testing.T, db *bbolt.DB,
func loadFwdPkgs(t *testing.T, db kvdb.Backend,
packager channeldb.FwdPackager) []*channeldb.FwdPkg {
var fwdPkgs []*channeldb.FwdPkg
if err := db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(db, func(tx kvdb.ReadTx) error {
var err error
fwdPkgs, err = packager.LoadFwdPkgs(tx)
return err
@ -795,7 +795,7 @@ func loadFwdPkgs(t *testing.T, db *bbolt.DB,
// makeFwdPkgDB initializes a test database for forwarding packages. If the
// provided path is empty, it will create a temp dir/file to use.
func makeFwdPkgDB(t *testing.T, path string) *bbolt.DB {
func makeFwdPkgDB(t *testing.T, path string) kvdb.Backend { // nolint:unparam
if path == "" {
var err error
path, err = ioutil.TempDir("", "fwdpkgdb")
@ -806,10 +806,10 @@ func makeFwdPkgDB(t *testing.T, path string) *bbolt.DB {
path = filepath.Join(path, "fwdpkg.db")
}
db, err := bbolt.Open(path, 0600, nil)
bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true)
if err != nil {
t.Fatalf("unable to open boltdb: %v", err)
}
return db
return bdb
}
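The helper above illustrates the new entry point for tests and tools alike: kvdb.Create with the bolt backend name replaces a direct bbolt.Open. A minimal sketch of opening a backend the same way; the file name is arbitrary, and the final boolean is assumed to toggle the bolt backend's freelist-sync behaviour as in the helper above:

```go
package kvdbsketch

import (
	"path/filepath"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// openBoltBackend opens (or creates) a bolt-backed kvdb.Backend under dir,
// mirroring the makeFwdPkgDB test helper above. The returned backend can be
// passed straight to kvdb.View and kvdb.Update.
func openBoltBackend(dir string) (kvdb.Backend, error) {
	dbPath := filepath.Join(dir, "sketch.db")
	return kvdb.Create(kvdb.BoltBackendName, dbPath, true)
}
```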

@ -18,7 +18,7 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -206,10 +206,10 @@ func (c *ChannelGraph) Database() *DB {
func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
// TODO(roasbeef): ptr map to reduce # of allocs? no duplicates
return c.db.View(func(tx *bbolt.Tx) error {
return kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First, grab the node bucket. This will be used to populate
// the Node pointers in each edge read from disk.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -217,11 +217,11 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli
// Next, grab the edge bucket which stores the edges, and also
// the index itself so we can group the directed edges together
// logically.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -265,8 +265,8 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli
// should be passed as the first argument. Otherwise the first argument should
// be nil and a fresh transaction will be created to execute the graph
// traversal.
func (c *ChannelGraph) ForEachNodeChannel(tx *bbolt.Tx, nodePub []byte,
cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy,
func (c *ChannelGraph) ForEachNodeChannel(tx kvdb.ReadTx, nodePub []byte,
cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy,
*ChannelEdgePolicy) error) error {
db := c.db
@ -281,13 +281,15 @@ func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) {
var disabledChanIDs []uint64
chanEdgeFound := make(map[uint64]struct{})
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
disabledEdgePolicyIndex := edges.Bucket(disabledEdgePolicyBucket)
disabledEdgePolicyIndex := edges.NestedReadBucket(
disabledEdgePolicyBucket,
)
if disabledEdgePolicyIndex == nil {
return nil
}
@ -326,11 +328,11 @@ func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) {
//
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
// traversal when graph gets mega
func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNode) error) error {
traversal := func(tx *bbolt.Tx) error {
func (c *ChannelGraph) ForEachNode(tx kvdb.RwTx, cb func(kvdb.ReadTx, *LightningNode) error) error { // nolint:interfacer
traversal := func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -359,7 +361,7 @@ func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNo
// If no transaction was provided, then we'll create a new transaction
// to execute the transaction within.
if tx == nil {
return c.db.View(traversal)
return kvdb.View(c.db, traversal)
}
// Otherwise, we re-use the existing transaction to execute the graph
@ -373,10 +375,10 @@ func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNo
// node based off the source node.
func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
var source *LightningNode
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -400,7 +402,7 @@ func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
// of the graph. The source node is treated as the center node within a
// star-graph. This method may be used to kick off a path finding algorithm in
// order to explore the reachability of another node based off the source node.
func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) sourceNode(nodes kvdb.ReadBucket) (*LightningNode, error) {
selfPub := nodes.Get(sourceKey)
if selfPub == nil {
return nil, ErrSourceNodeNotSet
@ -423,10 +425,10 @@ func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
nodePubBytes := node.PubKeyBytes[:]
return c.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -452,13 +454,13 @@ func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
//
// TODO(roasbeef): also need sig of announcement
func (c *ChannelGraph) AddLightningNode(node *LightningNode) error {
return c.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
return addLightningNode(tx, node)
})
}
func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
func addLightningNode(tx kvdb.RwTx, node *LightningNode) error {
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -483,13 +485,13 @@ func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, error) {
var alias string
err := c.db.View(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
aliases := nodes.Bucket(aliasIndexBucket)
aliases := nodes.NestedReadBucket(aliasIndexBucket)
if aliases == nil {
return ErrGraphNodesNotFound
}
@ -516,8 +518,8 @@ func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, error) {
// from the database according to the node's public key.
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
// TODO(roasbeef): ensure dangling edges are removed...
return c.db.Update(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodeNotFound
}
@ -528,10 +530,10 @@ func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
// deleteLightningNode uses an existing database transaction to remove a
// vertex/node from the database according to the node's public key.
func (c *ChannelGraph) deleteLightningNode(nodes *bbolt.Bucket,
func (c *ChannelGraph) deleteLightningNode(nodes kvdb.RwBucket,
compressedPubKey []byte) error {
aliases := nodes.Bucket(aliasIndexBucket)
aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
if aliases == nil {
return ErrGraphNodesNotFound
}
@ -556,7 +558,7 @@ func (c *ChannelGraph) deleteLightningNode(nodes *bbolt.Bucket,
// Finally, we'll delete the index entry for the node within the
// nodeUpdateIndexBucket as this node is no longer active, so we don't
// need to track its last update.
nodeUpdateIndex := nodes.Bucket(nodeUpdateIndexBucket)
nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
if nodeUpdateIndex == nil {
return ErrGraphNodesNotFound
}
@ -581,7 +583,7 @@ func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo) error {
c.cacheMu.Lock()
defer c.cacheMu.Unlock()
err := c.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
return c.addChannelEdge(tx, edge)
})
if err != nil {
@ -596,16 +598,16 @@ func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo) error {
// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction.
func (c *ChannelGraph) addChannelEdge(tx *bbolt.Tx, edge *ChannelEdgeInfo) error {
func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) error {
// Construct the channel's primary key which is the 8-byte channel ID.
var chanKey [8]byte
binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
edges, err := tx.CreateBucketIfNotExists(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -731,12 +733,12 @@ func (c *ChannelGraph) HasChannelEdge(
return upd1Time, upd2Time, exists, isZombie, nil
}
if err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -748,7 +750,7 @@ func (c *ChannelGraph) HasChannelEdge(
// index.
if edgeIndex.Get(channelID[:]) == nil {
exists = false
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
if zombieIndex != nil {
isZombie, _, _ = isZombieEdge(
zombieIndex, chanID,
@ -764,7 +766,7 @@ func (c *ChannelGraph) HasChannelEdge(
// If the channel has been found in the graph, then retrieve
// the edges itself so we can return the last updated
// timestamps.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodeNotFound
}
@ -808,13 +810,13 @@ func (c *ChannelGraph) UpdateChannelEdge(edge *ChannelEdgeInfo) error {
var chanKey [8]byte
binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
return c.db.Update(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrEdgeNotFound
}
@ -851,10 +853,10 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
var chansClosed []*ChannelEdgeInfo
err := c.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
// First grab the edges bucket which houses the information
// we'd like to delete
edges, err := tx.CreateBucketIfNotExists(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -868,7 +870,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
if err != nil {
return err
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrSourceNodeNotSet
}
@ -919,7 +921,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
chansClosed = append(chansClosed, &edgeInfo)
}
metaBucket, err := tx.CreateBucketIfNotExists(graphMetaBucket)
metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil {
return err
}
@ -965,16 +967,16 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
// that we only maintain a graph of reachable nodes. In the event that a pruned
// node gains more channels, it will be re-added back to the graph.
func (c *ChannelGraph) PruneGraphNodes() error {
return c.db.Update(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
edges := tx.Bucket(edgeBucket)
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrGraphNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -986,8 +988,8 @@ func (c *ChannelGraph) PruneGraphNodes() error {
// pruneGraphNodes attempts to remove any nodes from the graph who have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *ChannelGraph) pruneGraphNodes(nodes *bbolt.Bucket,
edgeIndex *bbolt.Bucket) error {
func (c *ChannelGraph) pruneGraphNodes(nodes kvdb.RwBucket,
edgeIndex kvdb.RwBucket) error {
log.Trace("Pruning nodes from graph with no open channels")
@ -1113,8 +1115,8 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// Keep track of the channels that are removed from the graph.
var removedChans []*ChannelEdgeInfo
if err := c.db.Update(func(tx *bbolt.Tx) error {
edges, err := tx.CreateBucketIfNotExists(edgeBucket)
if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -1130,7 +1132,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
if err != nil {
return err
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -1140,7 +1142,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// NOTE: we must delete the edges after the cursor loop, since
// modifying the bucket while traversing is not safe.
var keys [][]byte
cursor := edgeIndex.Cursor()
cursor := edgeIndex.ReadWriteCursor()
for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
@ -1166,7 +1168,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// Delete all the entries in the prune log having a height
// greater or equal to the block disconnected.
metaBucket, err := tx.CreateBucketIfNotExists(graphMetaBucket)
metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil {
return err
}
@ -1185,7 +1187,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// To avoid modifying the bucket while traversing, we delete
// the keys in a second loop.
var pruneKeys [][]byte
pruneCursor := pruneBucket.Cursor()
pruneCursor := pruneBucket.ReadWriteCursor()
for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
@ -1221,17 +1223,17 @@ func (c *ChannelGraph) PruneTip() (*chainhash.Hash, uint32, error) {
tipHeight uint32
)
err := c.db.View(func(tx *bbolt.Tx) error {
graphMeta := tx.Bucket(graphMetaBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
graphMeta := tx.ReadBucket(graphMetaBucket)
if graphMeta == nil {
return ErrGraphNotFound
}
pruneBucket := graphMeta.Bucket(pruneLogBucket)
pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
if pruneBucket == nil {
return ErrGraphNeverPruned
}
pruneCursor := pruneBucket.Cursor()
pruneCursor := pruneBucket.ReadCursor()
// The prune key with the largest block height will be our
// prune tip.
@ -1266,20 +1268,20 @@ func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) error {
c.cacheMu.Lock()
defer c.cacheMu.Unlock()
err := c.db.Update(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrEdgeNotFound
}
chanIndex := edges.Bucket(channelPointBucket)
chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
if chanIndex == nil {
return ErrEdgeNotFound
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodeNotFound
}
@ -1319,7 +1321,7 @@ func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) error {
// the database, then ErrEdgeNotFound is returned.
func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
var chanID uint64
if err := c.db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
var err error
chanID, err = getChanID(tx, chanPoint)
return err
@ -1331,17 +1333,17 @@ func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
}
// getChanID returns the assigned channel ID for a given channel point.
func getChanID(tx *bbolt.Tx, chanPoint *wire.OutPoint) (uint64, error) {
func getChanID(tx kvdb.ReadTx, chanPoint *wire.OutPoint) (uint64, error) {
var b bytes.Buffer
if err := writeOutpoint(&b, chanPoint); err != nil {
return 0, err
}
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return 0, ErrGraphNoEdgesFound
}
chanIndex := edges.Bucket(channelPointBucket)
chanIndex := edges.NestedReadBucket(channelPointBucket)
if chanIndex == nil {
return 0, ErrGraphNoEdgesFound
}
@ -1364,19 +1366,19 @@ func getChanID(tx *bbolt.Tx, chanPoint *wire.OutPoint) (uint64, error) {
func (c *ChannelGraph) HighestChanID() (uint64, error) {
var cid uint64
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
// In order to find the highest chan ID, we'll fetch a cursor
// and use that to seek to the "end" of our known range.
cidCursor := edgeIndex.Cursor()
cidCursor := edgeIndex.ReadCursor()
lastChanID, _ := cidCursor.Last()
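Because channel IDs are stored as big-endian 8-byte keys, the lexicographically last key returned by Last is also the numerically highest. A generic sketch of the same idiom with a hypothetical bucket name:

```go
package kvdbsketch

import (
	"encoding/binary"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// highestKey returns the numerically largest uint64 key in a bucket of
// 8-byte big-endian keys, or false if the bucket is missing or empty. The
// bucket name "index" is hypothetical.
func highestKey(db kvdb.Backend) (uint64, bool, error) {
	var (
		highest uint64
		found   bool
	)
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		bucket := tx.ReadBucket([]byte("index"))
		if bucket == nil {
			return nil
		}

		// Last positions the cursor at the byte-wise greatest key,
		// which is also the numerically greatest for big-endian keys.
		last, _ := bucket.ReadCursor().Last()
		if len(last) != 8 {
			return nil
		}
		highest = binary.BigEndian.Uint64(last)
		found = true
		return nil
	})
	return highest, found, err
}
```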
@ -1428,28 +1430,28 @@ func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, endTime time.Time) ([]Cha
defer c.cacheMu.Unlock()
var hits int
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket)
edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
if edgeUpdateIndex == nil {
return ErrGraphNoEdgesFound
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
// We'll now obtain a cursor to perform a range query within
// the index to find all channels within the horizon.
updateCursor := edgeUpdateIndex.Cursor()
updateCursor := edgeUpdateIndex.ReadCursor()
var startTimeBytes, endTimeBytes [8 + 8]byte
byteOrder.PutUint64(
@ -1548,20 +1550,20 @@ func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, endTime time.Time) ([]Cha
func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, endTime time.Time) ([]LightningNode, error) {
var nodesInHorizon []LightningNode
err := c.db.View(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
nodeUpdateIndex := nodes.Bucket(nodeUpdateIndexBucket)
nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
if nodeUpdateIndex == nil {
return ErrGraphNodesNotFound
}
// We'll now obtain a cursor to perform a range query within
// the index to find all node announcements within the horizon.
updateCursor := nodeUpdateIndex.Cursor()
updateCursor := nodeUpdateIndex.ReadCursor()
var startTimeBytes, endTimeBytes [8 + 33]byte
byteOrder.PutUint64(
@ -1610,12 +1612,12 @@ func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, endTime time.Time) ([]Lig
func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, error) {
var newChanIDs []uint64
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -1623,7 +1625,7 @@ func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, error) {
// Fetch the zombie index; it may not exist if no edges have
// ever been marked as zombies. If the index has been
// initialized, we will use it later to skip known zombie edges.
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
// We'll run through the set of chanIDs and collate only the
// set of channels that are unable to be found within our db.
@ -1686,17 +1688,17 @@ func (c *ChannelGraph) FilterChannelRange(startHeight, endHeight uint32) ([]uint
byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
cursor := edgeIndex.Cursor()
cursor := edgeIndex.ReadCursor()
// We'll now iterate through the database, and find each
// channel ID that resides within the specified range.
@ -1739,16 +1741,16 @@ func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
cidBytes [8]byte
)
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -1794,12 +1796,12 @@ func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
return chanEdges, nil
}
func delEdgeUpdateIndexEntry(edgesBucket *bbolt.Bucket, chanID uint64,
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
edge1, edge2 *ChannelEdgePolicy) error {
// First, we'll fetch the edge update index bucket which currently
// stores an entry for the channel we're about to delete.
updateIndex := edgesBucket.Bucket(edgeUpdateIndexBucket)
updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
if updateIndex == nil {
// No edges in bucket, return early.
return nil
@ -1833,7 +1835,7 @@ func delEdgeUpdateIndexEntry(edgesBucket *bbolt.Bucket, chanID uint64,
}
func delChannelEdge(edges, edgeIndex, chanIndex, zombieIndex,
nodes *bbolt.Bucket, chanID []byte, isZombie bool) error {
nodes kvdb.RwBucket, chanID []byte, isZombie bool) error {
edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
if err != nil {
@ -1919,7 +1921,7 @@ func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy) error {
defer c.cacheMu.Unlock()
var isUpdate1 bool
err := c.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
var err error
isUpdate1, err = updateEdgePolicy(tx, edge)
return err
@ -1961,17 +1963,17 @@ func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy) error {
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2.
func updateEdgePolicy(tx *bbolt.Tx, edge *ChannelEdgePolicy) (bool, error) {
edges := tx.Bucket(edgeBucket)
func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, error) {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return false, ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return false, ErrEdgeNotFound
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return false, err
}
@ -2138,14 +2140,14 @@ func (l *LightningNode) NodeAnnouncement(signed bool) (*lnwire.NodeAnnouncement,
// isPublic determines whether the node is seen as public within the graph from
// the source node's point of view. An existing database transaction can also be
// specified.
func (l *LightningNode) isPublic(tx *bbolt.Tx, sourcePubKey []byte) (bool, error) {
func (l *LightningNode) isPublic(tx kvdb.ReadTx, sourcePubKey []byte) (bool, error) {
// In order to determine whether this node is publicly advertised within
// the graph, we'll need to look at all of its edges and check whether
// they extend to any other node than the source node. errDone will be
// used to terminate the check early.
nodeIsPublic := false
errDone := errors.New("done")
err := l.ForEachChannel(tx, func(_ *bbolt.Tx, info *ChannelEdgeInfo,
err := l.ForEachChannel(tx, func(_ kvdb.ReadTx, info *ChannelEdgeInfo,
_, _ *ChannelEdgePolicy) error {
// If this edge doesn't extend to the source node, we'll
@ -2184,15 +2186,15 @@ func (l *LightningNode) isPublic(tx *bbolt.Tx, sourcePubKey []byte) (bool, error
// should be passed as the first argument. Otherwise the first argument should
// be nil and a fresh transaction will be created to execute the graph
// traversal.
func (c *ChannelGraph) FetchLightningNode(tx *bbolt.Tx, nodePub route.Vertex) (
func (c *ChannelGraph) FetchLightningNode(tx kvdb.ReadTx, nodePub route.Vertex) (
*LightningNode, error) {
var node *LightningNode
fetchNode := func(tx *bbolt.Tx) error {
fetchNode := func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2220,7 +2222,7 @@ func (c *ChannelGraph) FetchLightningNode(tx *bbolt.Tx, nodePub route.Vertex) (
var err error
if tx == nil {
err = c.db.View(fetchNode)
err = kvdb.View(c.db, fetchNode)
} else {
err = fetchNode(tx)
}
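The if/else above is the recurring optional-transaction pattern in this commit: run the closure inside the caller's kvdb.ReadTx when one is supplied, otherwise wrap it in a fresh kvdb.View. A generic sketch of that pattern:

```go
package kvdbsketch

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// viewWithOptionalTx executes fn inside the supplied read transaction if one
// is given, and otherwise opens a fresh read transaction for it. This mirrors
// the tx == nil branches used throughout the graph code in this commit.
func viewWithOptionalTx(db kvdb.Backend, tx kvdb.ReadTx,
	fn func(kvdb.ReadTx) error) error {

	if tx == nil {
		return kvdb.View(db, fn)
	}
	return fn(tx)
}
```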
@ -2242,10 +2244,10 @@ func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, erro
exists bool
)
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2280,19 +2282,19 @@ func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, erro
// nodeTraversal is used to traverse all channels of a node given by its
// public key and passes channel information into the specified callback.
func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB,
cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
func nodeTraversal(tx kvdb.ReadTx, nodePub []byte, db *DB,
cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
traversal := func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
traversal := func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -2312,7 +2314,7 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB,
// bucket until the retrieved key no longer has the public key
// as its prefix. This indicates that we've stepped over into
// another node's edges, so we can terminate our scan.
edgeCursor := edges.Cursor()
edgeCursor := edges.ReadCursor()
for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() {
// If the prefix still matches, the channel id is
// returned in nodeEdge. Channel id is used to lookup
@ -2357,7 +2359,7 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB,
// If no transaction was provided, then we'll create a new transaction
// to execute the traversal within.
if tx == nil {
return db.View(traversal)
return kvdb.View(db, traversal)
}
// Otherwise, we re-use the existing transaction to execute the graph
@ -2378,8 +2380,8 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB,
// should be passed as the first argument. Otherwise the first argument should
// be nil and a fresh transaction will be created to execute the graph
// traversal.
func (l *LightningNode) ForEachChannel(tx *bbolt.Tx,
cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
func (l *LightningNode) ForEachChannel(tx kvdb.ReadTx,
cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
nodePub := l.PubKeyBytes[:]
db := l.db
@ -2570,7 +2572,7 @@ func (c *ChannelEdgeInfo) OtherNodeKeyBytes(thisNodeKey []byte) (
// the target node in the channel. This is useful when one knows the pubkey of
// one of the nodes, and wishes to obtain the full LightningNode for the other
// end of the channel.
func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*LightningNode, error) {
func (c *ChannelEdgeInfo) FetchOtherNode(tx kvdb.ReadTx, thisNodeKey []byte) (*LightningNode, error) {
// Ensure that the node passed in is actually a member of the channel.
var targetNodeBytes [33]byte
@ -2584,10 +2586,10 @@ func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*Lig
}
var targetNode *LightningNode
fetchNodeFunc := func(tx *bbolt.Tx) error {
fetchNodeFunc := func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2607,7 +2609,7 @@ func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*Lig
// otherwise we can use the existing db transaction.
var err error
if tx == nil {
err = c.db.View(fetchNodeFunc)
err = kvdb.View(c.db, fetchNodeFunc)
} else {
err = fetchNodeFunc(tx)
}
@ -2883,10 +2885,10 @@ func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint,
policy2 *ChannelEdgePolicy
)
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First, grab the node bucket. This will be used to populate
// the Node pointers in each edge read from disk.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2894,18 +2896,18 @@ func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint,
// Next, grab the edge bucket which stores the edges, and also
// the index itself so we can group the directed edges together
// logically.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
// If the channel's outpoint doesn't exist within the outpoint
// index, then the edge does not exist.
chanIndex := edges.Bucket(channelPointBucket)
chanIndex := edges.NestedReadBucket(channelPointBucket)
if chanIndex == nil {
return ErrGraphNoEdgesFound
}
@ -2967,10 +2969,10 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
channelID [8]byte
)
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First, grab the node bucket. This will be used to populate
// the Node pointers in each edge read from disk.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2978,11 +2980,11 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
// Next, grab the edge bucket which stores the edges, and also
// the index itself so we can group the directed edges together
// logically.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -2998,7 +3000,7 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
// If the zombie index doesn't exist, or the edge is not
// marked as a zombie within it, then we'll return the
// original ErrEdgeNotFound error.
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
if zombieIndex == nil {
return ErrEdgeNotFound
}
@ -3057,8 +3059,8 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
// source node's point of view.
func (c *ChannelGraph) IsPublicNode(pubKey [33]byte) (bool, error) {
var nodeIsPublic bool
err := c.db.View(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
@ -3143,19 +3145,19 @@ func (e *EdgePoint) String() string {
// closes on the resident blockchain.
func (c *ChannelGraph) ChannelView() ([]EdgePoint, error) {
var edgePoints []EdgePoint
if err := c.db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// We're going to iterate over the entire channel index, so
// we'll need to fetch the edgeBucket to get to the index as
// it's a sub-bucket.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
chanIndex := edges.Bucket(channelPointBucket)
chanIndex := edges.NestedReadBucket(channelPointBucket)
if chanIndex == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -3209,7 +3211,7 @@ func (c *ChannelGraph) NewChannelEdgePolicy() *ChannelEdgePolicy {
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
// keys should represent the node public keys of the two parties involved in the
// edge.
func markEdgeZombie(zombieIndex *bbolt.Bucket, chanID uint64, pubKey1,
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
pubKey2 [33]byte) error {
var k [8]byte
@ -3227,12 +3229,12 @@ func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
c.cacheMu.Lock()
defer c.cacheMu.Unlock()
err := c.db.Update(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
if zombieIndex == nil {
return nil
}
@ -3260,12 +3262,12 @@ func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
pubKey1, pubKey2 [33]byte
)
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
if zombieIndex == nil {
return nil
}
@ -3283,7 +3285,7 @@ func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
// isZombieEdge returns whether an entry exists for the given channel in the
// zombie index. If an entry exists, then the two node public keys corresponding
// to this edge are also returned.
func isZombieEdge(zombieIndex *bbolt.Bucket,
func isZombieEdge(zombieIndex kvdb.ReadBucket,
chanID uint64) (bool, [33]byte, [33]byte) {
var k [8]byte
@ -3304,12 +3306,12 @@ func isZombieEdge(zombieIndex *bbolt.Bucket,
// NumZombies returns the current number of zombie channels in the graph.
func (c *ChannelGraph) NumZombies() (uint64, error) {
var numZombies uint64
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return nil
}
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
if zombieIndex == nil {
return nil
}
@ -3326,8 +3328,8 @@ func (c *ChannelGraph) NumZombies() (uint64, error) {
return numZombies, nil
}
func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
updateIndex *bbolt.Bucket, node *LightningNode) error {
func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket, // nolint:dupl
updateIndex kvdb.RwBucket, node *LightningNode) error {
var (
scratch [16]byte
@ -3455,7 +3457,7 @@ func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
return nodeBucket.Put(nodePub, b.Bytes())
}
func fetchLightningNode(nodeBucket *bbolt.Bucket,
func fetchLightningNode(nodeBucket kvdb.ReadBucket,
nodePub []byte) (LightningNode, error) {
nodeBytes := nodeBucket.Get(nodePub)
@ -3563,7 +3565,7 @@ func deserializeLightningNode(r io.Reader) (LightningNode, error) {
return node, nil
}
func putChanEdgeInfo(edgeIndex *bbolt.Bucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) error {
func putChanEdgeInfo(edgeIndex kvdb.RwBucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) error {
var b bytes.Buffer
if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
@ -3629,7 +3631,7 @@ func putChanEdgeInfo(edgeIndex *bbolt.Bucket, edgeInfo *ChannelEdgeInfo, chanID
return edgeIndex.Put(chanID[:], b.Bytes())
}
func fetchChanEdgeInfo(edgeIndex *bbolt.Bucket,
func fetchChanEdgeInfo(edgeIndex kvdb.ReadBucket,
chanID []byte) (ChannelEdgeInfo, error) {
edgeInfoBytes := edgeIndex.Get(chanID)
@ -3718,7 +3720,7 @@ func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
return edgeInfo, nil
}
func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
from, to []byte) error {
var edgeKey [33 + 8]byte
@ -3798,7 +3800,7 @@ func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when prune is needed.
func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
direction bool, disabled bool) error {
var disabledEdgeKey [8 + 1]byte
@ -3823,7 +3825,7 @@ func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
from []byte) error {
var edgeKey [33 + 8]byte
@ -3838,8 +3840,8 @@ func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
return edges.Put(edgeKey[:], unknownPolicy)
}
func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte,
nodePub []byte, nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
func fetchChanEdgePolicy(edges kvdb.ReadBucket, chanID []byte,
nodePub []byte, nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
var edgeKey [33 + 8]byte
copy(edgeKey[:], nodePub)
@ -3871,8 +3873,8 @@ func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte,
return ep, nil
}
func fetchChanEdgePolicies(edgeIndex *bbolt.Bucket, edges *bbolt.Bucket,
nodes *bbolt.Bucket, chanID []byte,
func fetchChanEdgePolicies(edgeIndex kvdb.ReadBucket, edges kvdb.ReadBucket,
nodes kvdb.ReadBucket, chanID []byte,
db *DB) (*ChannelEdgePolicy, *ChannelEdgePolicy, error) {
edgeInfo := edgeIndex.Get(chanID)
@ -3980,7 +3982,7 @@ func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
}
func deserializeChanEdgePolicy(r io.Reader,
nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
edge := &ChannelEdgePolicy{}

@ -17,8 +17,8 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -882,7 +882,7 @@ func TestGraphTraversal(t *testing.T) {
// Iterate over each node as returned by the graph, if all nodes are
// reached, then the map created above should be empty.
err = graph.ForEachNode(nil, func(_ *bbolt.Tx, node *LightningNode) error {
err = graph.ForEachNode(nil, func(_ kvdb.ReadTx, node *LightningNode) error {
delete(nodeIndex, node.Alias)
return nil
})
@ -978,7 +978,7 @@ func TestGraphTraversal(t *testing.T) {
// Finally, we want to test the ability to iterate over all the
// outgoing channels for a particular node.
numNodeChans := 0
err = firstNode.ForEachChannel(nil, func(_ *bbolt.Tx, _ *ChannelEdgeInfo,
err = firstNode.ForEachChannel(nil, func(_ kvdb.ReadTx, _ *ChannelEdgeInfo,
outEdge, inEdge *ChannelEdgePolicy) error {
// All channels between first and second node should have fully
@ -1051,7 +1051,7 @@ func assertNumChans(t *testing.T, graph *ChannelGraph, n int) {
func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) {
numNodes := 0
err := graph.ForEachNode(nil, func(_ *bbolt.Tx, _ *LightningNode) error {
err := graph.ForEachNode(nil, func(_ kvdb.ReadTx, _ *LightningNode) error {
numNodes++
return nil
})
@ -2097,10 +2097,9 @@ func TestIncompleteChannelPolicies(t *testing.T) {
}
// Ensure that channel is reported with unknown policies.
checkPolicies := func(node *LightningNode, expectedIn, expectedOut bool) {
calls := 0
node.ForEachChannel(nil, func(_ *bbolt.Tx, _ *ChannelEdgeInfo,
err := node.ForEachChannel(nil, func(_ kvdb.ReadTx, _ *ChannelEdgeInfo,
outEdge, inEdge *ChannelEdgePolicy) error {
if !expectedOut && outEdge != nil {
@ -2123,6 +2122,9 @@ func TestIncompleteChannelPolicies(t *testing.T) {
return nil
})
if err != nil {
t.Fatalf("unable to scan channels: %v", err)
}
if calls != 1 {
t.Fatalf("Expected only one callback call")
@ -2233,17 +2235,27 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) {
timestampSet[t] = struct{}{}
}
err := db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket)
edgeUpdateIndex := edges.NestedReadBucket(
edgeUpdateIndexBucket,
)
if edgeUpdateIndex == nil {
return ErrGraphNoEdgesFound
}
numEntries := edgeUpdateIndex.Stats().KeyN
var numEntries int
err := edgeUpdateIndex.ForEach(func(k, v []byte) error {
numEntries++
return nil
})
if err != nil {
return err
}
expectedEntries := len(timestampSet)
if numEntries != expectedEntries {
return fmt.Errorf("expected %v entries in the "+
@ -2832,8 +2844,8 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
// Attempting to deserialize these bytes should return an error.
r := bytes.NewReader(stripped)
err = db.View(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
err = kvdb.View(db, func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2852,13 +2864,13 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
}
// Put the stripped bytes in the DB.
err = db.Update(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err = kvdb.Update(db, func(tx kvdb.RwTx) error {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrEdgeNotFound
}

@ -8,7 +8,7 @@ import (
"io"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/htlcswitch/hop"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
@ -401,8 +401,8 @@ func (d *DB) AddInvoice(newInvoice *Invoice, paymentHash lntypes.Hash) (
}
var invoiceAddIndex uint64
err := d.Update(func(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil {
return err
}
@ -479,13 +479,13 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
var startIndex [8]byte
byteOrder.PutUint64(startIndex[:], sinceAddIndex)
err := d.DB.View(func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
addIndex := invoices.Bucket(addIndexBucket)
addIndex := invoices.NestedReadBucket(addIndexBucket)
if addIndex == nil {
return ErrNoInvoicesCreated
}
@ -493,7 +493,7 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
// We'll now run through each entry in the add index starting
// at our starting index. We'll continue until we reach the
// very end of the current key space.
invoiceCursor := addIndex.Cursor()
invoiceCursor := addIndex.ReadCursor()
// We'll seek to the starting index, then manually advance the
// cursor in order to skip the entry with the since add index.
@ -534,12 +534,12 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
// terms of the payment.
func (d *DB) LookupInvoice(paymentHash [32]byte) (Invoice, error) {
var invoice Invoice
err := d.View(func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
invoiceIndex := invoices.Bucket(invoiceIndexBucket)
invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket)
if invoiceIndex == nil {
return ErrNoInvoicesCreated
}
@ -589,13 +589,13 @@ func (d *DB) FetchAllInvoicesWithPaymentHash(pendingOnly bool) (
var result []InvoiceWithPaymentHash
err := d.View(func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
invoiceIndex := invoices.Bucket(invoiceIndexBucket)
invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket)
if invoiceIndex == nil {
// Mask the error if there's no invoice
// index as that simply means there are no
@ -695,21 +695,21 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
InvoiceQuery: q,
}
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
// If the bucket wasn't found, then there aren't any invoices
// within the database yet, so we can simply exit.
invoices := tx.Bucket(invoiceBucket)
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
invoiceAddIndex := invoices.Bucket(addIndexBucket)
invoiceAddIndex := invoices.NestedReadBucket(addIndexBucket)
if invoiceAddIndex == nil {
return ErrNoInvoicesCreated
}
// keyForIndex is a helper closure that retrieves the invoice
// key for the given add index of an invoice.
keyForIndex := func(c *bbolt.Cursor, index uint64) []byte {
keyForIndex := func(c kvdb.ReadCursor, index uint64) []byte {
var keyIndex [8]byte
byteOrder.PutUint64(keyIndex[:], index)
_, invoiceKey := c.Seek(keyIndex[:])
@ -718,7 +718,7 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
// nextKey is a helper closure to determine what the next
// invoice key is when iterating over the invoice add index.
nextKey := func(c *bbolt.Cursor) ([]byte, []byte) {
nextKey := func(c kvdb.ReadCursor) ([]byte, []byte) {
if q.Reversed {
return c.Prev()
}
@ -728,7 +728,7 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
// We'll be using a cursor to seek into the database and return
// a slice of invoices. We'll need to determine where to start
// our cursor depending on the parameters set within the query.
c := invoiceAddIndex.Cursor()
c := invoiceAddIndex.ReadCursor()
invoiceKey := keyForIndex(c, q.IndexOffset+1)
// If the query is specifying reverse iteration, then we must
@ -822,8 +822,8 @@ func (d *DB) UpdateInvoice(paymentHash lntypes.Hash,
callback InvoiceUpdateCallback) (*Invoice, error) {
var updatedInvoice *Invoice
err := d.Update(func(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil {
return err
}
@ -877,13 +877,13 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
var startIndex [8]byte
byteOrder.PutUint64(startIndex[:], sinceSettleIndex)
err := d.DB.View(func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
settleIndex := invoices.Bucket(settleIndexBucket)
settleIndex := invoices.NestedReadBucket(settleIndexBucket)
if settleIndex == nil {
return ErrNoInvoicesCreated
}
@ -891,7 +891,7 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
// We'll now run through each entry in the add index starting
// at our starting index. We'll continue until we reach the
// very end of the current key space.
invoiceCursor := settleIndex.Cursor()
invoiceCursor := settleIndex.ReadCursor()
// We'll seek to the starting index, then manually advance the
// cursor in order to skip the entry with the since add index.
@ -919,7 +919,7 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
return settledInvoices, nil
}
func putInvoice(invoices, invoiceIndex, addIndex *bbolt.Bucket,
func putInvoice(invoices, invoiceIndex, addIndex kvdb.RwBucket,
i *Invoice, invoiceNum uint32, paymentHash lntypes.Hash) (
uint64, error) {
@ -1112,7 +1112,7 @@ func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error {
return nil
}
func fetchInvoice(invoiceNum []byte, invoices *bbolt.Bucket) (Invoice, error) {
func fetchInvoice(invoiceNum []byte, invoices kvdb.ReadBucket) (Invoice, error) {
invoiceBytes := invoices.Get(invoiceNum)
if invoiceBytes == nil {
return Invoice{}, ErrInvoiceNotFound
@ -1325,7 +1325,7 @@ func copyInvoice(src *Invoice) *Invoice {
// updateInvoice fetches the invoice, obtains the update descriptor from the
// callback and applies the updates in a single db transaction.
func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex *bbolt.Bucket,
func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex kvdb.RwBucket,
invoiceNum []byte, callback InvoiceUpdateCallback) (*Invoice, error) {
invoice, err := fetchInvoice(invoiceNum, invoices)
@ -1572,7 +1572,7 @@ func updateHtlc(resolveTime time.Time, htlc *InvoiceHTLC,
// setSettleMetaFields updates the metadata associated with settlement of an
// invoice.
func setSettleMetaFields(settleIndex *bbolt.Bucket, invoiceNum []byte,
func setSettleMetaFields(settleIndex kvdb.RwBucket, invoiceNum []byte,
invoice *Invoice, now time.Time) error {
// Now that we know the invoice hasn't already been settled, we'll

channeldb/kvdb/bbolt.go

@ -0,0 +1,10 @@
package kvdb
import (
_ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend.
)
// BoltBackendName is the name of the backend that should be passed into
// kvdb.Create to initialize a new instance of kvdb.Backend backed by a live
// instance of bbolt.
const BoltBackendName = "bdb"
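For reference, a minimal sketch (not part of this changeset) of standing up a bolt-backed kvdb.Backend with the constant above; the arguments mirror the kvdb.Create/kvdb.Open calls introduced in channeldb's db.go further down in this diff, and the file path is purely illustrative:
package main
import (
	"log"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
func main() {
	// Create a new bbolt-backed database through the kvdb abstraction.
	// The trailing argument toggles NoFreelistSync, matching the extra
	// parameter channeldb passes when opening its own database file.
	db, err := kvdb.Create(kvdb.BoltBackendName, "/tmp/example.db", false)
	if err != nil {
		log.Fatalf("unable to create kvdb backend: %v", err)
	}
	defer db.Close()
}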

@ -0,0 +1,90 @@
package kvdb
import (
"github.com/btcsuite/btcwallet/walletdb"
_ "github.com/btcsuite/btcwallet/walletdb/bdb" // Import to register backend.
)
// Update opens a database read/write transaction and executes the function f
// with the transaction passed as a parameter. After f exits, if f did not
// error, the transaction is committed. Otherwise, if f did error, the
// transaction is rolled back. If the rollback fails, the original error
// returned by f is still returned. If the commit fails, the commit error is
// returned.
var Update = walletdb.Update
// View opens a database read transaction and executes the function f with the
// transaction passed as a parameter. After f exits, the transaction is rolled
// back. If f errors, its error is returned, not a rollback error (if any
// occur).
var View = walletdb.View
// Batch is identical to the Update call, but it attempts to combine several
// individual Update transactions into a single write database transaction on
// an optimistic basis. This only has benefits if multiple goroutines call
// Batch.
var Batch = walletdb.Batch
// Create initializes and opens a database for the specified type. The
// arguments are specific to the database type driver. See the documentation
// for the database driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
var Create = walletdb.Create
// Backend represents an ACID database. All database access is performed
// through read or read+write transactions.
type Backend = walletdb.DB
// Open opens an existing database for the specified type. The arguments are
// specific to the database type driver. See the documentation for the database
// driver for further details.
//
// ErrDbUnknownType will be returned if the database type is not registered.
var Open = walletdb.Open
// Driver defines a structure for backend drivers to use when they register
// themselves as a backend which implements the Backend interface.
type Driver = walletdb.Driver
// ReadBucket represents a bucket (a hierarchical structure within the
// database) that is only allowed to perform read operations.
type ReadBucket = walletdb.ReadBucket
// ReadCursor represents a bucket cursor that can be positioned at the start or
// end of the bucket's key/value pairs and iterate over pairs in the bucket.
// This type is only allowed to perform database read operations.
type ReadCursor = walletdb.ReadCursor
// ReadTx represents a database transaction that can only be used for reads. If
// a database update must occur, use a RwTx.
type ReadTx = walletdb.ReadTx
// RwBucket represents a bucket (a hierarchical structure within the database)
// that is allowed to perform both read and write operations.
type RwBucket = walletdb.ReadWriteBucket
// RwCursor represents a bucket cursor that can be positioned at the start or
// end of the bucket's key/value pairs and iterate over pairs in the bucket.
// This abstraction is allowed to perform both database read and write
// operations.
type RwCursor = walletdb.ReadWriteCursor
// ReadWriteTx represents a database transaction that can be used for both
// reads and writes. When only reads are necessary, consider using a ReadTx
// instead.
type RwTx = walletdb.ReadWriteTx
var (
// ErrBucketNotFound is returned when trying to access a bucket that
// has not been created yet.
ErrBucketNotFound = walletdb.ErrBucketNotFound
// ErrBucketExists is returned when creating a bucket that already
// exists.
ErrBucketExists = walletdb.ErrBucketExists
// ErrDatabaseNotOpen is returned when a database instance is accessed
// before it is opened or after it is closed.
ErrDatabaseNotOpen = walletdb.ErrDbNotOpen
)
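To make the read/write split concrete, here is a short usage sketch (not part of this changeset) of kvdb.View and kvdb.Update together with the bucket helpers that replace the old bbolt calls throughout this diff; the package name, function names, and bucket keys are hypothetical:
package kvdbexample
import "github.com/lightningnetwork/lnd/channeldb/kvdb"
// countEntries shows the read path: kvdb.View opens a read-only
// transaction, ReadBucket replaces tx.Bucket, and NestedReadBucket
// replaces bucket.Bucket for nested buckets.
func countEntries(db kvdb.Backend, topBucket, subBucket []byte) (int, error) {
	var count int
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		top := tx.ReadBucket(topBucket)
		if top == nil {
			return kvdb.ErrBucketNotFound
		}
		sub := top.NestedReadBucket(subBucket)
		if sub == nil {
			return kvdb.ErrBucketNotFound
		}
		// Count entries by iteration, as done for the edge update
		// index above, since bbolt's Stats() is no longer exposed.
		return sub.ForEach(func(k, v []byte) error {
			count++
			return nil
		})
	})
	return count, err
}
// putEntry shows the write path: kvdb.Update opens a read/write
// transaction and CreateTopLevelBucket replaces the old
// tx.CreateBucketIfNotExists call.
func putEntry(db kvdb.Backend, bucket, key, value []byte) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		b, err := tx.CreateTopLevelBucket(bucket)
		if err != nil {
			return err
		}
		return b.Put(key, value)
	})
}
On hot write paths where many goroutines commit small transactions concurrently, kvdb.Batch can be substituted for kvdb.Update, per the Batch documentation above.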

@ -1,6 +1,8 @@
package channeldb
import "github.com/coreos/bbolt"
import (
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
// metaBucket stores all the meta information concerning the state of
@ -20,10 +22,10 @@ type Meta struct {
// FetchMeta fetches the meta data from boltdb and returns a filled meta
// structure.
func (d *DB) FetchMeta(tx *bbolt.Tx) (*Meta, error) {
func (d *DB) FetchMeta(tx kvdb.ReadTx) (*Meta, error) {
meta := &Meta{}
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
return fetchMeta(meta, tx)
})
if err != nil {
@ -36,8 +38,8 @@ func (d *DB) FetchMeta(tx *bbolt.Tx) (*Meta, error) {
// fetchMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported FetchMeta method
// for more information.
func fetchMeta(meta *Meta, tx *bbolt.Tx) error {
metaBucket := tx.Bucket(metaBucket)
func fetchMeta(meta *Meta, tx kvdb.ReadTx) error {
metaBucket := tx.ReadBucket(metaBucket)
if metaBucket == nil {
return ErrMetaNotFound
}
@ -54,7 +56,7 @@ func fetchMeta(meta *Meta, tx *bbolt.Tx) error {
// PutMeta writes the passed instance of the database meta-data struct to disk.
func (d *DB) PutMeta(meta *Meta) error {
return d.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
return putMeta(meta, tx)
})
}
@ -62,8 +64,8 @@ func (d *DB) PutMeta(meta *Meta) error {
// putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for
// more information.
func putMeta(meta *Meta, tx *bbolt.Tx) error {
metaBucket, err := tx.CreateBucketIfNotExists(metaBucket)
func putMeta(meta *Meta, tx kvdb.RwTx) error {
metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
if err != nil {
return err
}
@ -71,7 +73,7 @@ func putMeta(meta *Meta, tx *bbolt.Tx) error {
return putDbVersion(metaBucket, meta)
}
func putDbVersion(metaBucket *bbolt.Bucket, meta *Meta) error {
func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error {
scratch := make([]byte, 4)
byteOrder.PutUint32(scratch, meta.DbVersionNumber)
return metaBucket.Put(dbVersionKey, scratch)

@ -5,8 +5,8 @@ import (
"io/ioutil"
"testing"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// applyMigration is a helper test function that encapsulates the general steps
@ -121,11 +121,11 @@ func TestOrderOfMigrations(t *testing.T) {
versions := []version{
{0, nil},
{1, nil},
{2, func(tx *bbolt.Tx) error {
{2, func(tx kvdb.RwTx) error {
appliedMigration = 2
return nil
}},
{3, func(tx *bbolt.Tx) error {
{3, func(tx kvdb.RwTx) error {
appliedMigration = 3
return nil
}},
@ -197,21 +197,23 @@ func TestMigrationWithPanic(t *testing.T) {
beforeMigrationFunc := func(d *DB) {
// Insert data in the database, and then make sure that the
// key isn't changed in case of panic or failure.
d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
bucket.Put(keyPrefix, beforeMigration)
return nil
return bucket.Put(keyPrefix, beforeMigration)
})
if err != nil {
t.Fatalf("unable to insert: %v", err)
}
}
// Create a migration function which changes the initially created data and
// throws a panic; in this case we're pretending that something goes wrong.
migrationWithPanic := func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
migrationWithPanic := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -231,8 +233,8 @@ func TestMigrationWithPanic(t *testing.T) {
t.Fatal("migration panicked but version is changed")
}
err = d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -268,22 +270,24 @@ func TestMigrationWithFatal(t *testing.T) {
afterMigration := []byte("aftermigration")
beforeMigrationFunc := func(d *DB) {
d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
bucket.Put(keyPrefix, beforeMigration)
return nil
return bucket.Put(keyPrefix, beforeMigration)
})
if err != nil {
t.Fatalf("unable to insert pre migration key: %v", err)
}
}
// Create a migration function which changes the initially created data and
// returns an error; in this case we're pretending that something goes
// wrong.
migrationWithFatal := func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
migrationWithFatal := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -303,8 +307,8 @@ func TestMigrationWithFatal(t *testing.T) {
t.Fatal("migration failed but version is changed")
}
err = d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -341,20 +345,22 @@ func TestMigrationWithoutErrors(t *testing.T) {
// Populate database with initial data.
beforeMigrationFunc := func(d *DB) {
d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
bucket.Put(keyPrefix, beforeMigration)
return nil
return bucket.Put(keyPrefix, beforeMigration)
})
if err != nil {
t.Fatalf("unable to update db pre migration: %v", err)
}
}
// Create migration function which changes the initially created data.
migrationWithoutErrors := func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
migrationWithoutErrors := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -375,8 +381,8 @@ func TestMigrationWithoutErrors(t *testing.T) {
"successfully applied migration")
}
err = d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -419,7 +425,7 @@ func TestMigrationReversion(t *testing.T) {
// Update the database metadata to point to one more than the highest
// known version.
err = cdb.Update(func(tx *bbolt.Tx) error {
err = kvdb.Update(cdb, func(tx kvdb.RwTx) error {
newMeta := &Meta{
DbVersionNumber: getLatestDBVersion(dbVersions) + 1,
}

@ -3,7 +3,7 @@ package migration12
import (
"bytes"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -12,11 +12,11 @@ var emptyFeatures = lnwire.NewFeatureVector(nil, nil)
// MigrateInvoiceTLV migrates all existing invoice bodies over to be serialized
// in a single TLV stream. In the process, we drop the Receipt field and add
// PaymentAddr and Features to the invoice Terms.
func MigrateInvoiceTLV(tx *bbolt.Tx) error {
func MigrateInvoiceTLV(tx kvdb.RwTx) error {
log.Infof("Migrating invoice bodies to TLV, " +
"adding payment addresses and feature vectors.")
invoiceB := tx.Bucket(invoiceBucket)
invoiceB := tx.ReadWriteBucket(invoiceBucket)
if invoiceB == nil {
return nil
}

@ -5,7 +5,7 @@ import (
"fmt"
"testing"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migration12"
"github.com/lightningnetwork/lnd/channeldb/migtest"
"github.com/lightningnetwork/lnd/lntypes"
@ -121,15 +121,15 @@ var (
type migrationTest struct {
name string
beforeMigration func(*bbolt.Tx) error
afterMigration func(*bbolt.Tx) error
beforeMigration func(kvdb.RwTx) error
afterMigration func(kvdb.RwTx) error
}
var migrationTests = []migrationTest{
{
name: "no invoices",
beforeMigration: func(*bbolt.Tx) error { return nil },
afterMigration: func(*bbolt.Tx) error { return nil },
beforeMigration: func(kvdb.RwTx) error { return nil },
afterMigration: func(kvdb.RwTx) error { return nil },
},
{
name: "zero htlcs",
@ -145,9 +145,9 @@ var migrationTests = []migrationTest{
// genBeforeMigration creates a closure that inserts an invoice serialized under
// the old format under the test payment hash.
func genBeforeMigration(beforeBytes []byte) func(*bbolt.Tx) error {
return func(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(
func genBeforeMigration(beforeBytes []byte) func(kvdb.RwTx) error {
return func(tx kvdb.RwTx) error {
invoices, err := tx.CreateTopLevelBucket(
invoiceBucket,
)
if err != nil {
@ -162,9 +162,9 @@ func genBeforeMigration(beforeBytes []byte) func(*bbolt.Tx) error {
// succeeded, by comparing the resulting encoding of the invoice to the
// expected serialization. In addition, the decoded invoice is compared against
// the expected invoice for equality.
func genAfterMigration(afterBytes []byte) func(*bbolt.Tx) error {
return func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
func genAfterMigration(afterBytes []byte) func(kvdb.RwTx) error {
return func(tx kvdb.RwTx) error {
invoices := tx.ReadWriteBucket(invoiceBucket)
if invoices == nil {
return fmt.Errorf("invoice bucket not found")
}

@ -4,7 +4,7 @@ import (
"encoding/binary"
"fmt"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
@ -48,13 +48,13 @@ var (
// MigrateMPP migrates the payments to a new structure that accommodates for mpp
// payments.
func MigrateMPP(tx *bbolt.Tx) error {
func MigrateMPP(tx kvdb.RwTx) error {
log.Infof("Migrating payments to mpp structure")
// Iterate over all payments and store their indexing keys. This is
// needed, because no modifications are allowed inside a Bucket.ForEach
// loop.
paymentsBucket := tx.Bucket(paymentsRootBucket)
paymentsBucket := tx.ReadWriteBucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
@ -70,7 +70,7 @@ func MigrateMPP(tx *bbolt.Tx) error {
// With all keys retrieved, start the migration.
for _, k := range paymentKeys {
bucket := paymentsBucket.Bucket(k)
bucket := paymentsBucket.NestedReadWriteBucket(k)
// We only expect sub-buckets to be found in
// this top-level bucket.

@ -3,7 +3,7 @@ package migration13
import (
"testing"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migtest"
)
@ -111,10 +111,10 @@ func TestMigrateMpp(t *testing.T) {
migtest.ApplyMigration(
t,
func(tx *bbolt.Tx) error {
func(tx kvdb.RwTx) error {
return migtest.RestoreDB(tx, paymentsRootBucket, pre)
},
func(tx *bbolt.Tx) error {
func(tx kvdb.RwTx) error {
return migtest.VerifyDB(tx, paymentsRootBucket, post)
},
MigrateMPP,

@ -8,7 +8,7 @@ import (
"path/filepath"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
const (
@ -19,7 +19,7 @@ const (
// migration is a function which takes a prior outdated version of the database
// instance and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database.
type migration func(tx *bbolt.Tx) error
type migration func(tx kvdb.RwTx) error
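// For illustration only (hypothetical, not part of this changeset): with the
// signature above, a migration is now written against kvdb.RwTx and the
// top-level bucket helpers rather than *bbolt.Tx. The bucket and key below
// are made up.
func exampleMigration(tx kvdb.RwTx) error {
	bucket, err := tx.CreateTopLevelBucket([]byte("example-bucket"))
	if err != nil {
		return err
	}
	return bucket.Put([]byte("example-key"), []byte{1})
}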
var (
// Big endian is the preferred byte order, due to cursor scans over
@ -31,7 +31,7 @@ var (
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
*bbolt.DB
kvdb.Backend
dbPath string
graph *ChannelGraph
now func() time.Time
@ -55,20 +55,15 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
// Specify bbolt freelist options to reduce heap pressure in case the
// freelist grows to be very large.
options := &bbolt.Options{
NoFreelistSync: opts.NoFreelistSync,
FreelistType: bbolt.FreelistMapType,
}
bdb, err := bbolt.Open(path, dbFilePermission, options)
bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync)
if err != nil {
return nil, err
}
chanDB := &DB{
DB: bdb,
dbPath: dbPath,
now: time.Now,
Backend: bdb,
dbPath: dbPath,
now: time.Now,
}
chanDB.graph = newChannelGraph(
chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
@ -89,28 +84,28 @@ func createChannelDB(dbPath string) error {
}
path := filepath.Join(dbPath, dbName)
bdb, err := bbolt.Open(path, dbFilePermission, nil)
bdb, err := kvdb.Create(kvdb.BoltBackendName, path, false)
if err != nil {
return err
}
err = bdb.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucket(openChannelBucket); err != nil {
err = kvdb.Update(bdb, func(tx kvdb.RwTx) error {
if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(invoiceBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(paymentBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(paymentBucket); err != nil {
return err
}
nodes, err := tx.CreateBucket(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -123,7 +118,7 @@ func createChannelDB(dbPath string) error {
return err
}
edges, err := tx.CreateBucket(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -140,7 +135,7 @@ func createChannelDB(dbPath string) error {
return err
}
graphMeta, err := tx.CreateBucket(graphMetaBucket)
graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil {
return err
}
@ -149,7 +144,7 @@ func createChannelDB(dbPath string) error {
return err
}
if _, err := tx.CreateBucket(metaBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil {
return err
}
@ -185,8 +180,8 @@ func fileExists(path string) bool {
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
var chanSummaries []*ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil {
return ErrNoClosedChannels
}

@ -13,7 +13,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -175,10 +175,10 @@ func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph {
// node based off the source node.
func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
var source *LightningNode
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -202,7 +202,7 @@ func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
// of the graph. The source node is treated as the center node within a
// star-graph. This method may be used to kick off a path finding algorithm in
// order to explore the reachability of another node based off the source node.
func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) sourceNode(nodes kvdb.ReadBucket) (*LightningNode, error) {
selfPub := nodes.Get(sourceKey)
if selfPub == nil {
return nil, ErrSourceNodeNotSet
@ -225,10 +225,10 @@ func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
nodePubBytes := node.PubKeyBytes[:]
return c.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -245,8 +245,8 @@ func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
})
}
func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
func addLightningNode(tx kvdb.RwTx, node *LightningNode) error {
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -270,17 +270,17 @@ func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2.
func updateEdgePolicy(tx *bbolt.Tx, edge *ChannelEdgePolicy) (bool, error) {
edges := tx.Bucket(edgeBucket)
if edges == nil {
func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, error) {
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return false, ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return false, ErrEdgeNotFound
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return false, err
}
@ -551,8 +551,8 @@ func (c *ChannelEdgePolicy) IsDisabled() bool {
lnwire.ChanUpdateDisabled
}
func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
updateIndex *bbolt.Bucket, node *LightningNode) error {
func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket,
updateIndex kvdb.RwBucket, node *LightningNode) error {
var (
scratch [16]byte
@ -680,7 +680,7 @@ func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
return nodeBucket.Put(nodePub, b.Bytes())
}
func fetchLightningNode(nodeBucket *bbolt.Bucket,
func fetchLightningNode(nodeBucket kvdb.ReadBucket,
nodePub []byte) (LightningNode, error) {
nodeBytes := nodeBucket.Get(nodePub)
@ -863,7 +863,7 @@ func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
return edgeInfo, nil
}
func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
from, to []byte) error {
var edgeKey [33 + 8]byte
@ -943,7 +943,7 @@ func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when prune is needed.
func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
direction bool, disabled bool) error {
var disabledEdgeKey [8 + 1]byte
@ -968,7 +968,7 @@ func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
from []byte) error {
var edgeKey [33 + 8]byte
@ -983,8 +983,8 @@ func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
return edges.Put(edgeKey[:], unknownPolicy)
}
func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte,
nodePub []byte, nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
func fetchChanEdgePolicy(edges kvdb.ReadBucket, chanID []byte,
nodePub []byte, nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
var edgeKey [33 + 8]byte
copy(edgeKey[:], nodePub)
@ -1084,7 +1084,7 @@ func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
}
func deserializeChanEdgePolicy(r io.Reader,
nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
edge := &ChannelEdgePolicy{}

@ -8,7 +8,7 @@ import (
"time"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tlv"
@ -252,8 +252,8 @@ func validateInvoice(i *Invoice) error {
func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, error) {
var invoices []Invoice
err := d.View(func(tx *bbolt.Tx) error {
invoiceB := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoiceB := tx.ReadBucket(invoiceBucket)
if invoiceB == nil {
return ErrNoInvoicesCreated
}

@ -1,6 +1,8 @@
package migration_01_to_11
import "github.com/coreos/bbolt"
import (
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
// metaBucket stores all the meta information concerning the state of
@ -21,8 +23,8 @@ type Meta struct {
// putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for
// more information.
func putMeta(meta *Meta, tx *bbolt.Tx) error {
metaBucket, err := tx.CreateBucketIfNotExists(metaBucket)
func putMeta(meta *Meta, tx kvdb.RwTx) error {
metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
if err != nil {
return err
}
@ -30,7 +32,7 @@ func putMeta(meta *Meta, tx *bbolt.Tx) error {
return putDbVersion(metaBucket, meta)
}
func putDbVersion(metaBucket *bbolt.Bucket, meta *Meta) error {
func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error {
scratch := make([]byte, 4)
byteOrder.PutUint32(scratch, meta.DbVersionNumber)
return metaBucket.Put(dbVersionKey, scratch)

@ -3,8 +3,8 @@ package migration_01_to_11
import (
"testing"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// applyMigration is a helper test function that encapsulates the general steps
@ -49,7 +49,7 @@ func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB),
}()
// Apply migration.
err = cdb.Update(func(tx *bbolt.Tx) error {
err = kvdb.Update(cdb, func(tx kvdb.RwTx) error {
return migrationFunc(tx)
})
if err != nil {

@ -7,7 +7,7 @@ import (
"io"
"sort"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -76,8 +76,8 @@ func (db *DB) addPayment(payment *outgoingPayment) error {
}
paymentBytes := b.Bytes()
return db.Batch(func(tx *bbolt.Tx) error {
payments, err := tx.CreateBucketIfNotExists(paymentBucket)
return kvdb.Update(db, func(tx kvdb.RwTx) error {
payments, err := tx.CreateTopLevelBucket(paymentBucket)
if err != nil {
return err
}
@ -104,8 +104,8 @@ func (db *DB) addPayment(payment *outgoingPayment) error {
func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) {
var payments []*outgoingPayment
err := db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(paymentBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
bucket := tx.ReadBucket(paymentBucket)
if bucket == nil {
return ErrNoPaymentsCreated
}
@ -140,7 +140,7 @@ func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) {
// NOTE: Deprecated. Kept around for migration purposes.
func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) {
var paymentStatus = StatusUnknown
err := db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
var err error
paymentStatus, err = fetchPaymentStatusTx(tx, paymentHash)
return err
@ -158,11 +158,11 @@ func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) {
// can be composed into other atomic operations.
//
// NOTE: Deprecated. Kept around for migration purposes.
func fetchPaymentStatusTx(tx *bbolt.Tx, paymentHash [32]byte) (PaymentStatus, error) {
func fetchPaymentStatusTx(tx kvdb.ReadTx, paymentHash [32]byte) (PaymentStatus, error) {
// The default status for all payments that aren't recorded in database.
var paymentStatus = StatusUnknown
bucket := tx.Bucket(paymentStatusBucket)
bucket := tx.ReadBucket(paymentStatusBucket)
if bucket == nil {
return paymentStatus, nil
}
@ -375,14 +375,14 @@ func deserializeHopMigration9(r io.Reader) (*Hop, error) {
func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
var payments []*Payment
err := db.View(func(tx *bbolt.Tx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k)
bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
@ -401,13 +401,13 @@ func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
// payment hash was possible. These will be found in a
// sub-bucket indexed by their sequence number if
// available.
dup := bucket.Bucket(paymentDuplicateBucket)
dup := bucket.NestedReadBucket(paymentDuplicateBucket)
if dup == nil {
return nil
}
return dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k)
subBucket := dup.NestedReadBucket(k)
if subBucket == nil {
// We expect one bucket for each duplicate to
// be found.
@ -437,7 +437,7 @@ func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
return payments, nil
}
func fetchPaymentMigration9(bucket *bbolt.Bucket) (*Payment, error) {
func fetchPaymentMigration9(bucket kvdb.ReadBucket) (*Payment, error) {
var (
err error
p = &Payment{}

@ -4,15 +4,15 @@ import (
"bytes"
"io"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// MigrateRouteSerialization migrates the way we serialize routes across the
// entire database. At the time of writing of this migration, this includes our
// payment attempts, as well as the payment results in mission control.
func MigrateRouteSerialization(tx *bbolt.Tx) error {
func MigrateRouteSerialization(tx kvdb.RwTx) error {
// First, we'll do all the payment attempts.
rootPaymentBucket := tx.Bucket(paymentsRootBucket)
rootPaymentBucket := tx.ReadWriteBucket(paymentsRootBucket)
if rootPaymentBucket == nil {
return nil
}
@ -36,7 +36,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now that we have all the payment hashes, we can carry out the
// migration itself.
for _, payHash := range payHashes {
payHashBucket := rootPaymentBucket.Bucket(payHash)
payHashBucket := rootPaymentBucket.NestedReadWriteBucket(payHash)
// First, we'll migrate the main (non duplicate) payment to
// this hash.
@ -47,7 +47,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now that we've migrated the main payment, we'll also check
// for any duplicate payments to the same payment hash.
dupBucket := payHashBucket.Bucket(paymentDuplicateBucket)
dupBucket := payHashBucket.NestedReadWriteBucket(paymentDuplicateBucket)
// If there's no dup bucket, then we can move on to the next
// payment.
@ -69,7 +69,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now in this second pass, we'll re-serialize their duplicate
// payment attempts under the new encoding.
for _, seqNo := range dupSeqNos {
dupPayHashBucket := dupBucket.Bucket(seqNo)
dupPayHashBucket := dupBucket.NestedReadWriteBucket(seqNo)
err := migrateAttemptEncoding(tx, dupPayHashBucket)
if err != nil {
return err
@ -83,8 +83,8 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
"existing data")
resultsKey := []byte("missioncontrol-results")
err = tx.DeleteBucket(resultsKey)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(resultsKey)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
@ -95,7 +95,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// migrateAttemptEncoding migrates payment attempts using the legacy format to
// the new format.
func migrateAttemptEncoding(tx *bbolt.Tx, payHashBucket *bbolt.Bucket) error {
func migrateAttemptEncoding(tx kvdb.RwTx, payHashBucket kvdb.RwBucket) error {
payAttemptBytes := payHashBucket.Get(paymentAttemptInfoKey)
if payAttemptBytes == nil {
return nil

@ -8,7 +8,7 @@ import (
bitcoinCfg "github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/zpay32"
litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
@ -16,10 +16,10 @@ import (
// MigrateInvoices adds invoice htlcs and a separate cltv delta field to the
// invoices.
func MigrateInvoices(tx *bbolt.Tx) error {
func MigrateInvoices(tx kvdb.RwTx) error {
log.Infof("Migrating invoices to new invoice format")
invoiceB := tx.Bucket(invoiceBucket)
invoiceB := tx.ReadWriteBucket(invoiceBucket)
if invoiceB == nil {
return nil
}

@ -8,7 +8,7 @@ import (
"github.com/btcsuite/btcd/btcec"
bitcoinCfg "github.com/btcsuite/btcd/chaincfg"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/zpay32"
litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
)
@ -26,8 +26,8 @@ var (
// beforeMigrationFuncV11 inserts the test invoices in the database.
func beforeMigrationFuncV11(t *testing.T, d *DB, invoices []Invoice) {
err := d.Update(func(tx *bbolt.Tx) error {
invoicesBucket, err := tx.CreateBucketIfNotExists(
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoicesBucket, err := tx.CreateTopLevelBucket(
invoiceBucket,
)
if err != nil {

@ -7,7 +7,7 @@ import (
"fmt"
"github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -16,11 +16,11 @@ import (
// (one for nodes and one for edges) to keep track of the last time a node or
// edge was updated on the network. These new indexes allow us to implement the
// new graph sync protocol added.
func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
func MigrateNodeAndEdgeUpdateIndex(tx kvdb.RwTx) error {
// First, we'll populate the node portion of the new index. Before we
// can add new values to the index, we'll first create the new bucket
// where these items will be housed.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return fmt.Errorf("unable to create node bucket: %v", err)
}
@ -64,7 +64,7 @@ func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
// With the set of nodes updated, we'll now update all edges to have a
// corresponding entry in the edge update index.
edges, err := tx.CreateBucketIfNotExists(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return fmt.Errorf("unable to create edge bucket: %v", err)
}
@ -121,8 +121,8 @@ func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
// invoices an index in the add and/or the settle index. Additionally, all
// existing invoices will have their bytes padded out in order to encode the
// add+settle index as well as the amount paid.
func MigrateInvoiceTimeSeries(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket)
func MigrateInvoiceTimeSeries(tx kvdb.RwTx) error {
invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil {
return err
}
@ -258,8 +258,8 @@ func MigrateInvoiceTimeSeries(tx *bbolt.Tx) error {
// migrateInvoiceTimeSeries migration. As of the time of writing, the
// OutgoingPayment struct embeds an instance of the Invoice struct. As a
// result, we also need to migrate the internal invoice to the new format.
func MigrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
payBucket := tx.Bucket(paymentBucket)
func MigrateInvoiceTimeSeriesOutgoingPayments(tx kvdb.RwTx) error {
payBucket := tx.ReadWriteBucket(paymentBucket)
if payBucket == nil {
return nil
}
@ -339,18 +339,18 @@ func MigrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
// bucket. It ensures that edges with unknown policies will also have an entry
// in the bucket. After the migration, there will be two edge entries for
// every channel, regardless of whether the policies are known.
func MigrateEdgePolicies(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
func MigrateEdgePolicies(tx kvdb.RwTx) error {
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return nil
}
edges := tx.Bucket(edgeBucket)
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return nil
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return nil
}
@ -411,10 +411,10 @@ func MigrateEdgePolicies(tx *bbolt.Tx) error {
// PaymentStatusesMigration is a database migration intended for adding payment
// statuses for each existing payment entity in the bucket to be able to control
// transitions of statuses and prevent cases such as double payment
func PaymentStatusesMigration(tx *bbolt.Tx) error {
func PaymentStatusesMigration(tx kvdb.RwTx) error {
// Get the bucket dedicated to storing statuses of payments,
// where a key is payment hash, value is payment status.
paymentStatuses, err := tx.CreateBucketIfNotExists(paymentStatusBucket)
paymentStatuses, err := tx.CreateTopLevelBucket(paymentStatusBucket)
if err != nil {
return err
}
@ -422,7 +422,7 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
log.Infof("Migrating database to support payment statuses")
circuitAddKey := []byte("circuit-adds")
circuits := tx.Bucket(circuitAddKey)
circuits := tx.ReadWriteBucket(circuitAddKey)
if circuits != nil {
log.Infof("Marking all known circuits with status InFlight")
@ -455,7 +455,7 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
log.Infof("Marking all existing payments with status Completed")
// Get the bucket dedicated to storing payments
bucket := tx.Bucket(paymentBucket)
bucket := tx.ReadWriteBucket(paymentBucket)
if bucket == nil {
return nil
}
@ -498,14 +498,14 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
// migration also fixes the case where the public keys within edge policies were
// being serialized with an extra byte, causing an even greater error when
// attempting to perform the offset calculation described earlier.
func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
func MigratePruneEdgeUpdateIndex(tx kvdb.RwTx) error {
// To begin the migration, we'll retrieve the update index bucket. If it
// does not exist, we have nothing left to do so we can simply exit.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return nil
}
edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket)
edgeUpdateIndex := edges.NestedReadWriteBucket(edgeUpdateIndexBucket)
if edgeUpdateIndex == nil {
return nil
}
@ -521,7 +521,7 @@ func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
return fmt.Errorf("unable to create/fetch edge index " +
"bucket")
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return fmt.Errorf("unable to make node bucket")
}
@ -612,8 +612,8 @@ func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of
// ChannelCloseSummary to a format where optional fields' presence is indicated
// with boolean markers.
func MigrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error {
closedChanBucket := tx.Bucket(closedChannelBucket)
func MigrateOptionalChannelCloseSummaryFields(tx kvdb.RwTx) error {
closedChanBucket := tx.ReadWriteBucket(closedChannelBucket)
if closedChanBucket == nil {
return nil
}
@ -671,11 +671,11 @@ var messageStoreBucket = []byte("message-store")
// MigrateGossipMessageStoreKeys migrates the key format for gossip messages
// found in the message store to a new one that takes into consideration the
// type of the message being stored.
func MigrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
func MigrateGossipMessageStoreKeys(tx kvdb.RwTx) error {
// We'll start by retrieving the bucket in which these messages are
// stored within. If there isn't one, there's nothing left for us to do
// so we can avoid the migration.
messageStore := tx.Bucket(messageStoreBucket)
messageStore := tx.ReadWriteBucket(messageStoreBucket)
if messageStore == nil {
return nil
}
@ -747,10 +747,10 @@ func MigrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
// InFlight (we have no PaymentAttemptInfo available for pre-migration
// payments) we delete those statuses, so only Completed payments remain in the
// new bucket structure.
func MigrateOutgoingPayments(tx *bbolt.Tx) error {
func MigrateOutgoingPayments(tx kvdb.RwTx) error {
log.Infof("Migrating outgoing payments to new bucket structure")
oldPayments := tx.Bucket(paymentBucket)
oldPayments := tx.ReadWriteBucket(paymentBucket)
// Return early if there are no payments to migrate.
if oldPayments == nil {
@ -758,7 +758,7 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
return nil
}
newPayments, err := tx.CreateBucket(paymentsRootBucket)
newPayments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil {
return err
}
@ -767,7 +767,7 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// only attempt to fetch it if needed.
sourcePub := func() ([33]byte, error) {
var pub [33]byte
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return pub, ErrGraphNotFound
}
@ -862,8 +862,8 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// from a database containing duplicate payments to a payment
// hash. To keep this information, we store such duplicate
// payments in a sub-bucket.
if err == bbolt.ErrBucketExists {
pHashBucket := newPayments.Bucket(paymentHash[:])
if err == kvdb.ErrBucketExists {
pHashBucket := newPayments.NestedReadWriteBucket(paymentHash[:])
// Create a bucket for duplicate payments within this
// payment hash's bucket.
@ -922,14 +922,14 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// Now we delete the old buckets. Deleting the payment status buckets
// deletes all payment statuses other than Complete.
err = tx.DeleteBucket(paymentStatusBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(paymentStatusBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
// Finally delete the old payment bucket.
err = tx.DeleteBucket(paymentBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(paymentBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}

@ -11,9 +11,9 @@ import (
"time"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -59,8 +59,8 @@ func TestPaymentStatusesMigration(t *testing.T) {
// locally-sourced payment should end up with an InFlight
// status, while the other should remain unchanged, which
// defaults to Grounded.
err = d.Update(func(tx *bbolt.Tx) error {
circuits, err := tx.CreateBucketIfNotExists(
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
circuits, err := tx.CreateTopLevelBucket(
[]byte("circuit-adds"),
)
if err != nil {
@ -377,8 +377,8 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
// Get the old serialization format for this test's
// close summary, and add it to the closed channel bucket.
old := test.oldSerialization(test.closeSummary)
err = d.Update(func(tx *bbolt.Tx) error {
closedChanBucket, err := tx.CreateBucketIfNotExists(
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
closedChanBucket, err := tx.CreateTopLevelBucket(
closedChannelBucket,
)
if err != nil {
@ -404,8 +404,8 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
newSerialization := b.Bytes()
var dbSummary []byte
err = d.View(func(tx *bbolt.Tx) error {
closedChanBucket := tx.Bucket(closedChannelBucket)
err = kvdb.View(d, func(tx kvdb.ReadTx) error {
closedChanBucket := tx.ReadBucket(closedChannelBucket)
if closedChanBucket == nil {
return errors.New("unable to find bucket")
}
@ -482,8 +482,8 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) {
t.Fatalf("unable to serialize message: %v", err)
}
err := db.Update(func(tx *bbolt.Tx) error {
messageStore, err := tx.CreateBucketIfNotExists(
err := kvdb.Update(db, func(tx kvdb.RwTx) error {
messageStore, err := tx.CreateTopLevelBucket(
messageStoreBucket,
)
if err != nil {
@ -503,8 +503,8 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) {
// 3. The message matches the original.
afterMigration := func(db *DB) {
var rawMsg []byte
err := db.View(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
messageStore := tx.ReadBucket(messageStoreBucket)
if messageStore == nil {
return errors.New("message store bucket not " +
"found")
@ -666,8 +666,8 @@ func TestOutgoingPaymentsMigration(t *testing.T) {
// Finally, check that the payment sequence number is updated
// to reflect the migrated payments.
err = d.View(func(tx *bbolt.Tx) error {
payments := tx.Bucket(paymentsRootBucket)
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil {
return fmt.Errorf("payments bucket not found")
}
@ -746,8 +746,8 @@ func TestPaymentRouteSerialization(t *testing.T) {
// We'll first add a series of fake payments, using the existing legacy
// serialization format.
beforeMigrationFunc := func(d *DB) {
err := d.Update(func(tx *bbolt.Tx) error {
paymentsBucket, err := tx.CreateBucket(
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
paymentsBucket, err := tx.CreateTopLevelBucket(
paymentsRootBucket,
)
if err != nil {
@ -798,7 +798,7 @@ func TestPaymentRouteSerialization(t *testing.T) {
// the proper bucket. If this is the duplicate
// payment, then we'll grab the dup bucket,
// otherwise, we'll use the top level bucket.
var payHashBucket *bbolt.Bucket
var payHashBucket kvdb.RwBucket
if i < numPayments-1 {
payHashBucket, err = paymentsBucket.CreateBucket(
payInfo.PaymentHash[:],
@ -807,7 +807,7 @@ func TestPaymentRouteSerialization(t *testing.T) {
t.Fatalf("unable to create payments bucket: %v", err)
}
} else {
payHashBucket = paymentsBucket.Bucket(
payHashBucket = paymentsBucket.NestedReadWriteBucket(
payInfo.PaymentHash[:],
)
dupPayBucket, err := payHashBucket.CreateBucket(

@ -1,12 +1,10 @@
package migration_01_to_11
import (
"github.com/coreos/bbolt"
)
import "github.com/lightningnetwork/lnd/channeldb/kvdb"
// fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket *bbolt.Bucket) PaymentStatus {
func fetchPaymentStatus(bucket kvdb.ReadBucket) PaymentStatus {
if bucket.Get(paymentSettleInfoKey) != nil {
return StatusSucceeded
}

@ -11,7 +11,7 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tlv"
@ -254,14 +254,14 @@ type Payment struct {
func (db *DB) FetchPayments() ([]*Payment, error) {
var payments []*Payment
err := db.View(func(tx *bbolt.Tx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k)
bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
@ -280,13 +280,13 @@ func (db *DB) FetchPayments() ([]*Payment, error) {
// payment hash was possible. These will be found in a
// sub-bucket indexed by their sequence number if
// available.
dup := bucket.Bucket(paymentDuplicateBucket)
dup := bucket.NestedReadBucket(paymentDuplicateBucket)
if dup == nil {
return nil
}
return dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k)
subBucket := dup.NestedReadBucket(k)
if subBucket == nil {
// We expect one bucket for each duplicate to
// be found.
@ -316,7 +316,7 @@ func (db *DB) FetchPayments() ([]*Payment, error) {
return payments, nil
}
func fetchPayment(bucket *bbolt.Bucket) (*Payment, error) {
func fetchPayment(bucket kvdb.ReadBucket) (*Payment, error) {
var (
err error
p = &Payment{}

@ -6,13 +6,13 @@ import (
"os"
"testing"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// MakeDB creates a new instance of the ChannelDB for testing purposes. A
// callback which cleans up the created temporary directories is also returned
// and intended to be executed after the test completes.
func MakeDB() (*bbolt.DB, func(), error) {
func MakeDB() (kvdb.Backend, func(), error) {
// Create temporary database for mission control.
file, err := ioutil.TempFile("", "*.db")
if err != nil {
@ -20,7 +20,7 @@ func MakeDB() (*bbolt.DB, func(), error) {
}
dbPath := file.Name()
db, err := bbolt.Open(dbPath, 0600, nil)
db, err := kvdb.Open(kvdb.BoltBackendName, dbPath, true)
if err != nil {
return nil, nil, err
}
@ -36,7 +36,7 @@ func MakeDB() (*bbolt.DB, func(), error) {
// ApplyMigration is a helper test function that encapsulates the general steps
// which are needed to properly check the result of applying a migration function.
func ApplyMigration(t *testing.T,
beforeMigration, afterMigration, migrationFunc func(tx *bbolt.Tx) error,
beforeMigration, afterMigration, migrationFunc func(tx kvdb.RwTx) error,
shouldFail bool) {
cdb, cleanUp, err := MakeDB()
@ -47,7 +47,7 @@ func ApplyMigration(t *testing.T,
// beforeMigration is usually used for populating the database
// with test data.
err = cdb.Update(beforeMigration)
err = kvdb.Update(cdb, beforeMigration)
if err != nil {
t.Fatal(err)
}
@ -65,14 +65,14 @@ func ApplyMigration(t *testing.T,
// afterMigration is usually used for checking the database state and
// throwing an error if something went wrong.
err = cdb.Update(afterMigration)
err = kvdb.Update(cdb, afterMigration)
if err != nil {
t.Fatal(err)
}
}()
// Apply migration.
err = cdb.Update(migrationFunc)
err = kvdb.Update(cdb, migrationFunc)
if err != nil {
t.Fatal(err)
}

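A hedged usage sketch of the test harness above: it assumes these helpers live in the channeldb/migtest package, and the noopMigration function, closures, and bucket names are invented for illustration. ApplyMigration's signature (beforeMigration, afterMigration, migrationFunc all taking a kvdb.RwTx) is taken from the hunk above.

package migtest_test

import (
	"errors"
	"testing"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/channeldb/migtest"
)

// noopMigration is a trivial migration used only to exercise ApplyMigration.
func noopMigration(tx kvdb.RwTx) error {
	_, err := tx.CreateTopLevelBucket([]byte("migrated"))
	return err
}

func TestNoopMigration(t *testing.T) {
	// Seed the database before the migration runs.
	populate := func(tx kvdb.RwTx) error {
		_, err := tx.CreateTopLevelBucket([]byte("legacy"))
		return err
	}

	// Confirm the migration created its bucket.
	verify := func(tx kvdb.RwTx) error {
		if tx.ReadWriteBucket([]byte("migrated")) == nil {
			return errors.New("migrated bucket missing")
		}
		return nil
	}

	migtest.ApplyMigration(t, populate, verify, noopMigration, false)
}
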
@ -7,7 +7,7 @@ import (
"fmt"
"strings"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// DumpDB dumps go code describing the contents of the database to stdout. This
@ -21,8 +21,8 @@ import (
// hex("1111"): hex("5783492373"),
// },
// }
func DumpDB(tx *bbolt.Tx, rootKey []byte) error {
bucket := tx.Bucket(rootKey)
func DumpDB(tx kvdb.ReadTx, rootKey []byte) error {
bucket := tx.ReadBucket(rootKey)
if bucket == nil {
return fmt.Errorf("bucket %v not found", string(rootKey))
}
@ -30,13 +30,13 @@ func DumpDB(tx *bbolt.Tx, rootKey []byte) error {
return dumpBucket(bucket)
}
func dumpBucket(bucket *bbolt.Bucket) error {
func dumpBucket(bucket kvdb.ReadBucket) error {
fmt.Printf("map[string]interface{} {\n")
err := bucket.ForEach(func(k, v []byte) error {
key := toString(k)
fmt.Printf("%v: ", key)
subBucket := bucket.Bucket(k)
subBucket := bucket.NestedReadBucket(k)
if subBucket != nil {
err := dumpBucket(subBucket)
if err != nil {
@ -58,8 +58,8 @@ func dumpBucket(bucket *bbolt.Bucket) error {
}
// RestoreDB primes the database with the given data set.
func RestoreDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error {
bucket, err := tx.CreateBucket(rootKey)
func RestoreDB(tx kvdb.RwTx, rootKey []byte, data map[string]interface{}) error {
bucket, err := tx.CreateTopLevelBucket(rootKey)
if err != nil {
return err
}
@ -67,7 +67,7 @@ func RestoreDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error
return restoreDB(bucket, data)
}
func restoreDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
func restoreDB(bucket kvdb.RwBucket, data map[string]interface{}) error {
for k, v := range data {
key := []byte(k)
@ -100,8 +100,8 @@ func restoreDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
}
// VerifyDB verifies the database against the given data set.
func VerifyDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error {
bucket := tx.Bucket(rootKey)
func VerifyDB(tx kvdb.ReadTx, rootKey []byte, data map[string]interface{}) error {
bucket := tx.ReadBucket(rootKey)
if bucket == nil {
return fmt.Errorf("bucket %v not found", string(rootKey))
}
@ -109,7 +109,7 @@ func VerifyDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error {
return verifyDB(bucket, data)
}
func verifyDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
func verifyDB(bucket kvdb.ReadBucket, data map[string]interface{}) error {
for k, v := range data {
key := []byte(k)
@ -126,7 +126,7 @@ func verifyDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
// Key contains a sub-bucket.
case map[string]interface{}:
subBucket := bucket.Bucket(key)
subBucket := bucket.NestedReadBucket(key)
if subBucket == nil {
return fmt.Errorf("bucket %v not found", k)
}

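As a usage sketch of the dump/restore/verify helpers above: the root key and data set below are invented, and it assumes (as the hex-string example in the DumpDB comment suggests) that plain string leaves map to raw bucket values. The helpers compose with the kvdb transaction wrappers like so:

package migtest_example

import (
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/channeldb/migtest"
)

// roundTripExample primes a hypothetical root bucket from a Go map, then
// verifies the database contents against the same map.
func roundTripExample(db kvdb.Backend) error {
	rootKey := []byte("example-root")
	data := map[string]interface{}{
		"key": "value",
		"nested": map[string]interface{}{
			"inner": "payload",
		},
	}

	// Prime the database inside a read/write transaction...
	if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
		return migtest.RestoreDB(tx, rootKey, data)
	}); err != nil {
		return err
	}

	// ...then check it back inside a read-only transaction.
	return kvdb.View(db, func(tx kvdb.ReadTx) error {
		return migtest.VerifyDB(tx, rootKey, data)
	})
}
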
@ -8,7 +8,7 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
@ -101,8 +101,8 @@ func (l *LinkNode) Sync() error {
// Finally update the database by storing the link node and updating
// any relevant indexes.
return l.db.Update(func(tx *bbolt.Tx) error {
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
return kvdb.Update(l.db, func(tx kvdb.RwTx) error {
nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return ErrLinkNodesNotFound
}
@ -114,7 +114,7 @@ func (l *LinkNode) Sync() error {
// putLinkNode serializes then writes the encoded version of the passed link
// node into the nodeMetaBucket. This function is provided to allow re-using
// a database transaction across many operations.
func putLinkNode(nodeMetaBucket *bbolt.Bucket, l *LinkNode) error {
func putLinkNode(nodeMetaBucket kvdb.RwBucket, l *LinkNode) error {
// First serialize the LinkNode into its raw-bytes encoding.
var b bytes.Buffer
if err := serializeLinkNode(&b, l); err != nil {
@ -130,13 +130,13 @@ func putLinkNode(nodeMetaBucket *bbolt.Bucket, l *LinkNode) error {
// DeleteLinkNode removes the link node with the given identity from the
// database.
func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) error {
return db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(db, func(tx kvdb.RwTx) error {
return db.deleteLinkNode(tx, identity)
})
}
func (db *DB) deleteLinkNode(tx *bbolt.Tx, identity *btcec.PublicKey) error {
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) error {
nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return ErrLinkNodesNotFound
}
@ -150,7 +150,7 @@ func (db *DB) deleteLinkNode(tx *bbolt.Tx, identity *btcec.PublicKey) error {
// key cannot be found, then ErrNodeNotFound is returned.
func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) {
var linkNode *LinkNode
err := db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
node, err := fetchLinkNode(tx, identity)
if err != nil {
return err
@ -163,10 +163,10 @@ func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) {
return linkNode, err
}
func fetchLinkNode(tx *bbolt.Tx, targetPub *btcec.PublicKey) (*LinkNode, error) {
func fetchLinkNode(tx kvdb.ReadTx, targetPub *btcec.PublicKey) (*LinkNode, error) {
// First fetch the bucket for storing node metadata, bailing out early
// if it hasn't been created yet.
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return nil, ErrLinkNodesNotFound
}
@ -191,7 +191,7 @@ func fetchLinkNode(tx *bbolt.Tx, targetPub *btcec.PublicKey) (*LinkNode, error)
// whom we have active channels.
func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) {
var linkNodes []*LinkNode
err := db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
nodes, err := db.fetchAllLinkNodes(tx)
if err != nil {
return err
@ -209,8 +209,8 @@ func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) {
// fetchAllLinkNodes uses an existing database transaction to fetch all nodes
// with whom we have active channels.
func (db *DB) fetchAllLinkNodes(tx *bbolt.Tx) ([]*LinkNode, error) {
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
func (db *DB) fetchAllLinkNodes(tx kvdb.ReadTx) ([]*LinkNode, error) {
nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return nil, ErrLinkNodesNotFound
}

@ -6,7 +6,7 @@ import (
"errors"
"fmt"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
)
@ -65,7 +65,7 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
infoBytes := b.Bytes()
var updateErr error
err := p.db.Batch(func(tx *bbolt.Tx) error {
err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
// Reset the update error, to avoid carrying over an error
// from a previous execution of the batched db transaction.
updateErr = nil
@ -130,8 +130,8 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
// We'll delete any lingering HTLCs to start with, in case we
// are initializing a payment that was attempted earlier, but
// left in a state where we could retry.
err = bucket.DeleteBucket(paymentHtlcsBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = bucket.DeleteNestedBucket(paymentHtlcsBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
@ -162,9 +162,8 @@ func (p *PaymentControl) RegisterAttempt(paymentHash lntypes.Hash,
htlcIDBytes := make([]byte, 8)
binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID)
return p.db.Update(func(tx *bbolt.Tx) error {
// Get the payment bucket to register this new attempt in.
bucket, err := fetchPaymentBucket(tx, paymentHash)
return kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
if err != nil {
return err
}
@ -234,10 +233,10 @@ func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash,
binary.BigEndian.PutUint64(htlcIDBytes, attemptID)
var payment *MPPayment
err := p.db.Batch(func(tx *bbolt.Tx) error {
// Fetch bucket that contains all information for the payment
// with this hash.
bucket, err := fetchPaymentBucket(tx, paymentHash)
err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
payment = nil
bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
if err != nil {
return err
}
@ -247,12 +246,12 @@ func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash,
return err
}
htlcsBucket := bucket.Bucket(paymentHtlcsBucket)
htlcsBucket := bucket.NestedReadWriteBucket(paymentHtlcsBucket)
if htlcsBucket == nil {
return fmt.Errorf("htlcs bucket not found")
}
htlcBucket := htlcsBucket.Bucket(htlcIDBytes)
htlcBucket := htlcsBucket.NestedReadWriteBucket(htlcIDBytes)
if htlcBucket == nil {
return fmt.Errorf("HTLC with ID %v not registered",
attemptID)
@ -286,13 +285,13 @@ func (p *PaymentControl) Fail(paymentHash lntypes.Hash,
updateErr error
payment *MPPayment
)
err := p.db.Batch(func(tx *bbolt.Tx) error {
err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
// Reset the update error, to avoid carrying over an error
// from a previous execution of the batched db transaction.
updateErr = nil
payment = nil
bucket, err := fetchPaymentBucket(tx, paymentHash)
bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
if err == ErrPaymentNotInitiated {
updateErr = ErrPaymentNotInitiated
return nil
@ -341,7 +340,7 @@ func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) (
*MPPayment, error) {
var payment *MPPayment
err := p.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(p.db, func(tx kvdb.ReadTx) error {
bucket, err := fetchPaymentBucket(tx, paymentHash)
if err != nil {
return err
@ -360,10 +359,10 @@ func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) (
// createPaymentBucket creates or fetches the sub-bucket assigned to this
// payment hash.
func createPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
*bbolt.Bucket, error) {
func createPaymentBucket(tx kvdb.RwTx, paymentHash lntypes.Hash) (
kvdb.RwBucket, error) {
payments, err := tx.CreateBucketIfNotExists(paymentsRootBucket)
payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil {
return nil, err
}
@ -373,15 +372,34 @@ func createPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
// fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If
// the bucket does not exist, it returns ErrPaymentNotInitiated.
func fetchPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
*bbolt.Bucket, error) {
func fetchPaymentBucket(tx kvdb.ReadTx, paymentHash lntypes.Hash) (
kvdb.ReadBucket, error) {
payments := tx.Bucket(paymentsRootBucket)
payments := tx.ReadBucket(paymentsRootBucket)
if payments == nil {
return nil, ErrPaymentNotInitiated
}
bucket := payments.Bucket(paymentHash[:])
bucket := payments.NestedReadBucket(paymentHash[:])
if bucket == nil {
return nil, ErrPaymentNotInitiated
}
return bucket, nil
}
// fetchPaymentBucketUpdate is identical to fetchPaymentBucket, but it returns a
// bucket that can be written to.
func fetchPaymentBucketUpdate(tx kvdb.RwTx, paymentHash lntypes.Hash) (
kvdb.RwBucket, error) {
payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil {
return nil, ErrPaymentNotInitiated
}
bucket := payments.NestedReadWriteBucket(paymentHash[:])
if bucket == nil {
return nil, ErrPaymentNotInitiated
}
@ -391,8 +409,8 @@ func fetchPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
// nextPaymentSequence returns the next sequence number to store for a new
// payment.
func nextPaymentSequence(tx *bbolt.Tx) ([]byte, error) {
payments, err := tx.CreateBucketIfNotExists(paymentsRootBucket)
func nextPaymentSequence(tx kvdb.RwTx) ([]byte, error) {
payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil {
return nil, err
}
@ -409,8 +427,8 @@ func nextPaymentSequence(tx *bbolt.Tx) ([]byte, error) {
// fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) {
htlcsBucket := bucket.Bucket(paymentHtlcsBucket)
func fetchPaymentStatus(bucket kvdb.ReadBucket) (PaymentStatus, error) {
htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
if htlcsBucket != nil {
htlcs, err := fetchHtlcAttempts(htlcsBucket)
if err != nil {
@ -424,7 +442,6 @@ func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) {
return StatusSucceeded, nil
}
}
}
if bucket.Get(paymentFailInfoKey) != nil {
@ -441,7 +458,7 @@ func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) {
// ensureInFlight checks whether the payment found in the given bucket has
// status InFlight, and returns an error otherwise. This should be used to
// ensure we only mark in-flight payments as succeeded or failed.
func ensureInFlight(bucket *bbolt.Bucket) error {
func ensureInFlight(bucket kvdb.ReadBucket) error {
paymentStatus, err := fetchPaymentStatus(bucket)
if err != nil {
return err
@ -486,14 +503,14 @@ type InFlightPayment struct {
// FetchInFlightPayments returns all payments with status InFlight.
func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) {
var inFlights []*InFlightPayment
err := p.db.View(func(tx *bbolt.Tx) error {
payments := tx.Bucket(paymentsRootBucket)
err := kvdb.View(p.db, func(tx kvdb.ReadTx) error {
payments := tx.ReadBucket(paymentsRootBucket)
if payments == nil {
return nil
}
return payments.ForEach(func(k, _ []byte) error {
bucket := payments.Bucket(k)
bucket := payments.NestedReadBucket(k)
if bucket == nil {
return fmt.Errorf("non bucket element")
}
@ -523,7 +540,9 @@ func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) {
return err
}
htlcsBucket := bucket.Bucket(paymentHtlcsBucket)
htlcsBucket := bucket.NestedReadBucket(
paymentHtlcsBucket,
)
if htlcsBucket == nil {
return nil
}

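The payment control hunks above split the old fetchPaymentBucket into a read-only variant and fetchPaymentBucketUpdate, since a kvdb.ReadTx only exposes ReadBucket. A sketch of that read/write helper split against a hypothetical bucket (the example names and error strings are not from the commit):

package example

import (
	"errors"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// paymentsExampleBucket is a hypothetical top-level bucket.
var paymentsExampleBucket = []byte("example-payments")

// fetchExampleBucketRead returns the read-only view of a nested bucket.
func fetchExampleBucketRead(tx kvdb.ReadTx, key []byte) (kvdb.ReadBucket, error) {
	root := tx.ReadBucket(paymentsExampleBucket)
	if root == nil {
		return nil, errors.New("root bucket not found")
	}
	bucket := root.NestedReadBucket(key)
	if bucket == nil {
		return nil, errors.New("nested bucket not found")
	}
	return bucket, nil
}

// fetchExampleBucketWrite returns the same bucket, but writable, for callers
// that hold a kvdb.RwTx and need to mutate it.
func fetchExampleBucketWrite(tx kvdb.RwTx, key []byte) (kvdb.RwBucket, error) {
	root := tx.ReadWriteBucket(paymentsExampleBucket)
	if root == nil {
		return nil, errors.New("root bucket not found")
	}
	bucket := root.NestedReadWriteBucket(key)
	if bucket == nil {
		return nil, errors.New("nested bucket not found")
	}
	return bucket, nil
}
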
@ -9,7 +9,7 @@ import (
"time"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record"
@ -200,14 +200,14 @@ type PaymentCreationInfo struct {
func (db *DB) FetchPayments() ([]*MPPayment, error) {
var payments []*MPPayment
err := db.View(func(tx *bbolt.Tx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k)
bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
@ -232,7 +232,6 @@ func (db *DB) FetchPayments() ([]*MPPayment, error) {
}
payments = append(payments, duplicatePayments...)
return nil
})
})
@ -248,7 +247,7 @@ func (db *DB) FetchPayments() ([]*MPPayment, error) {
return payments, nil
}
func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) {
func fetchPayment(bucket kvdb.ReadBucket) (*MPPayment, error) {
seqBytes := bucket.Get(paymentSequenceKey)
if seqBytes == nil {
return nil, fmt.Errorf("sequence number not found")
@ -276,7 +275,7 @@ func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) {
}
var htlcs []HTLCAttempt
htlcsBucket := bucket.Bucket(paymentHtlcsBucket)
htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
if htlcsBucket != nil {
// Get the payment attempts. This can be empty.
htlcs, err = fetchHtlcAttempts(htlcsBucket)
@ -304,12 +303,12 @@ func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) {
// fetchHtlcAttempts retrieves all htlc attempts made for the payment found in
// the given bucket.
func fetchHtlcAttempts(bucket *bbolt.Bucket) ([]HTLCAttempt, error) {
func fetchHtlcAttempts(bucket kvdb.ReadBucket) ([]HTLCAttempt, error) {
htlcs := make([]HTLCAttempt, 0)
err := bucket.ForEach(func(k, _ []byte) error {
aid := byteOrder.Uint64(k)
htlcBucket := bucket.Bucket(k)
htlcBucket := bucket.NestedReadBucket(k)
attemptInfo, err := fetchHtlcAttemptInfo(
htlcBucket,
@ -347,7 +346,7 @@ func fetchHtlcAttempts(bucket *bbolt.Bucket) ([]HTLCAttempt, error) {
// fetchHtlcAttemptInfo fetches the payment attempt info for this htlc from the
// bucket.
func fetchHtlcAttemptInfo(bucket *bbolt.Bucket) (*HTLCAttemptInfo, error) {
func fetchHtlcAttemptInfo(bucket kvdb.ReadBucket) (*HTLCAttemptInfo, error) {
b := bucket.Get(htlcAttemptInfoKey)
if b == nil {
return nil, errNoAttemptInfo
@ -359,7 +358,7 @@ func fetchHtlcAttemptInfo(bucket *bbolt.Bucket) (*HTLCAttemptInfo, error) {
// fetchHtlcSettleInfo retrieves the settle info for the htlc. If the htlc isn't
// settled, nil is returned.
func fetchHtlcSettleInfo(bucket *bbolt.Bucket) (*HTLCSettleInfo, error) {
func fetchHtlcSettleInfo(bucket kvdb.ReadBucket) (*HTLCSettleInfo, error) {
b := bucket.Get(htlcSettleInfoKey)
if b == nil {
// Settle info is optional.
@ -372,7 +371,7 @@ func fetchHtlcSettleInfo(bucket *bbolt.Bucket) (*HTLCSettleInfo, error) {
// fetchHtlcFailInfo retrieves the failure info for the htlc. If the htlc hasn't
// failed, nil is returned.
func fetchHtlcFailInfo(bucket *bbolt.Bucket) (*HTLCFailInfo, error) {
func fetchHtlcFailInfo(bucket kvdb.ReadBucket) (*HTLCFailInfo, error) {
b := bucket.Get(htlcFailInfoKey)
if b == nil {
// Fail info is optional.
@ -385,15 +384,15 @@ func fetchHtlcFailInfo(bucket *bbolt.Bucket) (*HTLCFailInfo, error) {
// DeletePayments deletes all completed and failed payments from the DB.
func (db *DB) DeletePayments() error {
return db.Update(func(tx *bbolt.Tx) error {
payments := tx.Bucket(paymentsRootBucket)
return kvdb.Update(db, func(tx kvdb.RwTx) error {
payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil {
return nil
}
var deleteBuckets [][]byte
err := payments.ForEach(func(k, _ []byte) error {
bucket := payments.Bucket(k)
bucket := payments.NestedReadWriteBucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
@ -420,7 +419,7 @@ func (db *DB) DeletePayments() error {
}
for _, k := range deleteBuckets {
if err := payments.DeleteBucket(k); err != nil {
if err := payments.DeleteNestedBucket(k); err != nil {
return err
}
}

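The DeletePayments hunk above keeps the existing two-phase pattern (collect sub-bucket keys during ForEach, delete them afterwards), only with the kvdb types. A minimal sketch of that pattern against a hypothetical bucket:

package example

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// recordsBucket is a hypothetical top-level bucket.
var recordsBucket = []byte("example-records")

// purgeSubBuckets deletes every sub-bucket under recordsBucket. Keys are
// collected first because deleting inside ForEach would invalidate the
// iteration.
func purgeSubBuckets(db kvdb.Backend) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		root := tx.ReadWriteBucket(recordsBucket)
		if root == nil {
			return nil
		}

		var toDelete [][]byte
		err := root.ForEach(func(k, _ []byte) error {
			// Skip plain key/value pairs; only sub-buckets are
			// collected for deletion.
			if root.NestedReadWriteBucket(k) == nil {
				return nil
			}
			toDelete = append(toDelete, k)
			return nil
		})
		if err != nil {
			return err
		}

		for _, k := range toDelete {
			if err := root.DeleteNestedBucket(k); err != nil {
				return err
			}
		}
		return nil
	})
}
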
@ -8,8 +8,8 @@ import (
"bytes"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -61,12 +61,12 @@ func (s *WaitingProofStore) Add(proof *WaitingProof) error {
s.mu.Lock()
defer s.mu.Unlock()
err := s.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
var err error
var b bytes.Buffer
// Get or create the bucket.
bucket, err := tx.CreateBucketIfNotExists(waitingProofsBucketKey)
bucket, err := tx.CreateTopLevelBucket(waitingProofsBucketKey)
if err != nil {
return err
}
@ -100,9 +100,9 @@ func (s *WaitingProofStore) Remove(key WaitingProofKey) error {
return ErrWaitingProofNotFound
}
err := s.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
// Get or create the top bucket.
bucket := tx.Bucket(waitingProofsBucketKey)
bucket := tx.ReadWriteBucket(waitingProofsBucketKey)
if bucket == nil {
return ErrWaitingProofNotFound
}
@ -123,8 +123,8 @@ func (s *WaitingProofStore) Remove(key WaitingProofKey) error {
// ForAll iterates through all waiting proofs, passing each waiting proof
// to the given callback.
func (s *WaitingProofStore) ForAll(cb func(*WaitingProof) error) error {
return s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(waitingProofsBucketKey)
return kvdb.View(s.db, func(tx kvdb.ReadTx) error {
bucket := tx.ReadBucket(waitingProofsBucketKey)
if bucket == nil {
return ErrWaitingProofNotFound
}
@ -158,8 +158,8 @@ func (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, error) {
return nil, ErrWaitingProofNotFound
}
err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(waitingProofsBucketKey)
err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
bucket := tx.ReadBucket(waitingProofsBucketKey)
if bucket == nil {
return ErrWaitingProofNotFound
}

@ -3,7 +3,7 @@ package channeldb
import (
"fmt"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
)
@ -106,8 +106,8 @@ func (w *WitnessCache) addWitnessEntries(wType WitnessType,
return nil
}
return w.db.Batch(func(tx *bbolt.Tx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey)
return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil {
return err
}
@ -150,8 +150,8 @@ func (w *WitnessCache) LookupSha256Witness(hash lntypes.Hash) (lntypes.Preimage,
// will be returned.
func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]byte, error) {
var witness []byte
err := w.db.View(func(tx *bbolt.Tx) error {
witnessBucket := tx.Bucket(witnessBucketKey)
err := kvdb.View(w.db, func(tx kvdb.ReadTx) error {
witnessBucket := tx.ReadBucket(witnessBucketKey)
if witnessBucket == nil {
return ErrNoWitnesses
}
@ -160,7 +160,7 @@ func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]by
if err != nil {
return err
}
witnessTypeBucket := witnessBucket.Bucket(witnessTypeBucketKey)
witnessTypeBucket := witnessBucket.NestedReadBucket(witnessTypeBucketKey)
if witnessTypeBucket == nil {
return ErrNoWitnesses
}
@ -189,8 +189,8 @@ func (w *WitnessCache) DeleteSha256Witness(hash lntypes.Hash) error {
// deleteWitness attempts to delete a particular witness from the database.
func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error {
return w.db.Batch(func(tx *bbolt.Tx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey)
return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil {
return err
}
@ -213,8 +213,8 @@ func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error
// DeleteWitnessClass attempts to delete an *entire* class of witnesses. After
// this function returns with a nil error, the entire witness class will have
// been removed from the database.
func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error {
return w.db.Batch(func(tx *bbolt.Tx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey)
return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil {
return err
}
@ -224,6 +224,6 @@ func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error {
return err
}
return witnessBucket.DeleteBucket(witnessTypeBucketKey)
return witnessBucket.DeleteNestedBucket(witnessTypeBucketKey)
})
}

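The witness cache now routes writes through kvdb.Batch, which is handed the raw backend (hence db.Backend above) and may coalesce the closure with other concurrent writers. A hedged sketch of that usage; the bucket name, key, and bumpCounter helper are invented for illustration:

package example

import (
	"encoding/binary"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// countersBucket is a hypothetical top-level bucket.
var countersBucket = []byte("example-counters")

// bumpCounter increments a single key inside a batched transaction. Because
// kvdb.Batch may re-run the closure, all state it needs is read back from
// the bucket rather than carried over from a previous attempt.
func bumpCounter(db kvdb.Backend, key []byte) error {
	return kvdb.Batch(db, func(tx kvdb.RwTx) error {
		bkt, err := tx.CreateTopLevelBucket(countersBucket)
		if err != nil {
			return err
		}

		count := uint64(0)
		if v := bkt.Get(key); len(v) == 8 {
			count = binary.BigEndian.Uint64(v)
		}

		var buf [8]byte
		binary.BigEndian.PutUint64(buf[:], count+1)
		return bkt.Put(key, buf[:])
	})
}
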
@ -8,8 +8,8 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnwallet"
)
@ -309,7 +309,7 @@ var (
// boltArbitratorLog is an implementation of the ArbitratorLog interface backed
// by a bolt DB instance.
type boltArbitratorLog struct {
db *bbolt.DB
db kvdb.Backend
cfg ChannelArbitratorConfig
@ -318,7 +318,7 @@ type boltArbitratorLog struct {
// newBoltArbitratorLog returns a new instance of the boltArbitratorLog given
// an arbitrator config, and the items needed to create its log scope.
func newBoltArbitratorLog(db *bbolt.DB, cfg ChannelArbitratorConfig,
func newBoltArbitratorLog(db kvdb.Backend, cfg ChannelArbitratorConfig,
chainHash chainhash.Hash, chanPoint wire.OutPoint) (*boltArbitratorLog, error) {
scope, err := newLogScope(chainHash, chanPoint)
@ -337,13 +337,13 @@ func newBoltArbitratorLog(db *bbolt.DB, cfg ChannelArbitratorConfig,
// interface.
var _ ArbitratorLog = (*boltArbitratorLog)(nil)
func fetchContractReadBucket(tx *bbolt.Tx, scopeKey []byte) (*bbolt.Bucket, error) {
scopeBucket := tx.Bucket(scopeKey)
func fetchContractReadBucket(tx kvdb.ReadTx, scopeKey []byte) (kvdb.ReadBucket, error) {
scopeBucket := tx.ReadBucket(scopeKey)
if scopeBucket == nil {
return nil, errScopeBucketNoExist
}
contractBucket := scopeBucket.Bucket(contractsBucketKey)
contractBucket := scopeBucket.NestedReadBucket(contractsBucketKey)
if contractBucket == nil {
return nil, errNoContracts
}
@ -351,8 +351,8 @@ func fetchContractReadBucket(tx *bbolt.Tx, scopeKey []byte) (*bbolt.Bucket, erro
return contractBucket, nil
}
func fetchContractWriteBucket(tx *bbolt.Tx, scopeKey []byte) (*bbolt.Bucket, error) {
scopeBucket, err := tx.CreateBucketIfNotExists(scopeKey)
func fetchContractWriteBucket(tx kvdb.RwTx, scopeKey []byte) (kvdb.RwBucket, error) {
scopeBucket, err := tx.CreateTopLevelBucket(scopeKey)
if err != nil {
return nil, err
}
@ -369,7 +369,7 @@ func fetchContractWriteBucket(tx *bbolt.Tx, scopeKey []byte) (*bbolt.Bucket, err
// writeResolver is a helper method that writes a contract resolver and stores
// it within the passed contractBucket using its unique resolutionsKey key.
func (b *boltArbitratorLog) writeResolver(contractBucket *bbolt.Bucket,
func (b *boltArbitratorLog) writeResolver(contractBucket kvdb.RwBucket,
res ContractResolver) error {
// Only persist resolvers that are stateful. Stateless resolvers don't
@ -415,8 +415,8 @@ func (b *boltArbitratorLog) writeResolver(contractBucket *bbolt.Bucket,
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) CurrentState() (ArbitratorState, error) {
var s ArbitratorState
err := b.db.View(func(tx *bbolt.Tx) error {
scopeBucket := tx.Bucket(b.scopeKey[:])
err := kvdb.View(b.db, func(tx kvdb.ReadTx) error {
scopeBucket := tx.ReadBucket(b.scopeKey[:])
if scopeBucket == nil {
return errScopeBucketNoExist
}
@ -440,8 +440,9 @@ func (b *boltArbitratorLog) CurrentState() (ArbitratorState, error) {
//
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) CommitState(s ArbitratorState) error {
return b.db.Batch(func(tx *bbolt.Tx) error {
scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:])
fmt.Printf("yeee: %T\n", b.db)
return kvdb.Batch(b.db, func(tx kvdb.RwTx) error {
scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:])
if err != nil {
return err
}
@ -460,7 +461,7 @@ func (b *boltArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver, erro
Checkpoint: b.checkpointContract,
}
var contracts []ContractResolver
err := b.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(b.db, func(tx kvdb.ReadTx) error {
contractBucket, err := fetchContractReadBucket(tx, b.scopeKey[:])
if err != nil {
return err
@ -533,7 +534,7 @@ func (b *boltArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver, erro
//
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) InsertUnresolvedContracts(resolvers ...ContractResolver) error {
return b.db.Batch(func(tx *bbolt.Tx) error {
return kvdb.Batch(b.db, func(tx kvdb.RwTx) error {
contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:])
if err != nil {
return err
@ -556,7 +557,7 @@ func (b *boltArbitratorLog) InsertUnresolvedContracts(resolvers ...ContractResol
//
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) SwapContract(oldContract, newContract ContractResolver) error {
return b.db.Batch(func(tx *bbolt.Tx) error {
return kvdb.Batch(b.db, func(tx kvdb.RwTx) error {
contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:])
if err != nil {
return err
@ -576,7 +577,7 @@ func (b *boltArbitratorLog) SwapContract(oldContract, newContract ContractResolv
//
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) ResolveContract(res ContractResolver) error {
return b.db.Batch(func(tx *bbolt.Tx) error {
return kvdb.Batch(b.db, func(tx kvdb.RwTx) error {
contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:])
if err != nil {
return err
@ -594,8 +595,8 @@ func (b *boltArbitratorLog) ResolveContract(res ContractResolver) error {
//
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) LogContractResolutions(c *ContractResolutions) error {
return b.db.Batch(func(tx *bbolt.Tx) error {
scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:])
return kvdb.Batch(b.db, func(tx kvdb.RwTx) error {
scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:])
if err != nil {
return err
}
@ -674,8 +675,8 @@ func (b *boltArbitratorLog) LogContractResolutions(c *ContractResolutions) error
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, error) {
c := &ContractResolutions{}
err := b.db.View(func(tx *bbolt.Tx) error {
scopeBucket := tx.Bucket(b.scopeKey[:])
err := kvdb.View(b.db, func(tx kvdb.ReadTx) error {
scopeBucket := tx.ReadBucket(b.scopeKey[:])
if scopeBucket == nil {
return errScopeBucketNoExist
}
@ -773,13 +774,13 @@ func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, er
func (b *boltArbitratorLog) FetchChainActions() (ChainActionMap, error) {
actionsMap := make(ChainActionMap)
err := b.db.View(func(tx *bbolt.Tx) error {
scopeBucket := tx.Bucket(b.scopeKey[:])
err := kvdb.View(b.db, func(tx kvdb.ReadTx) error {
scopeBucket := tx.ReadBucket(b.scopeKey[:])
if scopeBucket == nil {
return errScopeBucketNoExist
}
actionsBucket := scopeBucket.Bucket(actionsBucketKey)
actionsBucket := scopeBucket.NestedReadBucket(actionsBucketKey)
if actionsBucket == nil {
return errNoActions
}
@ -815,8 +816,8 @@ func (b *boltArbitratorLog) FetchChainActions() (ChainActionMap, error) {
//
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error {
return b.db.Update(func(tx *bbolt.Tx) error {
scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:])
return kvdb.Batch(b.db, func(tx kvdb.RwTx) error {
scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:])
if err != nil {
return err
}
@ -836,8 +837,8 @@ func (b *boltArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error {
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) FetchConfirmedCommitSet() (*CommitSet, error) {
var c *CommitSet
err := b.db.View(func(tx *bbolt.Tx) error {
scopeBucket := tx.Bucket(b.scopeKey[:])
err := kvdb.View(b.db, func(tx kvdb.ReadTx) error {
scopeBucket := tx.ReadBucket(b.scopeKey[:])
if scopeBucket == nil {
return errScopeBucketNoExist
}
@ -868,8 +869,8 @@ func (b *boltArbitratorLog) FetchConfirmedCommitSet() (*CommitSet, error) {
//
// NOTE: Part of the ContractResolver interface.
func (b *boltArbitratorLog) WipeHistory() error {
return b.db.Update(func(tx *bbolt.Tx) error {
scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:])
return kvdb.Update(b.db, func(tx kvdb.RwTx) error {
scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:])
if err != nil {
return err
}
@ -882,8 +883,8 @@ func (b *boltArbitratorLog) WipeHistory() error {
// Next, we'll delete any lingering contract state within the
// contracts bucket by removing the bucket itself.
err = scopeBucket.DeleteBucket(contractsBucketKey)
if err != nil && err != bbolt.ErrBucketNotFound {
err = scopeBucket.DeleteNestedBucket(contractsBucketKey)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
@ -895,13 +896,13 @@ func (b *boltArbitratorLog) WipeHistory() error {
// We'll delete any chain actions that are still stored by
// removing the enclosing bucket.
err = scopeBucket.DeleteBucket(actionsBucketKey)
if err != nil && err != bbolt.ErrBucketNotFound {
err = scopeBucket.DeleteNestedBucket(actionsBucketKey)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
// Finally, we'll delete the enclosing bucket itself.
return tx.DeleteBucket(b.scopeKey[:])
return tx.DeleteTopLevelBucket(b.scopeKey[:])
})
}
@ -909,7 +910,7 @@ func (b *boltArbitratorLog) WipeHistory() error {
// ContractResolver instances to checkpoint their state once they reach
// milestones during contract resolution.
func (b *boltArbitratorLog) checkpointContract(c ContractResolver) error {
return b.db.Batch(func(tx *bbolt.Tx) error {
return kvdb.Update(b.db, func(tx kvdb.RwTx) error {
contractBucket, err := fetchContractWriteBucket(tx, b.scopeKey[:])
if err != nil {
return err

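A side effect of the new API worth illustrating: kvdb.Update and kvdb.Batch take closures of the same shape, which is what lets the arbitrator log call sites above move between immediate and coalesced writes. The sketch below is not from the commit; the bucket name and helpers are invented.

package example

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// scopeExampleBucket is a hypothetical top-level bucket.
var scopeExampleBucket = []byte("example-scope")

// putState writes a single state byte under the scope bucket. The same
// closure body works for both wrappers.
func putState(tx kvdb.RwTx, state byte) error {
	scope, err := tx.CreateTopLevelBucket(scopeExampleBucket)
	if err != nil {
		return err
	}
	return scope.Put([]byte("state"), []byte{state})
}

// commitNow applies the write immediately in its own transaction.
func commitNow(db kvdb.Backend, state byte) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		return putState(tx, state)
	})
}

// commitBatched lets the backend coalesce the write with other concurrent
// batched writers before committing.
func commitBatched(db kvdb.Backend, state byte) error {
	return kvdb.Batch(db, func(tx kvdb.RwTx) error {
		return putState(tx, state)
	})
}
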
@ -14,9 +14,9 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnwallet"
)
@ -104,7 +104,7 @@ var (
}
)
func makeTestDB() (*bbolt.DB, func(), error) {
func makeTestDB() (kvdb.Backend, func(), error) {
// First, create a temporary directory to be used for the duration of
// this test.
tempDirName, err := ioutil.TempDir("", "arblog")
@ -112,7 +112,7 @@ func makeTestDB() (*bbolt.DB, func(), error) {
return nil, nil, err
}
db, err := bbolt.Open(tempDirName+"/test.db", 0600, nil)
db, err := kvdb.Create(kvdb.BoltBackendName, tempDirName+"/test.db", true)
if err != nil {
return nil, nil, err
}

@ -356,7 +356,7 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel,
// TODO(roasbeef); abstraction leak...
// * rework: adaptor method to set log scope w/ factory func
chanLog, err := newBoltArbitratorLog(
c.chanSource.DB, arbCfg, c.cfg.ChainHash, chanPoint,
c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint,
)
if err != nil {
blockEpoch.Cancel()
@ -554,7 +554,7 @@ func (c *ChainArbitrator) Start() error {
CloseType: closeChanInfo.CloseType,
}
chanLog, err := newBoltArbitratorLog(
c.chanSource.DB, arbCfg, c.cfg.ChainHash, chanPoint,
c.chanSource.Backend, arbCfg, c.cfg.ChainHash, chanPoint,
)
if err != nil {
blockEpoch.Cancel()

@ -13,9 +13,9 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lnwallet"
@ -394,7 +394,7 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog,
return nil, err
}
dbPath := filepath.Join(dbDir, "testdb")
db, err := bbolt.Open(dbPath, 0600, nil)
db, err := kvdb.Create(kvdb.BoltBackendName, dbPath, true)
if err != nil {
return nil, err
}

@ -6,8 +6,8 @@ import (
"errors"
"fmt"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -68,8 +68,8 @@ var _ GossipMessageStore = (*MessageStore)(nil)
// NewMessageStore creates a new message store backed by a channeldb instance.
func NewMessageStore(db *channeldb.DB) (*MessageStore, error) {
err := db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(messageStoreBucket)
err := kvdb.Batch(db.Backend, func(tx kvdb.RwTx) error {
_, err := tx.CreateTopLevelBucket(messageStoreBucket)
return err
})
if err != nil {
@ -124,8 +124,8 @@ func (s *MessageStore) AddMessage(msg lnwire.Message, peerPubKey [33]byte) error
return err
}
return s.db.Batch(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
return kvdb.Batch(s.db.Backend, func(tx kvdb.RwTx) error {
messageStore := tx.ReadWriteBucket(messageStoreBucket)
if messageStore == nil {
return ErrCorruptedMessageStore
}
@ -145,8 +145,8 @@ func (s *MessageStore) DeleteMessage(msg lnwire.Message,
return err
}
return s.db.Batch(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
return kvdb.Batch(s.db.Backend, func(tx kvdb.RwTx) error {
messageStore := tx.ReadWriteBucket(messageStoreBucket)
if messageStore == nil {
return ErrCorruptedMessageStore
}
@ -200,8 +200,8 @@ func readMessage(msgBytes []byte) (lnwire.Message, error) {
// all peers.
func (s *MessageStore) Messages() (map[[33]byte][]lnwire.Message, error) {
msgs := make(map[[33]byte][]lnwire.Message)
err := s.db.View(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
messageStore := tx.ReadBucket(messageStoreBucket)
if messageStore == nil {
return ErrCorruptedMessageStore
}
@ -238,13 +238,13 @@ func (s *MessageStore) MessagesForPeer(
peerPubKey [33]byte) ([]lnwire.Message, error) {
var msgs []lnwire.Message
err := s.db.View(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
messageStore := tx.ReadBucket(messageStoreBucket)
if messageStore == nil {
return ErrCorruptedMessageStore
}
c := messageStore.Cursor()
c := messageStore.ReadCursor()
k, v := c.Seek(peerPubKey[:])
for ; bytes.HasPrefix(k, peerPubKey[:]); k, v = c.Next() {
// Deserialize the message from its raw bytes and filter
@ -273,8 +273,8 @@ func (s *MessageStore) MessagesForPeer(
// Peers returns the public key of all peers with messages within the store.
func (s *MessageStore) Peers() (map[[33]byte]struct{}, error) {
peers := make(map[[33]byte]struct{})
err := s.db.View(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
messageStore := tx.ReadBucket(messageStoreBucket)
if messageStore == nil {
return ErrCorruptedMessageStore
}

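MessagesForPeer above switches from Cursor to ReadCursor for its prefix scan over peer-keyed entries. A sketch of that read-only prefix iteration over a hypothetical bucket (the bucket name and collectWithPrefix helper are invented; Seek, Next, and ReadCursor are the calls shown in the hunk):

package example

import (
	"bytes"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// indexBucket is a hypothetical top-level bucket.
var indexBucket = []byte("example-index")

// collectWithPrefix returns every value whose key starts with prefix, using
// a read-only cursor positioned with Seek.
func collectWithPrefix(db kvdb.Backend, prefix []byte) ([][]byte, error) {
	var values [][]byte
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		bkt := tx.ReadBucket(indexBucket)
		if bkt == nil {
			return nil
		}

		c := bkt.ReadCursor()
		for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
			// Copy the value, since the returned slice is only
			// valid for the life of the transaction.
			values = append(values, append([]byte(nil), v...))
		}
		return nil
	})
	return values, err
}
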
@ -9,9 +9,9 @@ import (
"testing"
"github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -236,8 +236,8 @@ func TestMessageStoreUnsupportedMessage(t *testing.T) {
if _, err := lnwire.WriteMessage(&rawMsg, unsupportedMsg, 0); err != nil {
t.Fatalf("unable to serialize message: %v", err)
}
err = msgStore.db.Update(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
err = kvdb.Update(msgStore.db, func(tx kvdb.RwTx) error {
messageStore := tx.ReadWriteBucket(messageStoreBucket)
return messageStore.Put(msgKey, rawMsg.Bytes())
})
if err != nil {

@ -12,12 +12,12 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/chanacceptor"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/discovery"
"github.com/lightningnetwork/lnd/htlcswitch"
"github.com/lightningnetwork/lnd/input"
@ -3287,9 +3287,9 @@ func copyPubKey(pub *btcec.PublicKey) *btcec.PublicKey {
// chanPoint to the channelOpeningStateBucket.
func (f *fundingManager) saveChannelOpeningState(chanPoint *wire.OutPoint,
state channelOpeningState, shortChanID *lnwire.ShortChannelID) error {
return f.cfg.Wallet.Cfg.Database.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(channelOpeningStateBucket)
bucket, err := tx.CreateTopLevelBucket(channelOpeningStateBucket)
if err != nil {
return err
}
@ -3317,9 +3317,9 @@ func (f *fundingManager) getChannelOpeningState(chanPoint *wire.OutPoint) (
var state channelOpeningState
var shortChanID lnwire.ShortChannelID
err := f.cfg.Wallet.Cfg.Database.View(func(tx *bbolt.Tx) error {
err := kvdb.View(f.cfg.Wallet.Cfg.Database, func(tx kvdb.ReadTx) error {
bucket := tx.Bucket(channelOpeningStateBucket)
bucket := tx.ReadBucket(channelOpeningStateBucket)
if bucket == nil {
// If the bucket does not exist, it means we never added
// a channel to the db, so return ErrChannelNotFound.
@ -3349,8 +3349,8 @@ func (f *fundingManager) getChannelOpeningState(chanPoint *wire.OutPoint) (
// deleteChannelOpeningState removes any state for chanPoint from the database.
func (f *fundingManager) deleteChannelOpeningState(chanPoint *wire.OutPoint) error {
return f.cfg.Wallet.Cfg.Database.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(channelOpeningStateBucket)
return kvdb.Update(f.cfg.Wallet.Cfg.Database, func(tx kvdb.RwTx) error {
bucket := tx.ReadWriteBucket(channelOpeningStateBucket)
if bucket == nil {
return fmt.Errorf("Bucket not found")
}

go.mod

@ -14,7 +14,6 @@ require (
github.com/btcsuite/btcwallet/walletdb v1.2.0
github.com/btcsuite/btcwallet/wtxmgr v1.0.0
github.com/btcsuite/fastsha256 v0.0.0-20160815193821-637e65642941
github.com/coreos/bbolt v1.3.3
github.com/davecgh/go-spew v1.1.1
github.com/go-errors/errors v1.0.1
github.com/golang/protobuf v1.3.1
@ -45,8 +44,8 @@ require (
github.com/rogpeppe/fastuuid v1.2.0 // indirect
github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02
github.com/urfave/cli v1.18.0
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922
google.golang.org/grpc v1.19.0

go.sum

@ -185,9 +185,6 @@ golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17 h1:nVJ3guKA9qdkEQ3TUdXI9QSINo2CUPM/cySEvw2w8I0=
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -199,8 +196,6 @@ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -213,8 +208,6 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFWLFaHyYZWD4tOT+cjn0=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@ -5,10 +5,10 @@ import (
"fmt"
"sync"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/htlcswitch/hop"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -213,13 +213,13 @@ func NewCircuitMap(cfg *CircuitMapConfig) (CircuitMap, error) {
// initBuckets ensures that the primary buckets used by the circuit are
// initialized so that we can assume their existence after startup.
func (cm *circuitMap) initBuckets() error {
return cm.cfg.DB.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(circuitKeystoneKey)
return kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) error {
_, err := tx.CreateTopLevelBucket(circuitKeystoneKey)
if err != nil {
return err
}
_, err = tx.CreateBucketIfNotExists(circuitAddKey)
_, err = tx.CreateTopLevelBucket(circuitAddKey)
return err
})
}
@ -238,10 +238,10 @@ func (cm *circuitMap) restoreMemState() error {
pending = make(map[CircuitKey]*PaymentCircuit)
)
if err := cm.cfg.DB.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) error {
// Restore any of the circuits persisted in the circuit bucket
// back into memory.
circuitBkt := tx.Bucket(circuitAddKey)
circuitBkt := tx.ReadWriteBucket(circuitAddKey)
if circuitBkt == nil {
return ErrCorruptedCircuitMap
}
@ -262,7 +262,7 @@ func (cm *circuitMap) restoreMemState() error {
// Furthermore, load the keystone bucket and resurrect the
// keystones used in any open circuits.
keystoneBkt := tx.Bucket(circuitKeystoneKey)
keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey)
if keystoneBkt == nil {
return ErrCorruptedCircuitMap
}
@ -463,8 +463,8 @@ func (cm *circuitMap) TrimOpenCircuits(chanID lnwire.ShortChannelID,
return nil
}
return cm.cfg.DB.Update(func(tx *bbolt.Tx) error {
keystoneBkt := tx.Bucket(circuitKeystoneKey)
return kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) error {
keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey)
if keystoneBkt == nil {
return ErrCorruptedCircuitMap
}
@ -616,8 +616,8 @@ func (cm *circuitMap) CommitCircuits(circuits ...*PaymentCircuit) (
// Write the entire batch of circuits to the persistent circuit bucket
// using bolt's Batch write. This method must be called from multiple,
// distinct goroutines to have any impact on performance.
err := cm.cfg.DB.Batch(func(tx *bbolt.Tx) error {
circuitBkt := tx.Bucket(circuitAddKey)
err := kvdb.Batch(cm.cfg.DB.Backend, func(tx kvdb.RwTx) error {
circuitBkt := tx.ReadWriteBucket(circuitAddKey)
if circuitBkt == nil {
return ErrCorruptedCircuitMap
}
@ -706,10 +706,10 @@ func (cm *circuitMap) OpenCircuits(keystones ...Keystone) error {
}
cm.mtx.RUnlock()
err := cm.cfg.DB.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(cm.cfg.DB, func(tx kvdb.RwTx) error {
// Now, load the circuit bucket to which we will write the
// already serialized circuit.
keystoneBkt := tx.Bucket(circuitKeystoneKey)
keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey)
if keystoneBkt == nil {
return ErrCorruptedCircuitMap
}
@ -847,13 +847,13 @@ func (cm *circuitMap) DeleteCircuits(inKeys ...CircuitKey) error {
}
cm.mtx.Unlock()
err := cm.cfg.DB.Batch(func(tx *bbolt.Tx) error {
err := kvdb.Batch(cm.cfg.DB.Backend, func(tx kvdb.RwTx) error {
for _, circuit := range removedCircuits {
// If this htlc made it to an outgoing link, load the
// keystone bucket from which we will remove the
// outgoing circuit key.
if circuit.HasKeystone() {
keystoneBkt := tx.Bucket(circuitKeystoneKey)
keystoneBkt := tx.ReadWriteBucket(circuitKeystoneKey)
if keystoneBkt == nil {
return ErrCorruptedCircuitMap
}
@ -868,7 +868,7 @@ func (cm *circuitMap) DeleteCircuits(inKeys ...CircuitKey) error {
// Remove the circuit itself based on the incoming
// circuit key.
circuitBkt := tx.Bucket(circuitAddKey)
circuitBkt := tx.ReadWriteBucket(circuitAddKey)
if circuitBkt == nil {
return ErrCorruptedCircuitMap
}

@ -8,9 +8,9 @@ import (
"sync"
"sync/atomic"
"github.com/coreos/bbolt"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
const (
@ -56,7 +56,7 @@ type DecayedLog struct {
dbPath string
db *bbolt.DB
db kvdb.Backend
notifier chainntnfs.ChainNotifier
@ -92,7 +92,10 @@ func (d *DecayedLog) Start() error {
// Open the boltdb for use.
var err error
if d.db, err = bbolt.Open(d.dbPath, dbPermissions, nil); err != nil {
d.db, err = kvdb.Create(
kvdb.BoltBackendName, d.dbPath, true,
)
if err != nil {
return fmt.Errorf("Could not open boltdb: %v", err)
}
@ -119,13 +122,13 @@ func (d *DecayedLog) Start() error {
// initBuckets initializes the primary buckets used by the decayed log, namely
// the shared hash bucket, and batch replay
func (d *DecayedLog) initBuckets() error {
return d.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(sharedHashBucket)
return kvdb.Update(d.db, func(tx kvdb.RwTx) error {
_, err := tx.CreateTopLevelBucket(sharedHashBucket)
if err != nil {
return ErrDecayedLogInit
}
_, err = tx.CreateBucketIfNotExists(batchReplayBucket)
_, err = tx.CreateTopLevelBucket(batchReplayBucket)
if err != nil {
return ErrDecayedLogInit
}
@ -196,11 +199,11 @@ func (d *DecayedLog) garbageCollector(epochClient *chainntnfs.BlockEpochEvent) {
func (d *DecayedLog) gcExpiredHashes(height uint32) (uint32, error) {
var numExpiredHashes uint32
err := d.db.Batch(func(tx *bbolt.Tx) error {
err := kvdb.Batch(d.db, func(tx kvdb.RwTx) error {
numExpiredHashes = 0
// Grab the shared hash bucket
sharedHashes := tx.Bucket(sharedHashBucket)
sharedHashes := tx.ReadWriteBucket(sharedHashBucket)
if sharedHashes == nil {
return fmt.Errorf("sharedHashBucket " +
"is nil")
@ -246,8 +249,8 @@ func (d *DecayedLog) gcExpiredHashes(height uint32) (uint32, error) {
// Delete removes a <shared secret hash, CLTV> key-pair from the
// sharedHashBucket.
func (d *DecayedLog) Delete(hash *sphinx.HashPrefix) error {
return d.db.Batch(func(tx *bbolt.Tx) error {
sharedHashes := tx.Bucket(sharedHashBucket)
return kvdb.Batch(d.db, func(tx kvdb.RwTx) error {
sharedHashes := tx.ReadWriteBucket(sharedHashBucket)
if sharedHashes == nil {
return ErrDecayedLogCorrupted
}
@ -261,10 +264,10 @@ func (d *DecayedLog) Delete(hash *sphinx.HashPrefix) error {
func (d *DecayedLog) Get(hash *sphinx.HashPrefix) (uint32, error) {
var value uint32
err := d.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d.db, func(tx kvdb.ReadTx) error {
// Grab the shared hash bucket which stores the mapping from
// truncated sha-256 hashes of shared secrets to CLTV's.
sharedHashes := tx.Bucket(sharedHashBucket)
sharedHashes := tx.ReadBucket(sharedHashBucket)
if sharedHashes == nil {
return fmt.Errorf("sharedHashes is nil, could " +
"not retrieve CLTV value")
@ -294,8 +297,8 @@ func (d *DecayedLog) Put(hash *sphinx.HashPrefix, cltv uint32) error {
var scratch [4]byte
binary.BigEndian.PutUint32(scratch[:], cltv)
return d.db.Batch(func(tx *bbolt.Tx) error {
sharedHashes := tx.Bucket(sharedHashBucket)
return kvdb.Batch(d.db, func(tx kvdb.RwTx) error {
sharedHashes := tx.ReadWriteBucket(sharedHashBucket)
if sharedHashes == nil {
return ErrDecayedLogCorrupted
}
@ -327,8 +330,8 @@ func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) {
// to generate the complete replay set. If this batch was previously
// processed, the replay set will be deserialized from disk.
var replays *sphinx.ReplaySet
if err := d.db.Batch(func(tx *bbolt.Tx) error {
sharedHashes := tx.Bucket(sharedHashBucket)
if err := kvdb.Batch(d.db, func(tx kvdb.RwTx) error {
sharedHashes := tx.ReadWriteBucket(sharedHashBucket)
if sharedHashes == nil {
return ErrDecayedLogCorrupted
}
@ -336,7 +339,7 @@ func (d *DecayedLog) PutBatch(b *sphinx.Batch) (*sphinx.ReplaySet, error) {
// Load the batch replay bucket, which will be used to either
// retrieve the result of previously processing this batch, or
// to write the result of this operation.
batchReplayBkt := tx.Bucket(batchReplayBucket)
batchReplayBkt := tx.ReadWriteBucket(batchReplayBucket)
if batchReplayBkt == nil {
return ErrDecayedLogCorrupted
}

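Taken together, the DecayedLog changes show the full lifecycle on the new interface: kvdb.Create opens a bolt-backed backend, kvdb.Update with CreateTopLevelBucket initializes the buckets, and reads go through kvdb.View with ReadBucket. A small sketch of that open-init-read cycle; the path, bucket name, and key below are illustrative only:

package decaysketch

import (
    "errors"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var hashBucket = []byte("hash-bucket")

// openAndInit creates (or opens) a bolt-backed kvdb backend and makes sure the
// top-level bucket exists before any reads happen. The boolean argument
// mirrors the calls in the diff above.
func openAndInit(dbPath string) (kvdb.Backend, error) {
    db, err := kvdb.Create(kvdb.BoltBackendName, dbPath, true)
    if err != nil {
        return nil, err
    }
    err = kvdb.Update(db, func(tx kvdb.RwTx) error {
        _, err := tx.CreateTopLevelBucket(hashBucket)
        return err
    })
    if err != nil {
        return nil, err
    }
    return db, nil
}

// lookup reads a single value inside a read-only transaction.
func lookup(db kvdb.Backend, key []byte) ([]byte, error) {
    var value []byte
    err := kvdb.View(db, func(tx kvdb.ReadTx) error {
        bkt := tx.ReadBucket(hashBucket)
        if bkt == nil {
            return errors.New("bucket not initialized")
        }
        // Copy: slices returned by Get are only valid inside the transaction.
        value = append([]byte(nil), bkt.Get(key)...)
        return nil
    })
    return value, err
}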
@ -19,12 +19,12 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/contractcourt"
"github.com/lightningnetwork/lnd/htlcswitch/hodl"
"github.com/lightningnetwork/lnd/htlcswitch/hop"
@ -5170,32 +5170,32 @@ type mockPackager struct {
failLoadFwdPkgs bool
}
func (*mockPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *channeldb.FwdPkg) error {
func (*mockPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *channeldb.FwdPkg) error {
return nil
}
func (*mockPackager) SetFwdFilter(tx *bbolt.Tx, height uint64,
func (*mockPackager) SetFwdFilter(tx kvdb.RwTx, height uint64,
fwdFilter *channeldb.PkgFilter) error {
return nil
}
func (*mockPackager) AckAddHtlcs(tx *bbolt.Tx,
func (*mockPackager) AckAddHtlcs(tx kvdb.RwTx,
addRefs ...channeldb.AddRef) error {
return nil
}
func (m *mockPackager) LoadFwdPkgs(tx *bbolt.Tx) ([]*channeldb.FwdPkg, error) {
func (m *mockPackager) LoadFwdPkgs(tx kvdb.ReadTx) ([]*channeldb.FwdPkg, error) {
if m.failLoadFwdPkgs {
return nil, fmt.Errorf("failing LoadFwdPkgs")
}
return nil, nil
}
func (*mockPackager) RemovePkg(tx *bbolt.Tx, height uint64) error {
func (*mockPackager) RemovePkg(tx kvdb.RwTx, height uint64) error {
return nil
}
func (*mockPackager) AckSettleFails(tx *bbolt.Tx,
func (*mockPackager) AckSettleFails(tx kvdb.RwTx,
settleFailRefs ...channeldb.SettleFailRef) error {
return nil
}

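The mock above tracks the packager-style interfaces, whose methods now take kvdb.ReadTx or kvdb.RwTx instead of *bbolt.Tx, so the caller owns the transaction and can compose several stores in one commit. A hedged sketch of that caller-owns-the-transaction pattern, with an invented two-method interface standing in for channeldb.FwdPackager:

package pkgsketch

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// Packager is a trimmed, hypothetical stand-in: writes take a kvdb.RwTx,
// reads take a kvdb.ReadTx.
type Packager interface {
    AckAll(tx kvdb.RwTx, ids []uint64) error
    Load(tx kvdb.ReadTx) ([]uint64, error)
}

// ackAndReload runs both calls against the same backend: the write happens in
// one read-write transaction, the read in a separate read-only one.
func ackAndReload(db kvdb.Backend, p Packager, ids []uint64) ([]uint64, error) {
    if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
        return p.AckAll(tx, ids)
    }); err != nil {
        return nil, err
    }

    var out []uint64
    err := kvdb.View(db, func(tx kvdb.ReadTx) error {
        var err error
        out, err = p.Load(tx)
        return err
    })
    return out, err
}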
@ -7,8 +7,8 @@ import (
"io"
"sync"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/multimutex"
)
@ -137,8 +137,8 @@ func (store *networkResultStore) storeResult(paymentID uint64,
var paymentIDBytes [8]byte
binary.BigEndian.PutUint64(paymentIDBytes[:], paymentID)
err := store.db.Batch(func(tx *bbolt.Tx) error {
networkResults, err := tx.CreateBucketIfNotExists(
err := kvdb.Batch(store.db.Backend, func(tx kvdb.RwTx) error {
networkResults, err := tx.CreateTopLevelBucket(
networkResultStoreBucketKey,
)
if err != nil {
@ -180,7 +180,7 @@ func (store *networkResultStore) subscribeResult(paymentID uint64) (
resultChan = make(chan *networkResult, 1)
)
err := store.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(store.db, func(tx kvdb.ReadTx) error {
var err error
result, err = fetchResult(tx, paymentID)
switch {
@ -226,7 +226,7 @@ func (store *networkResultStore) getResult(pid uint64) (
*networkResult, error) {
var result *networkResult
err := store.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(store.db, func(tx kvdb.ReadTx) error {
var err error
result, err = fetchResult(tx, pid)
return err
@ -238,11 +238,11 @@ func (store *networkResultStore) getResult(pid uint64) (
return result, nil
}
func fetchResult(tx *bbolt.Tx, pid uint64) (*networkResult, error) {
func fetchResult(tx kvdb.ReadTx, pid uint64) (*networkResult, error) {
var paymentIDBytes [8]byte
binary.BigEndian.PutUint64(paymentIDBytes[:], pid)
networkResults := tx.Bucket(networkResultStoreBucketKey)
networkResults := tx.ReadBucket(networkResultStoreBucketKey)
if networkResults == nil {
return nil, ErrPaymentIDNotFound
}

@ -3,9 +3,9 @@ package htlcswitch
import (
"sync"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// defaultSequenceBatchSize specifies the window of sequence numbers that are
@ -87,8 +87,8 @@ func (s *persistentSequencer) NextID() (uint64, error) {
// allocated will start from the last known tip on disk, which is fine
// as we only require uniqueness of the allocated numbers.
var nextHorizonID uint64
if err := s.db.Update(func(tx *bbolt.Tx) error {
nextIDBkt := tx.Bucket(nextPaymentIDKey)
if err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
nextIDBkt := tx.ReadWriteBucket(nextPaymentIDKey)
if nextIDBkt == nil {
return ErrSequencerCorrupted
}
@ -121,8 +121,8 @@ func (s *persistentSequencer) NextID() (uint64, error) {
// initDB populates the bucket used to generate payment sequence numbers.
func (s *persistentSequencer) initDB() error {
return s.db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(nextPaymentIDKey)
return kvdb.Update(s.db, func(tx kvdb.RwTx) error {
_, err := tx.CreateTopLevelBucket(nextPaymentIDKey)
return err
})
}

@ -10,10 +10,10 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/contractcourt"
"github.com/lightningnetwork/lnd/htlcswitch/hop"
"github.com/lightningnetwork/lnd/lntypes"
@ -1419,7 +1419,7 @@ func (s *Switch) closeCircuit(pkt *htlcPacket) (*PaymentCircuit, error) {
// we're the originator of the payment, so the link stops attempting to
// re-broadcast.
func (s *Switch) ackSettleFail(settleFailRefs ...channeldb.SettleFailRef) error {
return s.cfg.DB.Batch(func(tx *bbolt.Tx) error {
return kvdb.Batch(s.cfg.DB.Backend, func(tx kvdb.RwTx) error {
return s.cfg.SwitchPackager.AckSettleFails(tx, settleFailRefs...)
})
}
@ -1865,7 +1865,7 @@ func (s *Switch) reforwardResponses() error {
func (s *Switch) loadChannelFwdPkgs(source lnwire.ShortChannelID) ([]*channeldb.FwdPkg, error) {
var fwdPkgs []*channeldb.FwdPkg
if err := s.cfg.DB.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(s.cfg.DB, func(tx kvdb.RwTx) error {
var err error
fwdPkgs, err = s.cfg.SwitchPackager.LoadChannelFwdPkgs(
tx, source,

@ -21,10 +21,10 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/fastsha256"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/contractcourt"
"github.com/lightningnetwork/lnd/htlcswitch/hop"
"github.com/lightningnetwork/lnd/input"
@ -420,7 +420,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
aliceStoredChannels, err := dbAlice.FetchOpenChannels(aliceKeyPub)
switch err {
case nil:
case bbolt.ErrDatabaseNotOpen:
case kvdb.ErrDatabaseNotOpen:
dbAlice, err = channeldb.Open(dbAlice.Path())
if err != nil {
return nil, errors.Errorf("unable to reopen alice "+
@ -464,7 +464,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
bobStoredChannels, err := dbBob.FetchOpenChannels(bobKeyPub)
switch err {
case nil:
case bbolt.ErrDatabaseNotOpen:
case kvdb.ErrDatabaseNotOpen:
dbBob, err = channeldb.Open(dbBob.Path())
if err != nil {
return nil, errors.Errorf("unable to reopen bob "+

@ -30,12 +30,12 @@ import (
"github.com/btcsuite/btcwallet/chain"
"github.com/btcsuite/btcwallet/walletdb"
_ "github.com/btcsuite/btcwallet/walletdb/bdb"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/lightninglabs/neutrino"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/chainntnfs/btcdnotify"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwallet"
@ -3162,7 +3162,7 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver,
// node's chainstate to initial level, cleanly
// wipe buckets
if err := clearWalletStates(alice, bob); err !=
nil && err != bbolt.ErrBucketNotFound {
nil && err != kvdb.ErrBucketNotFound {
t.Fatalf("unable to wipe wallet state: %v", err)
}
}

@ -7,7 +7,7 @@ import (
"os"
"path"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
@ -48,8 +48,8 @@ func NewService(dir string, checks ...Checker) (*Service, error) {
// Open the database that we'll use to store the primary macaroon key,
// and all generated macaroons+caveats.
macaroonDB, err := bbolt.Open(
path.Join(dir, DBFilename), 0600, bbolt.DefaultOptions,
macaroonDB, err := kvdb.Create(
kvdb.BoltBackendName, path.Join(dir, DBFilename), true,
)
if err != nil {
return nil, err

@ -8,7 +8,7 @@ import (
"path"
"testing"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/macaroons"
"google.golang.org/grpc/metadata"
"gopkg.in/macaroon-bakery.v2/bakery"
@ -33,8 +33,9 @@ func setupTestRootKeyStorage(t *testing.T) string {
if err != nil {
t.Fatalf("Error creating temp dir: %v", err)
}
db, err := bbolt.Open(path.Join(tempDir, "macaroons.db"), 0600,
bbolt.DefaultOptions)
db, err := kvdb.Create(
kvdb.BoltBackendName, path.Join(tempDir, "macaroons.db"), true,
)
if err != nil {
t.Fatalf("Error opening store DB: %v", err)
}

@ -7,7 +7,7 @@ import (
"io"
"sync"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/btcsuite/btcwallet/snacl"
)
@ -46,7 +46,7 @@ var (
// RootKeyStorage implements the bakery.RootKeyStorage interface.
type RootKeyStorage struct {
*bbolt.DB
kvdb.Backend
encKeyMtx sync.RWMutex
encKey *snacl.SecretKey
@ -54,10 +54,10 @@ type RootKeyStorage struct {
// NewRootKeyStorage creates a RootKeyStorage instance.
// TODO(aakselrod): Add support for encryption of data with passphrase.
func NewRootKeyStorage(db *bbolt.DB) (*RootKeyStorage, error) {
func NewRootKeyStorage(db kvdb.Backend) (*RootKeyStorage, error) {
// If the store's bucket doesn't exist, create it.
err := db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(rootKeyBucketName)
err := kvdb.Update(db, func(tx kvdb.RwTx) error {
_, err := tx.CreateTopLevelBucket(rootKeyBucketName)
return err
})
if err != nil {
@ -65,7 +65,7 @@ func NewRootKeyStorage(db *bbolt.DB) (*RootKeyStorage, error) {
}
// Return the DB wrapped in a RootKeyStorage object.
return &RootKeyStorage{DB: db, encKey: nil}, nil
return &RootKeyStorage{Backend: db, encKey: nil}, nil
}
// CreateUnlock sets an encryption key if one is not already set, otherwise it
@ -84,8 +84,8 @@ func (r *RootKeyStorage) CreateUnlock(password *[]byte) error {
return ErrPasswordRequired
}
return r.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(rootKeyBucketName)
return kvdb.Update(r, func(tx kvdb.RwTx) error {
bucket := tx.ReadWriteBucket(rootKeyBucketName)
dbKey := bucket.Get(encryptedKeyID)
if len(dbKey) > 0 {
// We've already stored a key, so try to unlock with
@ -131,8 +131,8 @@ func (r *RootKeyStorage) Get(_ context.Context, id []byte) ([]byte, error) {
return nil, ErrStoreLocked
}
var rootKey []byte
err := r.View(func(tx *bbolt.Tx) error {
dbKey := tx.Bucket(rootKeyBucketName).Get(id)
err := kvdb.View(r, func(tx kvdb.ReadTx) error {
dbKey := tx.ReadBucket(rootKeyBucketName).Get(id)
if len(dbKey) == 0 {
return fmt.Errorf("root key with id %s doesn't exist",
string(id))
@ -166,8 +166,8 @@ func (r *RootKeyStorage) RootKey(_ context.Context) ([]byte, []byte, error) {
}
var rootKey []byte
id := defaultRootKeyID
err := r.Update(func(tx *bbolt.Tx) error {
ns := tx.Bucket(rootKeyBucketName)
err := kvdb.Update(r, func(tx kvdb.RwTx) error {
ns := tx.ReadWriteBucket(rootKeyBucketName)
dbKey := ns.Get(id)
// If there's a root key stored in the bucket, decrypt it and
@ -212,5 +212,5 @@ func (r *RootKeyStorage) Close() error {
if r.encKey != nil {
r.encKey.Zero()
}
return r.DB.Close()
return r.Backend.Close()
}

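RootKeyStorage now embeds kvdb.Backend, so the store itself can be handed to kvdb.Update and kvdb.View and Close falls through to the embedded backend. A sketch of the same embedding idea with a made-up store and bucket name:

package storesketch

import (
    "errors"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var bucketName = []byte("root-keys")

// KeyStore embeds the backend, so *KeyStore satisfies kvdb.Backend and can be
// passed straight to kvdb.Update / kvdb.View, like RootKeyStorage above.
type KeyStore struct {
    kvdb.Backend
}

func NewKeyStore(db kvdb.Backend) (*KeyStore, error) {
    // Make sure the top-level bucket exists up front.
    err := kvdb.Update(db, func(tx kvdb.RwTx) error {
        _, err := tx.CreateTopLevelBucket(bucketName)
        return err
    })
    if err != nil {
        return nil, err
    }
    return &KeyStore{Backend: db}, nil
}

// Get reads a stored key by id inside a read-only transaction.
func (s *KeyStore) Get(id []byte) ([]byte, error) {
    var out []byte
    err := kvdb.View(s, func(tx kvdb.ReadTx) error {
        v := tx.ReadBucket(bucketName).Get(id)
        if v == nil {
            return errors.New("unknown id")
        }
        out = append([]byte(nil), v...) // copy: v is only valid in-tx
        return nil
    })
    return out, err
}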
@ -8,8 +8,7 @@ import (
"path"
"testing"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/macaroons"
"github.com/btcsuite/btcwallet/snacl"
@ -22,8 +21,9 @@ func TestStore(t *testing.T) {
}
defer os.RemoveAll(tempDir)
db, err := bbolt.Open(path.Join(tempDir, "weks.db"), 0600,
bbolt.DefaultOptions)
db, err := kvdb.Create(
kvdb.BoltBackendName, path.Join(tempDir, "weks.db"), true,
)
if err != nil {
t.Fatalf("Error opening store DB: %v", err)
}
@ -73,11 +73,13 @@ func TestStore(t *testing.T) {
}
store.Close()
// Between here and the re-opening of the store, it's possible to get
// a double-close, but that's not such a big deal since the tests will
// fail anyway in that case.
db, err = bbolt.Open(path.Join(tempDir, "weks.db"), 0600,
bbolt.DefaultOptions)
db, err = kvdb.Create(
kvdb.BoltBackendName, path.Join(tempDir, "weks.db"), true,
)
if err != nil {
t.Fatalf("Error opening store DB: %v", err)
}

@ -7,8 +7,8 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// Overview of Nursery Store Storage Hierarchy
@ -263,7 +263,7 @@ func newNurseryStore(chainHash *chainhash.Hash,
// CSV-delayed outputs (commitment and incoming HTLC's), commitment output and
// a list of outgoing two-stage htlc outputs.
func (ns *nurseryStore) Incubate(kids []kidOutput, babies []babyOutput) error {
return ns.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {
// If we have any kid outputs to incubate, then we'll attempt
// to add each of them to the nursery store. Any duplicate
// outputs will be ignored.
@ -290,7 +290,7 @@ func (ns *nurseryStore) Incubate(kids []kidOutput, babies []babyOutput) error {
// kindergarten bucket. The now mature kidOutput contained in the babyOutput
// will be stored as it waits out the kidOutput's CSV delay.
func (ns *nurseryStore) CribToKinder(bby *babyOutput) error {
return ns.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {
// First, retrieve or create the channel bucket corresponding to
// the baby output's origin channel point.
@ -374,7 +374,7 @@ func (ns *nurseryStore) CribToKinder(bby *babyOutput) error {
func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput,
lastGradHeight uint32) error {
return ns.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {
// Create or retrieve the channel bucket corresponding to the
// kid output's origin channel point.
chanPoint := kid.OriginChanPoint()
@ -471,7 +471,7 @@ func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput,
// the height and channel indexes. The height bucket will be opportunistically
// pruned from the height index as outputs are removed.
func (ns *nurseryStore) GraduateKinder(height uint32, kid *kidOutput) error {
return ns.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {
hghtBucket := ns.getHeightBucket(tx, height)
if hghtBucket == nil {
@ -501,8 +501,7 @@ func (ns *nurseryStore) GraduateKinder(height uint32, kid *kidOutput) error {
return err
}
chanBucket := ns.getChannelBucket(tx,
chanPoint)
chanBucket := ns.getChannelBucketWrite(tx, chanPoint)
if chanBucket == nil {
return ErrContractNotFound
}
@ -540,7 +539,7 @@ func (ns *nurseryStore) FetchClass(
// processed at the provided block height.
var kids []kidOutput
var babies []babyOutput
if err := ns.db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error {
// Append each crib output to our list of babyOutputs.
if err := ns.forEachHeightPrefix(tx, cribPrefix, height,
func(buf []byte) error {
@ -594,16 +593,16 @@ func (ns *nurseryStore) FetchClass(
// preschool bucket.
func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) {
var kids []kidOutput
if err := ns.db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error {
// Retrieve the existing chain bucket for this nursery store.
chainBucket := tx.Bucket(ns.pfxChainKey)
chainBucket := tx.ReadBucket(ns.pfxChainKey)
if chainBucket == nil {
return nil
}
// Load the existing channel index from the chain bucket.
chanIndex := chainBucket.Bucket(channelIndexKey)
chanIndex := chainBucket.NestedReadBucket(channelIndexKey)
if chanIndex == nil {
return nil
}
@ -626,7 +625,7 @@ func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) {
for _, chanBytes := range activeChannels {
// Retrieve the channel bucket associated with this
// channel.
chanBucket := chanIndex.Bucket(chanBytes)
chanBucket := chanIndex.NestedReadBucket(chanBytes)
if chanBucket == nil {
continue
}
@ -635,7 +634,7 @@ func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) {
// "pscl" prefix. So, we will perform a prefix scan of
// the channel bucket to efficiently enumerate all the
// desired outputs.
c := chanBucket.Cursor()
c := chanBucket.ReadCursor()
for k, v := c.Seek(psclPrefix); bytes.HasPrefix(
k, psclPrefix); k, v = c.Next() {
@ -667,16 +666,16 @@ func (ns *nurseryStore) FetchPreschools() ([]kidOutput, error) {
// index at or below the provided upper bound.
func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) {
var activeHeights []uint32
err := ns.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error {
// Ensure that the chain bucket for this nursery store exists.
chainBucket := tx.Bucket(ns.pfxChainKey)
chainBucket := tx.ReadBucket(ns.pfxChainKey)
if chainBucket == nil {
return nil
}
// Ensure that the height index has been properly initialized for this
// chain.
hghtIndex := chainBucket.Bucket(heightIndexKey)
hghtIndex := chainBucket.NestedReadBucket(heightIndexKey)
if hghtIndex == nil {
return nil
}
@ -686,7 +685,7 @@ func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) {
var lower, upper [4]byte
byteOrder.PutUint32(upper[:], height)
c := hghtIndex.Cursor()
c := hghtIndex.ReadCursor()
for k, _ := c.Seek(lower[:]); bytes.Compare(k, upper[:]) <= 0 &&
len(k) == 4; k, _ = c.Next() {
@ -712,7 +711,7 @@ func (ns *nurseryStore) HeightsBelowOrEqual(height uint32) ([]uint32, error) {
func (ns *nurseryStore) ForChanOutputs(chanPoint *wire.OutPoint,
callback func([]byte, []byte) error) error {
return ns.db.View(func(tx *bbolt.Tx) error {
return kvdb.View(ns.db, func(tx kvdb.ReadTx) error {
return ns.forChanOutputs(tx, chanPoint, callback)
})
}
@ -720,15 +719,15 @@ func (ns *nurseryStore) ForChanOutputs(chanPoint *wire.OutPoint,
// ListChannels returns all channels the nursery is currently tracking.
func (ns *nurseryStore) ListChannels() ([]wire.OutPoint, error) {
var activeChannels []wire.OutPoint
if err := ns.db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error {
// Retrieve the existing chain bucket for this nursery store.
chainBucket := tx.Bucket(ns.pfxChainKey)
chainBucket := tx.ReadBucket(ns.pfxChainKey)
if chainBucket == nil {
return nil
}
// Retrieve the existing channel index.
chanIndex := chainBucket.Bucket(channelIndexKey)
chanIndex := chainBucket.NestedReadBucket(channelIndexKey)
if chanIndex == nil {
return nil
}
@ -754,7 +753,7 @@ func (ns *nurseryStore) ListChannels() ([]wire.OutPoint, error) {
// IsMatureChannel determines whether or not all of the outputs in a
// particular channel bucket have been marked as graduated.
func (ns *nurseryStore) IsMatureChannel(chanPoint *wire.OutPoint) (bool, error) {
err := ns.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(ns.db, func(tx kvdb.ReadTx) error {
// Iterate over the contents of the channel bucket, computing
// both total number of outputs, and those that have the grad
// prefix.
@ -783,15 +782,15 @@ var ErrImmatureChannel = errors.New("cannot remove immature channel, " +
// provided channel point.
// NOTE: The channel's entries in the height index are assumed to be removed.
func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error {
return ns.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(ns.db, func(tx kvdb.RwTx) error {
// Retrieve the existing chain bucket for this nursery store.
chainBucket := tx.Bucket(ns.pfxChainKey)
chainBucket := tx.ReadWriteBucket(ns.pfxChainKey)
if chainBucket == nil {
return nil
}
// Retrieve the channel index stored in the chain bucket.
chanIndex := chainBucket.Bucket(channelIndexKey)
chanIndex := chainBucket.NestedReadWriteBucket(channelIndexKey)
if chanIndex == nil {
return nil
}
@ -824,7 +823,7 @@ func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error {
maturityHeight := kid.ConfHeight() + kid.BlocksToMaturity()
hghtBucket := ns.getHeightBucket(tx, maturityHeight)
hghtBucket := ns.getHeightBucketWrite(tx, maturityHeight)
if hghtBucket == nil {
return nil
}
@ -845,7 +844,7 @@ func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error {
// its two-stage process of sweeping funds back to the user's wallet. These
// outputs are persisted in the nursery store in the crib state, and will be
// revisited after the first-stage output's CLTV has expired.
func (ns *nurseryStore) enterCrib(tx *bbolt.Tx, baby *babyOutput) error {
func (ns *nurseryStore) enterCrib(tx kvdb.RwTx, baby *babyOutput) error {
// First, retrieve or create the channel bucket corresponding to the
// baby output's origin channel point.
chanPoint := baby.OriginChanPoint()
@ -902,7 +901,7 @@ func (ns *nurseryStore) enterCrib(tx *bbolt.Tx, baby *babyOutput) error {
// through a single stage before sweeping. Outputs are stored in the preschool
// bucket until the commitment transaction has been confirmed, at which point
// they will be moved to the kindergarten bucket.
func (ns *nurseryStore) enterPreschool(tx *bbolt.Tx, kid *kidOutput) error {
func (ns *nurseryStore) enterPreschool(tx kvdb.RwTx, kid *kidOutput) error {
// First, retrieve or create the channel bucket corresponding to the
// baby output's origin channel point.
chanPoint := kid.OriginChanPoint()
@ -935,11 +934,11 @@ func (ns *nurseryStore) enterPreschool(tx *bbolt.Tx, kid *kidOutput) error {
// createChannelBucket creates or retrieves a channel bucket for the provided
// channel point.
func (ns *nurseryStore) createChannelBucket(tx *bbolt.Tx,
chanPoint *wire.OutPoint) (*bbolt.Bucket, error) {
func (ns *nurseryStore) createChannelBucket(tx kvdb.RwTx,
chanPoint *wire.OutPoint) (kvdb.RwBucket, error) {
// Ensure that the chain bucket for this nursery store exists.
chainBucket, err := tx.CreateBucketIfNotExists(ns.pfxChainKey)
chainBucket, err := tx.CreateTopLevelBucket(ns.pfxChainKey)
if err != nil {
return nil, err
}
@ -966,17 +965,17 @@ func (ns *nurseryStore) createChannelBucket(tx *bbolt.Tx,
// getChannelBucket retrieves an existing channel bucket from the nursery store,
// using the given channel point. If the bucket does not exist, or any bucket
// along its path does not exist, a nil value is returned.
func (ns *nurseryStore) getChannelBucket(tx *bbolt.Tx,
chanPoint *wire.OutPoint) *bbolt.Bucket {
func (ns *nurseryStore) getChannelBucket(tx kvdb.ReadTx,
chanPoint *wire.OutPoint) kvdb.ReadBucket {
// Retrieve the existing chain bucket for this nursery store.
chainBucket := tx.Bucket(ns.pfxChainKey)
chainBucket := tx.ReadBucket(ns.pfxChainKey)
if chainBucket == nil {
return nil
}
// Retrieve the existing channel index.
chanIndex := chainBucket.Bucket(channelIndexKey)
chanIndex := chainBucket.NestedReadBucket(channelIndexKey)
if chanIndex == nil {
return nil
}
@ -988,16 +987,44 @@ func (ns *nurseryStore) getChannelBucket(tx *bbolt.Tx,
return nil
}
return chanIndex.Bucket(chanBuffer.Bytes())
return chanIndex.NestedReadBucket(chanBuffer.Bytes())
}
// getChannelBucketWrite retrieves an existing channel bucket from the nursery store,
// using the given channel point. If the bucket does not exist, or any bucket
// along its path does not exist, a nil value is returned.
func (ns *nurseryStore) getChannelBucketWrite(tx kvdb.RwTx,
chanPoint *wire.OutPoint) kvdb.RwBucket {
// Retrieve the existing chain bucket for this nursery store.
chainBucket := tx.ReadWriteBucket(ns.pfxChainKey)
if chainBucket == nil {
return nil
}
// Retrieve the existing channel index.
chanIndex := chainBucket.NestedReadWriteBucket(channelIndexKey)
if chanIndex == nil {
return nil
}
// Serialize the provided channel point and return the bucket matching
// the serialized key.
var chanBuffer bytes.Buffer
if err := writeOutpoint(&chanBuffer, chanPoint); err != nil {
return nil
}
return chanIndex.NestedReadWriteBucket(chanBuffer.Bytes())
}
// createHeightBucket creates or retrieves an existing bucket from the height
// index, corresponding to the provided height.
func (ns *nurseryStore) createHeightBucket(tx *bbolt.Tx,
height uint32) (*bbolt.Bucket, error) {
func (ns *nurseryStore) createHeightBucket(tx kvdb.RwTx,
height uint32) (kvdb.RwBucket, error) {
// Ensure that the chain bucket for this nursery store exists.
chainBucket, err := tx.CreateBucketIfNotExists(ns.pfxChainKey)
chainBucket, err := tx.CreateTopLevelBucket(ns.pfxChainKey)
if err != nil {
return nil, err
}
@ -1021,17 +1048,17 @@ func (ns *nurseryStore) createHeightBucket(tx *bbolt.Tx,
// getHeightBucketPath retrieves an existing height bucket from the nursery
// store, using the provided block height. If the bucket does not exist, or any
// bucket along its path does not exist, a nil value is returned.
func (ns *nurseryStore) getHeightBucketPath(tx *bbolt.Tx,
height uint32) (*bbolt.Bucket, *bbolt.Bucket, *bbolt.Bucket) {
func (ns *nurseryStore) getHeightBucketPath(tx kvdb.ReadTx,
height uint32) (kvdb.ReadBucket, kvdb.ReadBucket, kvdb.ReadBucket) {
// Retrieve the existing chain bucket for this nursery store.
chainBucket := tx.Bucket(ns.pfxChainKey)
chainBucket := tx.ReadBucket(ns.pfxChainKey)
if chainBucket == nil {
return nil, nil, nil
}
// Retrieve the existing channel index.
hghtIndex := chainBucket.Bucket(heightIndexKey)
hghtIndex := chainBucket.NestedReadBucket(heightIndexKey)
if hghtIndex == nil {
return nil, nil, nil
}
@ -1041,24 +1068,63 @@ func (ns *nurseryStore) getHeightBucketPath(tx *bbolt.Tx,
var heightBytes [4]byte
byteOrder.PutUint32(heightBytes[:], height)
return chainBucket, hghtIndex, hghtIndex.Bucket(heightBytes[:])
return chainBucket, hghtIndex, hghtIndex.NestedReadBucket(heightBytes[:])
}
// getHeightBucketPathWrite retrieves an existing height bucket from the nursery
// store, using the provided block height. If the bucket does not exist, or any
// bucket along its path does not exist, a nil value is returned.
func (ns *nurseryStore) getHeightBucketPathWrite(tx kvdb.RwTx,
height uint32) (kvdb.RwBucket, kvdb.RwBucket, kvdb.RwBucket) {
// Retrieve the existing chain bucket for this nursery store.
chainBucket := tx.ReadWriteBucket(ns.pfxChainKey)
if chainBucket == nil {
return nil, nil, nil
}
// Retrieve the existing channel index.
hghtIndex := chainBucket.NestedReadWriteBucket(heightIndexKey)
if hghtIndex == nil {
return nil, nil, nil
}
// Serialize the provided block height and return the bucket matching
// the serialized key.
var heightBytes [4]byte
byteOrder.PutUint32(heightBytes[:], height)
return chainBucket, hghtIndex, hghtIndex.NestedReadWriteBucket(
heightBytes[:],
)
}
// getHeightBucket retrieves an existing height bucket from the nursery store,
// using the provided block height. If the bucket does not exist, or any bucket
// along its path does not exist, a nil value is returned.
func (ns *nurseryStore) getHeightBucket(tx *bbolt.Tx,
height uint32) *bbolt.Bucket {
func (ns *nurseryStore) getHeightBucket(tx kvdb.ReadTx,
height uint32) kvdb.ReadBucket {
_, _, hghtBucket := ns.getHeightBucketPath(tx, height)
return hghtBucket
}
// getHeightBucketWrite retrieves an existing height bucket from the nursery store,
// using the provided block height. If the bucket does not exist, or any bucket
// along its path does not exist, a nil value is returned.
func (ns *nurseryStore) getHeightBucketWrite(tx kvdb.RwTx,
height uint32) kvdb.RwBucket {
_, _, hghtBucket := ns.getHeightBucketPathWrite(tx, height)
return hghtBucket
}
// createHeightChanBucket creates or retrieves an existing height-channel bucket
// for the provided block height and channel point. This method will attempt to
// instantiate all buckets along the path if required.
func (ns *nurseryStore) createHeightChanBucket(tx *bbolt.Tx,
height uint32, chanPoint *wire.OutPoint) (*bbolt.Bucket, error) {
func (ns *nurseryStore) createHeightChanBucket(tx kvdb.RwTx,
height uint32, chanPoint *wire.OutPoint) (kvdb.RwBucket, error) {
// Ensure that the height bucket for this nursery store exists.
hghtBucket, err := ns.createHeightBucket(tx, height)
@ -1083,8 +1149,8 @@ func (ns *nurseryStore) createHeightChanBucket(tx *bbolt.Tx,
// nursery store, using the provided block height and channel point. If the
// bucket does not exist, or any bucket along its path does not exist, a nil
// value is returned.
func (ns *nurseryStore) getHeightChanBucket(tx *bbolt.Tx,
height uint32, chanPoint *wire.OutPoint) *bbolt.Bucket {
func (ns *nurseryStore) getHeightChanBucket(tx kvdb.ReadTx, // nolint:unused
height uint32, chanPoint *wire.OutPoint) kvdb.ReadBucket {
// Retrieve the existing height bucket from this nursery store.
hghtBucket := ns.getHeightBucket(tx, height)
@ -1102,7 +1168,33 @@ func (ns *nurseryStore) getHeightChanBucket(tx *bbolt.Tx,
// Finally, return the height bucket specified by the serialized channel
// point.
return hghtBucket.Bucket(chanBytes)
return hghtBucket.NestedReadBucket(chanBytes)
}
// getHeightChanBucketWrite retrieves an existing height-channel bucket from the
// nursery store, using the provided block height and channel point. If the
// bucket does not exist, or any bucket along its path does not exist, a nil
// value is returned.
func (ns *nurseryStore) getHeightChanBucketWrite(tx kvdb.RwTx,
height uint32, chanPoint *wire.OutPoint) kvdb.RwBucket {
// Retrieve the existing height bucket from this nursery store.
hghtBucket := ns.getHeightBucketWrite(tx, height)
if hghtBucket == nil {
return nil
}
// Serialize the provided channel point, which generates the key for
// looking up the proper height-channel bucket inside the height bucket.
var chanBuffer bytes.Buffer
if err := writeOutpoint(&chanBuffer, chanPoint); err != nil {
return nil
}
chanBytes := chanBuffer.Bytes()
// Finally, return the height bucket specified by the serialized channel
// point.
return hghtBucket.NestedReadWriteBucket(chanBytes)
}
// forEachHeightPrefix enumerates all outputs at the given height whose state
@ -1110,7 +1202,7 @@ func (ns *nurseryStore) getHeightChanBucket(tx *bbolt.Tx,
// enumerate crib and kindergarten outputs at a particular height. The callback
// is invoked with serialized bytes retrieved for each output of interest,
// allowing the caller to deserialize them into the appropriate type.
func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte,
func (ns *nurseryStore) forEachHeightPrefix(tx kvdb.ReadTx, prefix []byte,
height uint32, callback func([]byte) error) error {
// Start by retrieving the height bucket corresponding to the provided
@ -1138,7 +1230,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte,
// Additionally, grab the chain index, which we will use to facilitate queries
// for each of the channel buckets of each of the channels in the list
// we assembled above.
chanIndex := chainBucket.Bucket(channelIndexKey)
chanIndex := chainBucket.NestedReadBucket(channelIndexKey)
if chanIndex == nil {
return errors.New("unable to retrieve channel index")
}
@ -1151,7 +1243,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte,
for _, chanBytes := range channelsAtHeight {
// Retrieve the height-channel bucket for this channel, which
// holds a sub-bucket for all outputs maturing at this height.
hghtChanBucket := hghtBucket.Bucket(chanBytes)
hghtChanBucket := hghtBucket.NestedReadBucket(chanBytes)
if hghtChanBucket == nil {
return fmt.Errorf("unable to retrieve height-channel "+
"bucket at height %d for %x", height, chanBytes)
@ -1160,7 +1252,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte,
// Load the appropriate channel bucket from the channel index,
// this will allow us to retrieve the individual serialized
// outputs.
chanBucket := chanIndex.Bucket(chanBytes)
chanBucket := chanIndex.NestedReadBucket(chanBytes)
if chanBucket == nil {
return fmt.Errorf("unable to retrieve channel "+
"bucket: '%x'", chanBytes)
@ -1170,7 +1262,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte,
// prefix, we will perform a prefix scan of the buckets
// contained in the height-channel bucket, efficiently
// enumerating the desired outputs.
c := hghtChanBucket.Cursor()
c := hghtChanBucket.ReadCursor()
for k, _ := c.Seek(prefix); bytes.HasPrefix(
k, prefix); k, _ = c.Next() {
@ -1198,7 +1290,7 @@ func (ns *nurseryStore) forEachHeightPrefix(tx *bbolt.Tx, prefix []byte,
// provided callback. The callback accepts a key-value pair of byte slices
// corresponding to the prefixed-output key and the serialized output,
// respectively.
func (ns *nurseryStore) forChanOutputs(tx *bbolt.Tx, chanPoint *wire.OutPoint,
func (ns *nurseryStore) forChanOutputs(tx kvdb.ReadTx, chanPoint *wire.OutPoint,
callback func([]byte, []byte) error) error {
chanBucket := ns.getChannelBucket(tx, chanPoint)
@ -1216,11 +1308,11 @@ var errBucketNotEmpty = errors.New("bucket is not empty, cannot be pruned")
// removeOutputFromHeight will delete the given output from the specified
// height-channel bucket, and attempt to prune the upstream directories if they
// are empty.
func (ns *nurseryStore) removeOutputFromHeight(tx *bbolt.Tx, height uint32,
func (ns *nurseryStore) removeOutputFromHeight(tx kvdb.RwTx, height uint32,
chanPoint *wire.OutPoint, pfxKey []byte) error {
// Retrieve the height-channel bucket and delete the prefixed output.
hghtChanBucket := ns.getHeightChanBucket(tx, height, chanPoint)
hghtChanBucket := ns.getHeightChanBucketWrite(tx, height, chanPoint)
if hghtChanBucket == nil {
// Height-channel bucket already removed.
return nil
@ -1233,7 +1325,7 @@ func (ns *nurseryStore) removeOutputFromHeight(tx *bbolt.Tx, height uint32,
}
// Retrieve the height bucket that contains the height-channel bucket.
hghtBucket := ns.getHeightBucket(tx, height)
hghtBucket := ns.getHeightBucketWrite(tx, height)
if hghtBucket == nil {
return errors.New("height bucket not found")
}
@ -1268,9 +1360,9 @@ func (ns *nurseryStore) removeOutputFromHeight(tx *bbolt.Tx, height uint32,
// all active outputs at this height have been removed from their respective
// height-channel buckets. The returned boolean value indicates whether or not
// this invocation successfully pruned the height bucket.
func (ns *nurseryStore) pruneHeight(tx *bbolt.Tx, height uint32) (bool, error) {
func (ns *nurseryStore) pruneHeight(tx kvdb.RwTx, height uint32) (bool, error) {
// Fetch the existing height index and height bucket.
_, hghtIndex, hghtBucket := ns.getHeightBucketPath(tx, height)
_, hghtIndex, hghtBucket := ns.getHeightBucketPathWrite(tx, height)
if hghtBucket == nil {
return false, nil
}
@ -1287,7 +1379,7 @@ func (ns *nurseryStore) pruneHeight(tx *bbolt.Tx, height uint32) (bool, error) {
// Attempt to remove each height-channel bucket from the height bucket
// located above.
hghtChanBucket := hghtBucket.Bucket(chanBytes)
hghtChanBucket := hghtBucket.NestedReadWriteBucket(chanBytes)
if hghtChanBucket == nil {
return errors.New("unable to find height-channel bucket")
}
@ -1315,9 +1407,9 @@ func (ns *nurseryStore) pruneHeight(tx *bbolt.Tx, height uint32) (bool, error) {
// removeBucketIfEmpty attempts to delete a bucket specified by name from the
// provided parent bucket.
func removeBucketIfEmpty(parent *bbolt.Bucket, bktName []byte) error {
func removeBucketIfEmpty(parent kvdb.RwBucket, bktName []byte) error {
// Attempt to fetch the named bucket from its parent.
bkt := parent.Bucket(bktName)
bkt := parent.NestedReadWriteBucket(bktName)
if bkt == nil {
// No bucket was found, already removed?
return nil
@ -1328,25 +1420,25 @@ func removeBucketIfEmpty(parent *bbolt.Bucket, bktName []byte) error {
return err
}
return parent.DeleteBucket(bktName)
return parent.DeleteNestedBucket(bktName)
}
// removeBucketIfExists safely deletes the named bucket by first checking
// that it exists in the parent bucket.
func removeBucketIfExists(parent *bbolt.Bucket, bktName []byte) error {
func removeBucketIfExists(parent kvdb.RwBucket, bktName []byte) error {
// Attempt to fetch the named bucket from its parent.
bkt := parent.Bucket(bktName)
bkt := parent.NestedReadWriteBucket(bktName)
if bkt == nil {
// No bucket was found, already removed?
return nil
}
return parent.DeleteBucket(bktName)
return parent.DeleteNestedBucket(bktName)
}
// isBucketEmpty returns errBucketNotEmpty if the bucket has a non-zero number
// of children.
func isBucketEmpty(parent *bbolt.Bucket) error {
func isBucketEmpty(parent kvdb.ReadBucket) error {
return parent.ForEach(func(_, _ []byte) error {
return errBucketNotEmpty
})

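The nursery store changes mostly split nested-bucket traversal by access mode: read paths use ReadBucket, NestedReadBucket, and ReadCursor, while write paths get parallel *Write helpers built on NestedReadWriteBucket. A sketch of a read-only prefix scan over a two-level hierarchy; the bucket names and the "kndr" prefix are placeholders, not the store's real schema:

package nurserysketch

import (
    "bytes"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var (
    chainKey = []byte("chain")
    indexKey = []byte("channel-index")
    scanPfx  = []byte("kndr")
)

// collectPrefixed walks chain -> index and gathers every key under the prefix.
func collectPrefixed(db kvdb.Backend) ([][]byte, error) {
    var keys [][]byte
    err := kvdb.View(db, func(tx kvdb.ReadTx) error {
        chain := tx.ReadBucket(chainKey)
        if chain == nil {
            return nil // nothing stored yet
        }
        index := chain.NestedReadBucket(indexKey)
        if index == nil {
            return nil
        }

        // ReadCursor gives a forward-only iterator; Seek jumps to the first
        // key at or after the prefix, as in the prefix scans above.
        c := index.ReadCursor()
        for k, _ := c.Seek(scanPfx); bytes.HasPrefix(k, scanPfx); k, _ = c.Next() {
            keys = append(keys, append([]byte(nil), k...))
        }
        return nil
    })
    return keys, err
}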
@ -1,8 +1,8 @@
package routing
import (
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -26,7 +26,7 @@ type routingGraph interface {
// database.
type dbRoutingTx struct {
graph *channeldb.ChannelGraph
tx *bbolt.Tx
tx kvdb.ReadTx
source route.Vertex
}
@ -38,7 +38,7 @@ func newDbRoutingTx(graph *channeldb.ChannelGraph) (*dbRoutingTx, error) {
return nil, err
}
tx, err := graph.Database().Begin(false)
tx, err := graph.Database().BeginReadTx()
if err != nil {
return nil, err
}
@ -62,7 +62,7 @@ func (g *dbRoutingTx) forEachNodeChannel(nodePub route.Vertex,
cb func(*channeldb.ChannelEdgeInfo, *channeldb.ChannelEdgePolicy,
*channeldb.ChannelEdgePolicy) error) error {
txCb := func(_ *bbolt.Tx, info *channeldb.ChannelEdgeInfo,
txCb := func(_ kvdb.ReadTx, info *channeldb.ChannelEdgeInfo,
p1, p2 *channeldb.ChannelEdgePolicy) error {
return cb(info, p1, p2)

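dbRoutingTx now starts its long-lived read-only view with BeginReadTx and threads it through the per-node channel callbacks. A sketch of that pattern, assuming the read transaction is released with Rollback as in walletdb (which kvdb wraps); the helper itself is invented:

package routingsketch

import (
    "github.com/lightningnetwork/lnd/channeldb"
    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// countNodeChannels opens one read transaction, reuses it for the per-node
// channel iteration, and releases it when done.
func countNodeChannels(graph *channeldb.ChannelGraph,
    node *channeldb.LightningNode) (int, error) {

    tx, err := graph.Database().BeginReadTx()
    if err != nil {
        return 0, err
    }
    // Read-only transactions are discarded, never committed (assumed
    // Rollback semantics, as in walletdb).
    defer func() { _ = tx.Rollback() }()

    var count int
    err = node.ForEachChannel(tx, func(_ kvdb.ReadTx,
        _ *channeldb.ChannelEdgeInfo,
        _, _ *channeldb.ChannelEdgePolicy) error {

        count++
        return nil
    })
    return count, err
}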
@ -6,7 +6,7 @@ import (
"testing"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -82,7 +82,7 @@ func (c *integratedRoutingContext) testPayment(expectedNofAttempts int) {
dbPath := file.Name()
defer os.Remove(dbPath)
db, err := bbolt.Open(dbPath, 0600, nil)
db, err := kvdb.Open(kvdb.BoltBackendName, dbPath, true)
if err != nil {
c.t.Fatal(err)
}

@ -4,8 +4,8 @@ import (
"sync"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -173,7 +173,7 @@ type paymentResult struct {
}
// NewMissionControl returns a new instance of missionControl.
func NewMissionControl(db *bbolt.DB, cfg *MissionControlConfig) (
func NewMissionControl(db kvdb.Backend, cfg *MissionControlConfig) (
*MissionControl, error) {
log.Debugf("Instantiating mission control with config: "+

@ -7,8 +7,8 @@ import (
"time"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -35,20 +35,20 @@ const (
// Also changes to mission control parameters can be applied to historical data.
// Finally, it enables importing raw data from an external source.
type missionControlStore struct {
db *bbolt.DB
db kvdb.Backend
maxRecords int
numRecords int
}
func newMissionControlStore(db *bbolt.DB, maxRecords int) (*missionControlStore, error) {
func newMissionControlStore(db kvdb.Backend, maxRecords int) (*missionControlStore, error) {
store := &missionControlStore{
db: db,
maxRecords: maxRecords,
}
// Create buckets if not yet existing.
err := db.Update(func(tx *bbolt.Tx) error {
resultsBucket, err := tx.CreateBucketIfNotExists(resultsKey)
err := kvdb.Update(db, func(tx kvdb.RwTx) error {
resultsBucket, err := tx.CreateTopLevelBucket(resultsKey)
if err != nil {
return fmt.Errorf("cannot create results bucket: %v",
err)
@ -58,7 +58,7 @@ func newMissionControlStore(db *bbolt.DB, maxRecords int) (*missionControlStore,
// memory to avoid calling Stats().KeyN. The reliability of
// Stats() is doubtful and seemed to have caused crashes in the
// past (see #1874).
c := resultsBucket.Cursor()
c := resultsBucket.ReadCursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
store.numRecords++
}
@ -74,12 +74,12 @@ func newMissionControlStore(db *bbolt.DB, maxRecords int) (*missionControlStore,
// clear removes all results from the db.
func (b *missionControlStore) clear() error {
return b.db.Update(func(tx *bbolt.Tx) error {
if err := tx.DeleteBucket(resultsKey); err != nil {
return kvdb.Update(b.db, func(tx kvdb.RwTx) error {
if err := tx.DeleteTopLevelBucket(resultsKey); err != nil {
return err
}
_, err := tx.CreateBucket(resultsKey)
_, err := tx.CreateTopLevelBucket(resultsKey)
return err
})
}
@ -88,8 +88,8 @@ func (b *missionControlStore) clear() error {
func (b *missionControlStore) fetchAll() ([]*paymentResult, error) {
var results []*paymentResult
err := b.db.View(func(tx *bbolt.Tx) error {
resultBucket := tx.Bucket(resultsKey)
err := kvdb.View(b.db, func(tx kvdb.ReadTx) error {
resultBucket := tx.ReadBucket(resultsKey)
results = make([]*paymentResult, 0)
return resultBucket.ForEach(func(k, v []byte) error {
@ -218,13 +218,13 @@ func deserializeResult(k, v []byte) (*paymentResult, error) {
// AddResult adds a new result to the db.
func (b *missionControlStore) AddResult(rp *paymentResult) error {
return b.db.Update(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(resultsKey)
return kvdb.Update(b.db, func(tx kvdb.RwTx) error {
bucket := tx.ReadWriteBucket(resultsKey)
// Prune oldest entries.
if b.maxRecords > 0 {
for b.numRecords >= b.maxRecords {
cursor := bucket.Cursor()
cursor := bucket.ReadWriteCursor()
cursor.First()
if err := cursor.Delete(); err != nil {
return err

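The mission control store stays bounded by counting keys with a ReadCursor at startup and pruning the oldest entries with a ReadWriteCursor before each insert. A sketch of that bounded-log idea, with an assumed bucket name and 8-byte big-endian keys; Put is assumed to mirror walletdb's bucket Put:

package mcsketch

import (
    "encoding/binary"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var logBucket = []byte("bounded-log")

// appendBounded writes one record and, if the bucket already holds maxRecords
// entries, deletes the oldest keys first (keys here sort by sequence number).
func appendBounded(db kvdb.Backend, seq uint64, value []byte, maxRecords int) error {
    return kvdb.Update(db, func(tx kvdb.RwTx) error {
        bkt, err := tx.CreateTopLevelBucket(logBucket)
        if err != nil {
            return err
        }

        // Count current entries by walking a cursor, echoing the diff's
        // choice to avoid relying on bucket statistics.
        var n int
        c := bkt.ReadCursor()
        for k, _ := c.First(); k != nil; k, _ = c.Next() {
            n++
        }

        // Prune the oldest entries until there is room for one more.
        for n >= maxRecords {
            rw := bkt.ReadWriteCursor()
            rw.First()
            if err := rw.Delete(); err != nil {
                return err
            }
            n--
        }

        var key [8]byte
        binary.BigEndian.PutUint64(key[:], seq)
        return bkt.Put(key[:], value)
    })
}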
@ -8,9 +8,9 @@ import (
"time"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -31,7 +31,7 @@ func TestMissionControlStore(t *testing.T) {
dbPath := file.Name()
db, err := bbolt.Open(dbPath, 0600, nil)
db, err := kvdb.Create(kvdb.BoltBackendName, dbPath, true)
if err != nil {
t.Fatal(err)
}

@ -6,7 +6,7 @@ import (
"testing"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -44,7 +44,7 @@ type mcTestContext struct {
mc *MissionControl
now time.Time
db *bbolt.DB
db kvdb.Backend
dbPath string
pid uint64
@ -63,7 +63,7 @@ func createMcTestContext(t *testing.T) *mcTestContext {
ctx.dbPath = file.Name()
ctx.db, err = bbolt.Open(ctx.dbPath, 0600, nil)
ctx.db, err = kvdb.Open(kvdb.BoltBackendName, ctx.dbPath, true)
if err != nil {
t.Fatal(err)
}

@ -11,12 +11,12 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/htlcswitch"
"github.com/lightningnetwork/lnd/input"
@ -2111,7 +2111,7 @@ func (r *ChannelRouter) FetchLightningNode(node route.Vertex) (*channeldb.Lightn
//
// NOTE: This method is part of the ChannelGraphSource interface.
func (r *ChannelRouter) ForEachNode(cb func(*channeldb.LightningNode) error) error {
return r.cfg.Graph.ForEachNode(nil, func(_ *bbolt.Tx, n *channeldb.LightningNode) error {
return r.cfg.Graph.ForEachNode(nil, func(_ kvdb.ReadTx, n *channeldb.LightningNode) error {
return cb(n)
})
}
@ -2123,7 +2123,7 @@ func (r *ChannelRouter) ForEachNode(cb func(*channeldb.LightningNode) error) err
func (r *ChannelRouter) ForAllOutgoingChannels(cb func(*channeldb.ChannelEdgeInfo,
*channeldb.ChannelEdgePolicy) error) error {
return r.selfNode.ForEachChannel(nil, func(_ *bbolt.Tx, c *channeldb.ChannelEdgeInfo,
return r.selfNode.ForEachChannel(nil, func(_ kvdb.ReadTx, c *channeldb.ChannelEdgeInfo,
e, _ *channeldb.ChannelEdgePolicy) error {
if e == nil {
@ -2264,7 +2264,7 @@ func generateBandwidthHints(sourceNode *channeldb.LightningNode,
// First, we'll collect the set of outbound edges from the target
// source node.
var localChans []*channeldb.ChannelEdgeInfo
err := sourceNode.ForEachChannel(nil, func(tx *bbolt.Tx,
err := sourceNode.ForEachChannel(nil, func(tx kvdb.ReadTx,
edgeInfo *channeldb.ChannelEdgeInfo,
_, _ *channeldb.ChannelEdgePolicy) error {

@ -97,7 +97,7 @@ func createTestCtxFromGraphInstance(startingHeight uint32, graphInstance *testGr
}
mc, err := NewMissionControl(
graphInstance.graph.Database().DB,
graphInstance.graph.Database(),
mcConfig,
)
if err != nil {

@ -23,7 +23,6 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwallet/wallet/txauthor"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
proxy "github.com/grpc-ecosystem/grpc-gateway/runtime"
@ -33,6 +32,7 @@ import (
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/chanfitness"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channelnotifier"
"github.com/lightningnetwork/lnd/contractcourt"
"github.com/lightningnetwork/lnd/discovery"
@ -4494,7 +4494,7 @@ func (r *rpcServer) DescribeGraph(ctx context.Context,
// First iterate through all the known nodes (connected or unconnected
// within the graph), collating their current state into the RPC
// response.
err := graph.ForEachNode(nil, func(_ *bbolt.Tx, node *channeldb.LightningNode) error {
err := graph.ForEachNode(nil, func(_ kvdb.ReadTx, node *channeldb.LightningNode) error {
nodeAddrs := make([]*lnrpc.NodeAddress, 0)
for _, addr := range node.Addresses {
nodeAddr := &lnrpc.NodeAddress{
@ -4652,7 +4652,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
channels []*lnrpc.ChannelEdge
)
if err := node.ForEachChannel(nil, func(_ *bbolt.Tx,
if err := node.ForEachChannel(nil, func(_ kvdb.ReadTx,
edge *channeldb.ChannelEdgeInfo,
c1, c2 *channeldb.ChannelEdgePolicy) error {
@ -4750,7 +4750,7 @@ func (r *rpcServer) GetNetworkInfo(ctx context.Context,
// network, tallying up the total number of nodes, and also gathering
// each node so we can measure the graph diameter and degree stats
// below.
if err := graph.ForEachNode(nil, func(tx *bbolt.Tx, node *channeldb.LightningNode) error {
if err := graph.ForEachNode(nil, func(tx kvdb.ReadTx, node *channeldb.LightningNode) error {
// Increment the total number of nodes with each iteration.
numNodes++
@ -4760,7 +4760,7 @@ func (r *rpcServer) GetNetworkInfo(ctx context.Context,
// through the db transaction from the outer view so we can
// re-use it within this inner view.
var outDegree uint32
if err := node.ForEachChannel(tx, func(_ *bbolt.Tx,
if err := node.ForEachChannel(tx, func(_ kvdb.ReadTx,
edge *channeldb.ChannelEdgeInfo, _, _ *channeldb.ChannelEdgePolicy) error {
// Bump up the out degree for this node for each
@ -5225,7 +5225,7 @@ func (r *rpcServer) FeeReport(ctx context.Context,
}
var feeReports []*lnrpc.ChannelFeeReport
err = selfNode.ForEachChannel(nil, func(_ *bbolt.Tx, chanInfo *channeldb.ChannelEdgeInfo,
err = selfNode.ForEachChannel(nil, func(_ kvdb.ReadTx, chanInfo *channeldb.ChannelEdgeInfo,
edgePolicy, _ *channeldb.ChannelEdgePolicy) error {
// Self node should always have policies for its channels.

@ -23,7 +23,6 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
sphinx "github.com/lightningnetwork/lightning-onion"
"github.com/lightningnetwork/lnd/autopilot"
@ -32,6 +31,7 @@ import (
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/chanfitness"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channelnotifier"
"github.com/lightningnetwork/lnd/clock"
"github.com/lightningnetwork/lnd/contractcourt"
@ -708,7 +708,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB,
routingConfig := routerrpc.GetRoutingConfig(cfg.SubRPCServers.RouterRPC)
s.missionControl, err = routing.NewMissionControl(
chanDB.DB,
chanDB,
&routing.MissionControlConfig{
AprioriHopProbability: routingConfig.AprioriHopProbability,
PenaltyHalfLife: routingConfig.PenaltyHalfLife,
@ -820,7 +820,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB,
sweep.DefaultBatchWindowDuration)
sweeperStore, err := sweep.NewSweeperStore(
chanDB.DB, activeNetParams.GenesisHash,
chanDB, activeNetParams.GenesisHash,
)
if err != nil {
srvrLog.Errorf("unable to create sweeper store: %v", err)
@ -2126,7 +2126,7 @@ func (s *server) establishPersistentConnections() error {
// each of the nodes.
selfPub := s.identityPriv.PubKey().SerializeCompressed()
err = sourceNode.ForEachChannel(nil, func(
tx *bbolt.Tx,
tx kvdb.ReadTx,
chanInfo *channeldb.ChannelEdgeInfo,
policy, _ *channeldb.ChannelEdgePolicy) error {

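Note the two calling conventions above: kvdb.Batch is still handed the raw chanDB Backend, while constructors such as NewMissionControl and NewSweeperStore now receive chanDB directly, which suggests channeldb.DB embeds kvdb.Backend. A sketch of why a *channeldb.DB can stand in for a generic backend; the bucket name is a placeholder and this is not lnd's actual wiring:

package serversketch

import (
    "github.com/lightningnetwork/lnd/channeldb"
    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// bucketExists is a generic helper over any kvdb.Backend.
func bucketExists(db kvdb.Backend, name []byte) (bool, error) {
    var found bool
    err := kvdb.View(db, func(tx kvdb.ReadTx) error {
        found = tx.ReadBucket(name) != nil
        return nil
    })
    return found, err
}

func example(dbPath string) (bool, error) {
    chanDB, err := channeldb.Open(dbPath)
    if err != nil {
        return false, err
    }
    defer chanDB.Close()

    // The *channeldb.DB itself satisfies kvdb.Backend here, mirroring calls
    // such as routing.NewMissionControl(chanDB, ...) in the diff above.
    return bucketExists(chanDB, []byte("graph-edges"))
}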
@ -8,7 +8,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
@ -56,26 +56,28 @@ type SweeperStore interface {
}
type sweeperStore struct {
db *bbolt.DB
db kvdb.Backend
}
// NewSweeperStore returns a new store instance.
func NewSweeperStore(db *bbolt.DB, chainHash *chainhash.Hash) (
func NewSweeperStore(db kvdb.Backend, chainHash *chainhash.Hash) (
SweeperStore, error) {
err := db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(
err := kvdb.Update(db, func(tx kvdb.RwTx) error {
_, err := tx.CreateTopLevelBucket(
lastTxBucketKey,
)
if err != nil {
return err
}
if tx.Bucket(txHashesBucketKey) != nil {
if tx.ReadWriteBucket(txHashesBucketKey) != nil {
return nil
}
txHashesBucket, err := tx.CreateBucket(txHashesBucketKey)
txHashesBucket, err := tx.CreateTopLevelBucket(
txHashesBucketKey,
)
if err != nil {
return err
}
@ -97,7 +99,7 @@ func NewSweeperStore(db *bbolt.DB, chainHash *chainhash.Hash) (
// migrateTxHashes migrates nursery finalized txes to the tx hashes bucket. This
// is not implemented as a database migration, to keep the downgrade path open.
func migrateTxHashes(tx *bbolt.Tx, txHashesBucket *bbolt.Bucket,
func migrateTxHashes(tx kvdb.RwTx, txHashesBucket kvdb.RwBucket,
chainHash *chainhash.Hash) error {
log.Infof("Migrating UTXO nursery finalized TXIDs")
@ -113,20 +115,20 @@ func migrateTxHashes(tx *bbolt.Tx, txHashesBucket *bbolt.Bucket,
}
// Get chain bucket if exists.
chainBucket := tx.Bucket(b.Bytes())
chainBucket := tx.ReadWriteBucket(b.Bytes())
if chainBucket == nil {
return nil
}
// Retrieve the existing height index.
hghtIndex := chainBucket.Bucket(utxnHeightIndexKey)
hghtIndex := chainBucket.NestedReadWriteBucket(utxnHeightIndexKey)
if hghtIndex == nil {
return nil
}
// Retrieve all heights.
err := hghtIndex.ForEach(func(k, v []byte) error {
heightBucket := hghtIndex.Bucket(k)
heightBucket := hghtIndex.NestedReadWriteBucket(k)
if heightBucket == nil {
return nil
}
@ -163,13 +165,13 @@ func migrateTxHashes(tx *bbolt.Tx, txHashesBucket *bbolt.Bucket,
// NotifyPublishTx signals that we are about to publish a tx.
func (s *sweeperStore) NotifyPublishTx(sweepTx *wire.MsgTx) error {
return s.db.Update(func(tx *bbolt.Tx) error {
lastTxBucket := tx.Bucket(lastTxBucketKey)
return kvdb.Update(s.db, func(tx kvdb.RwTx) error {
lastTxBucket := tx.ReadWriteBucket(lastTxBucketKey)
if lastTxBucket == nil {
return errors.New("last tx bucket does not exist")
}
txHashesBucket := tx.Bucket(txHashesBucketKey)
txHashesBucket := tx.ReadWriteBucket(txHashesBucketKey)
if txHashesBucket == nil {
return errors.New("tx hashes bucket does not exist")
}
@ -194,8 +196,8 @@ func (s *sweeperStore) NotifyPublishTx(sweepTx *wire.MsgTx) error {
func (s *sweeperStore) GetLastPublishedTx() (*wire.MsgTx, error) {
var sweepTx *wire.MsgTx
err := s.db.View(func(tx *bbolt.Tx) error {
lastTxBucket := tx.Bucket(lastTxBucketKey)
err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
lastTxBucket := tx.ReadBucket(lastTxBucketKey)
if lastTxBucket == nil {
return errors.New("last tx bucket does not exist")
}
@ -225,8 +227,8 @@ func (s *sweeperStore) GetLastPublishedTx() (*wire.MsgTx, error) {
func (s *sweeperStore) IsOurTx(hash chainhash.Hash) (bool, error) {
var ours bool
err := s.db.View(func(tx *bbolt.Tx) error {
txHashesBucket := tx.Bucket(txHashesBucketKey)
err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
txHashesBucket := tx.ReadBucket(txHashesBucketKey)
if txHashesBucket == nil {
return errors.New("tx hashes bucket does not exist")
}

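NewSweeperStore shows a lazy migration guarded by bucket existence: if the new top-level bucket is already present the Update is a no-op, otherwise it is created and backfilled within the same transaction, keeping the downgrade path open. A sketch of that create-and-backfill pattern with placeholder bucket names; Put is assumed to mirror walletdb's bucket Put:

package sweepsketch

import (
    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var (
    newBucket    = []byte("new-index")
    legacyBucket = []byte("legacy-index")
)

// ensureIndex creates the new index bucket on first use and copies any legacy
// entries over, all atomically in one read-write transaction.
func ensureIndex(db kvdb.Backend) error {
    return kvdb.Update(db, func(tx kvdb.RwTx) error {
        // Already migrated on a previous run: nothing to do.
        if tx.ReadWriteBucket(newBucket) != nil {
            return nil
        }

        dst, err := tx.CreateTopLevelBucket(newBucket)
        if err != nil {
            return err
        }

        src := tx.ReadWriteBucket(legacyBucket)
        if src == nil {
            return nil // no legacy data to carry over
        }
        return src.ForEach(func(k, v []byte) error {
            return dst.Put(k, v)
        })
    })
}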
@ -53,7 +53,7 @@ func TestStore(t *testing.T) {
testStore(t, func() (SweeperStore, error) {
var chain chainhash.Hash
return NewSweeperStore(cdb.DB, &chain)
return NewSweeperStore(cdb, &chain)
})
})
t.Run("mock", func(t *testing.T) {

@ -8,7 +8,7 @@ import (
"net"
"github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -113,7 +113,7 @@ var (
// ClientDB is single database providing a persistent storage engine for the
// wtclient.
type ClientDB struct {
db *bbolt.DB
db kvdb.Backend
dbPath string
}
@ -146,7 +146,7 @@ func OpenClientDB(dbPath string) (*ClientDB, error) {
// initialized. This allows us to assume their presence throughout all
// operations. If a known top-level bucket is expected to exist but is
// missing, this will trigger an ErrUninitializedDB error.
err = clientDB.db.Update(initClientDBBuckets)
err = kvdb.Update(clientDB.db, initClientDBBuckets)
if err != nil {
bdb.Close()
return nil, err
@ -157,7 +157,7 @@ func OpenClientDB(dbPath string) (*ClientDB, error) {
// initClientDBBuckets creates all top-level buckets required to handle database
// operations required by the latest version.
func initClientDBBuckets(tx *bbolt.Tx) error {
func initClientDBBuckets(tx kvdb.RwTx) error {
buckets := [][]byte{
cSessionKeyIndexBkt,
cChanSummaryBkt,
@ -167,7 +167,7 @@ func initClientDBBuckets(tx *bbolt.Tx) error {
}
for _, bucket := range buckets {
_, err := tx.CreateBucketIfNotExists(bucket)
_, err := tx.CreateTopLevelBucket(bucket)
if err != nil {
return err
}
@ -179,7 +179,7 @@ func initClientDBBuckets(tx *bbolt.Tx) error {
// bdb returns the backing bbolt.DB instance.
//
// NOTE: Part of the versionedDB interface.
func (c *ClientDB) bdb() *bbolt.DB {
func (c *ClientDB) bdb() kvdb.Backend {
return c.db
}
@ -188,7 +188,7 @@ func (c *ClientDB) bdb() *bbolt.DB {
// NOTE: Part of the versionedDB interface.
func (c *ClientDB) Version() (uint32, error) {
var version uint32
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
var err error
version, err = getDBVersion(tx)
return err
@ -215,13 +215,13 @@ func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) {
copy(towerPubKey[:], lnAddr.IdentityKey.SerializeCompressed())
var tower *Tower
err := c.db.Update(func(tx *bbolt.Tx) error {
towerIndex := tx.Bucket(cTowerIndexBkt)
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
towerIndex := tx.ReadWriteBucket(cTowerIndexBkt)
if towerIndex == nil {
return ErrUninitializedDB
}
towers := tx.Bucket(cTowerBkt)
towers := tx.ReadWriteBucket(cTowerBkt)
if towers == nil {
return ErrUninitializedDB
}
@ -248,7 +248,7 @@ func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) {
//
// TODO(wilmer): with an index of tower -> sessions we
// can avoid the linear lookup.
sessions := tx.Bucket(cSessionBkt)
sessions := tx.ReadWriteBucket(cSessionBkt)
if sessions == nil {
return ErrUninitializedDB
}
@ -308,12 +308,12 @@ func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) {
//
// NOTE: An error is not returned if the tower doesn't exist.
func (c *ClientDB) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error {
return c.db.Update(func(tx *bbolt.Tx) error {
towers := tx.Bucket(cTowerBkt)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
towers := tx.ReadWriteBucket(cTowerBkt)
if towers == nil {
return ErrUninitializedDB
}
towerIndex := tx.Bucket(cTowerIndexBkt)
towerIndex := tx.ReadWriteBucket(cTowerIndexBkt)
if towerIndex == nil {
return ErrUninitializedDB
}
@ -342,7 +342,7 @@ func (c *ClientDB) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error {
//
// TODO(wilmer): with an index of tower -> sessions we can avoid
// the linear lookup.
sessions := tx.Bucket(cSessionBkt)
sessions := tx.ReadWriteBucket(cSessionBkt)
if sessions == nil {
return ErrUninitializedDB
}
@ -383,8 +383,8 @@ func (c *ClientDB) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error {
// LoadTowerByID retrieves a tower by its tower ID.
func (c *ClientDB) LoadTowerByID(towerID TowerID) (*Tower, error) {
var tower *Tower
err := c.db.View(func(tx *bbolt.Tx) error {
towers := tx.Bucket(cTowerBkt)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
towers := tx.ReadBucket(cTowerBkt)
if towers == nil {
return ErrUninitializedDB
}
@ -403,12 +403,12 @@ func (c *ClientDB) LoadTowerByID(towerID TowerID) (*Tower, error) {
// LoadTower retrieves a tower by its public key.
func (c *ClientDB) LoadTower(pubKey *btcec.PublicKey) (*Tower, error) {
var tower *Tower
err := c.db.View(func(tx *bbolt.Tx) error {
towers := tx.Bucket(cTowerBkt)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
towers := tx.ReadBucket(cTowerBkt)
if towers == nil {
return ErrUninitializedDB
}
towerIndex := tx.Bucket(cTowerIndexBkt)
towerIndex := tx.ReadBucket(cTowerIndexBkt)
if towerIndex == nil {
return ErrUninitializedDB
}
@ -432,8 +432,8 @@ func (c *ClientDB) LoadTower(pubKey *btcec.PublicKey) (*Tower, error) {
// ListTowers retrieves the list of towers available within the database.
func (c *ClientDB) ListTowers() ([]*Tower, error) {
var towers []*Tower
err := c.db.View(func(tx *bbolt.Tx) error {
towerBucket := tx.Bucket(cTowerBkt)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
towerBucket := tx.ReadBucket(cTowerBkt)
if towerBucket == nil {
return ErrUninitializedDB
}
@ -461,8 +461,8 @@ func (c *ClientDB) ListTowers() ([]*Tower, error) {
// CreateClientSession is invoked should return the same index.
func (c *ClientDB) NextSessionKeyIndex(towerID TowerID) (uint32, error) {
var index uint32
err := c.db.Update(func(tx *bbolt.Tx) error {
keyIndex := tx.Bucket(cSessionKeyIndexBkt)
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
keyIndex := tx.ReadWriteBucket(cSessionKeyIndexBkt)
if keyIndex == nil {
return ErrUninitializedDB
}
@ -509,20 +509,20 @@ func (c *ClientDB) NextSessionKeyIndex(towerID TowerID) (uint32, error) {
// CreateClientSession records a newly negotiated client session in the set of
// active sessions. The session can be identified by its SessionID.
func (c *ClientDB) CreateClientSession(session *ClientSession) error {
return c.db.Update(func(tx *bbolt.Tx) error {
keyIndexes := tx.Bucket(cSessionKeyIndexBkt)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
keyIndexes := tx.ReadWriteBucket(cSessionKeyIndexBkt)
if keyIndexes == nil {
return ErrUninitializedDB
}
sessions := tx.Bucket(cSessionBkt)
sessions := tx.ReadWriteBucket(cSessionBkt)
if sessions == nil {
return ErrUninitializedDB
}
// Check that a client session with this session id doesn't
// already exist.
existingSessionBytes := sessions.Bucket(session.ID[:])
existingSessionBytes := sessions.NestedReadWriteBucket(session.ID[:])
if existingSessionBytes != nil {
return ErrClientSessionAlreadyExists
}
@ -558,8 +558,8 @@ func (c *ClientDB) CreateClientSession(session *ClientSession) error {
// response that do not correspond to this tower.
func (c *ClientDB) ListClientSessions(id *TowerID) (map[SessionID]*ClientSession, error) {
var clientSessions map[SessionID]*ClientSession
err := c.db.View(func(tx *bbolt.Tx) error {
sessions := tx.Bucket(cSessionBkt)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
sessions := tx.ReadBucket(cSessionBkt)
if sessions == nil {
return ErrUninitializedDB
}
@ -577,7 +577,7 @@ func (c *ClientDB) ListClientSessions(id *TowerID) (map[SessionID]*ClientSession
// listClientSessions returns the set of all client sessions known to the db. An
// optional tower ID can be used to filter out any client sessions in the
// response that do not correspond to this tower.
func listClientSessions(sessions *bbolt.Bucket,
func listClientSessions(sessions kvdb.ReadBucket,
id *TowerID) (map[SessionID]*ClientSession, error) {
clientSessions := make(map[SessionID]*ClientSession)
@ -612,8 +612,8 @@ func listClientSessions(sessions *bbolt.Bucket,
// channel summaries.
func (c *ClientDB) FetchChanSummaries() (ChannelSummaries, error) {
summaries := make(map[lnwire.ChannelID]ClientChanSummary)
err := c.db.View(func(tx *bbolt.Tx) error {
chanSummaries := tx.Bucket(cChanSummaryBkt)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
chanSummaries := tx.ReadBucket(cChanSummaryBkt)
if chanSummaries == nil {
return ErrUninitializedDB
}
@ -648,8 +648,8 @@ func (c *ClientDB) FetchChanSummaries() (ChannelSummaries, error) {
func (c *ClientDB) RegisterChannel(chanID lnwire.ChannelID,
sweepPkScript []byte) error {
return c.db.Update(func(tx *bbolt.Tx) error {
chanSummaries := tx.Bucket(cChanSummaryBkt)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
chanSummaries := tx.ReadWriteBucket(cChanSummaryBkt)
if chanSummaries == nil {
return ErrUninitializedDB
}
@ -692,8 +692,8 @@ func (c *ClientDB) CommitUpdate(id *SessionID,
update *CommittedUpdate) (uint16, error) {
var lastApplied uint16
err := c.db.Update(func(tx *bbolt.Tx) error {
sessions := tx.Bucket(cSessionBkt)
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
sessions := tx.ReadWriteBucket(cSessionBkt)
if sessions == nil {
return ErrUninitializedDB
}
@ -708,7 +708,7 @@ func (c *ClientDB) CommitUpdate(id *SessionID,
}
// Can't fail if the above didn't fail.
sessionBkt := sessions.Bucket(id[:])
sessionBkt := sessions.NestedReadWriteBucket(id[:])
// Ensure the session commits sub-bucket is initialized.
sessionCommits, err := sessionBkt.CreateBucketIfNotExists(
@ -796,8 +796,8 @@ func (c *ClientDB) CommitUpdate(id *SessionID,
func (c *ClientDB) AckUpdate(id *SessionID, seqNum uint16,
lastApplied uint16) error {
return c.db.Update(func(tx *bbolt.Tx) error {
sessions := tx.Bucket(cSessionBkt)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
sessions := tx.ReadWriteBucket(cSessionBkt)
if sessions == nil {
return ErrUninitializedDB
}
@ -835,11 +835,11 @@ func (c *ClientDB) AckUpdate(id *SessionID, seqNum uint16,
}
// Can't fail because getClientSession succeeded.
sessionBkt := sessions.Bucket(id[:])
sessionBkt := sessions.NestedReadWriteBucket(id[:])
// If the commits sub-bucket doesn't exist, there can't possibly
// be a corresponding committed update to remove.
sessionCommits := sessionBkt.Bucket(cSessionCommits)
sessionCommits := sessionBkt.NestedReadWriteBucket(cSessionCommits)
if sessionCommits == nil {
return ErrCommittedUpdateNotFound
}
@ -894,10 +894,10 @@ func (c *ClientDB) AckUpdate(id *SessionID, seqNum uint16,
// bucket corresponding to the serialized session id. This does not deserialize
// the CommittedUpdates or AckUpdates associated with the session. If the caller
// requires this info, use getClientSession.
func getClientSessionBody(sessions *bbolt.Bucket,
func getClientSessionBody(sessions kvdb.ReadBucket,
idBytes []byte) (*ClientSession, error) {
sessionBkt := sessions.Bucket(idBytes)
sessionBkt := sessions.NestedReadBucket(idBytes)
if sessionBkt == nil {
return nil, ErrClientSessionNotFound
}
@ -922,7 +922,7 @@ func getClientSessionBody(sessions *bbolt.Bucket,
// getClientSession loads the full ClientSession associated with the serialized
// session id. This method populates the CommittedUpdates and AckUpdates in
// addition to the ClientSession's body.
func getClientSession(sessions *bbolt.Bucket,
func getClientSession(sessions kvdb.ReadBucket,
idBytes []byte) (*ClientSession, error) {
session, err := getClientSessionBody(sessions, idBytes)
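The helper signatures above switch from *bbolt.Bucket to kvdb.ReadBucket or kvdb.RwBucket depending on whether they mutate state; because the read-write bucket type also satisfies the read-only interface, the same read helper can be called from both kvdb.View and kvdb.Update paths, as the CommitUpdate and AckUpdate call sites rely on. A small illustrative pair, not taken from the tree:

package kvdbexample

import (
	"errors"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// getRecord takes kvdb.ReadBucket, like getClientSessionBody above, so it
// serves read-only and read-write callers alike. Names and the value layout
// are assumptions for this sketch.
func getRecord(bkt kvdb.ReadBucket, id []byte) ([]byte, error) {
	v := bkt.Get(id)
	if v == nil {
		return nil, errors.New("record not found")
	}
	return append([]byte(nil), v...), nil
}

// putRecord needs write access, so it takes kvdb.RwBucket, mirroring helpers
// such as putClientSessionBody and putTower.
func putRecord(bkt kvdb.RwBucket, id, value []byte) error {
	return bkt.Put(id, value)
}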
@ -950,17 +950,17 @@ func getClientSession(sessions *bbolt.Bucket,
// getClientSessionCommits retrieves all committed updates for the session
// identified by the serialized session id.
func getClientSessionCommits(sessions *bbolt.Bucket,
func getClientSessionCommits(sessions kvdb.ReadBucket,
idBytes []byte) ([]CommittedUpdate, error) {
// Can't fail because client session body has already been read.
sessionBkt := sessions.Bucket(idBytes)
sessionBkt := sessions.NestedReadBucket(idBytes)
// Initialize committedUpdates so that we can return an initialized slice
// if no committed updates exist.
committedUpdates := make([]CommittedUpdate, 0)
sessionCommits := sessionBkt.Bucket(cSessionCommits)
sessionCommits := sessionBkt.NestedReadBucket(cSessionCommits)
if sessionCommits == nil {
return committedUpdates, nil
}
@ -986,17 +986,17 @@ func getClientSessionCommits(sessions *bbolt.Bucket,
// getClientSessionAcks retrieves all acked updates for the session identified
// by the serialized session id.
func getClientSessionAcks(sessions *bbolt.Bucket,
func getClientSessionAcks(sessions kvdb.ReadBucket,
idBytes []byte) (map[uint16]BackupID, error) {
// Can't fail because client session body has already been read.
sessionBkt := sessions.Bucket(idBytes)
sessionBkt := sessions.NestedReadBucket(idBytes)
// Initialize ackedUpdates so that we can return an initialized map if
// no acked updates exist.
ackedUpdates := make(map[uint16]BackupID)
sessionAcks := sessionBkt.Bucket(cSessionAcks)
sessionAcks := sessionBkt.NestedReadBucket(cSessionAcks)
if sessionAcks == nil {
return ackedUpdates, nil
}
@ -1023,7 +1023,7 @@ func getClientSessionAcks(sessions *bbolt.Bucket,
// putClientSessionBody stores the body of the ClientSession (everything but the
// CommittedUpdates and AckedUpdates).
func putClientSessionBody(sessions *bbolt.Bucket,
func putClientSessionBody(sessions kvdb.RwBucket,
session *ClientSession) error {
sessionBkt, err := sessions.CreateBucketIfNotExists(session.ID[:])
@ -1042,7 +1042,7 @@ func putClientSessionBody(sessions *bbolt.Bucket,
// markSessionStatus updates the persisted state of the session to the new
// status.
func markSessionStatus(sessions *bbolt.Bucket, session *ClientSession,
func markSessionStatus(sessions kvdb.RwBucket, session *ClientSession,
status CSessionStatus) error {
session.Status = status
@ -1050,7 +1050,7 @@ func markSessionStatus(sessions *bbolt.Bucket, session *ClientSession,
}
// getChanSummary loads a ClientChanSummary for the passed chanID.
func getChanSummary(chanSummaries *bbolt.Bucket,
func getChanSummary(chanSummaries kvdb.ReadBucket,
chanID lnwire.ChannelID) (*ClientChanSummary, error) {
chanSummaryBytes := chanSummaries.Get(chanID[:])
@ -1068,7 +1068,7 @@ func getChanSummary(chanSummaries *bbolt.Bucket,
}
// putChanSummary stores a ClientChanSummary for the passed chanID.
func putChanSummary(chanSummaries *bbolt.Bucket, chanID lnwire.ChannelID,
func putChanSummary(chanSummaries kvdb.RwBucket, chanID lnwire.ChannelID,
summary *ClientChanSummary) error {
var b bytes.Buffer
@ -1081,7 +1081,7 @@ func putChanSummary(chanSummaries *bbolt.Bucket, chanID lnwire.ChannelID,
}
// getTower loads a Tower identified by its serialized tower id.
func getTower(towers *bbolt.Bucket, id []byte) (*Tower, error) {
func getTower(towers kvdb.ReadBucket, id []byte) (*Tower, error) {
towerBytes := towers.Get(id)
if towerBytes == nil {
return nil, ErrTowerNotFound
@ -1099,7 +1099,7 @@ func getTower(towers *bbolt.Bucket, id []byte) (*Tower, error) {
}
// putTower stores a Tower identified by its serialized tower id.
func putTower(towers *bbolt.Bucket, tower *Tower) error {
func putTower(towers kvdb.RwBucket, tower *Tower) error {
var b bytes.Buffer
err := tower.Encode(&b)
if err != nil {

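The ClientDB hunks above repeat one write-side pattern throughout; the sketch below restates it outside the diff, with hypothetical bucket and key names, under the assumption that the top-level bucket was created earlier via CreateTopLevelBucket.

package kvdbexample

import (
	"errors"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var errUninitialized = errors.New("buckets not initialized")

// putNested illustrates the write path used by ClientDB: kvdb.Update opens a
// kvdb.RwTx, ReadWriteBucket fetches an existing top-level bucket, and nested
// buckets are reached through CreateBucketIfNotExists or
// NestedReadWriteBucket instead of the old bbolt Bucket calls.
func putNested(db kvdb.Backend, topKey, subKey, key, value []byte) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		top := tx.ReadWriteBucket(topKey)
		if top == nil {
			return errUninitialized
		}

		// Create the nested bucket on first use, as CommitUpdate does
		// for the per-session commits sub-bucket.
		sub, err := top.CreateBucketIfNotExists(subKey)
		if err != nil {
			return err
		}
		return sub.Put(key, value)
	})
}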
@ -6,7 +6,7 @@ import (
"os"
"path/filepath"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
const (
@ -49,7 +49,7 @@ func fileExists(path string) bool {
// one doesn't exist. The boolean returned indicates if the database did not
// exist before, or if it has been created but no version metadata exists within
// it.
func createDBIfNotExist(dbPath, name string) (*bbolt.DB, bool, error) {
func createDBIfNotExist(dbPath, name string) (kvdb.Backend, bool, error) {
path := filepath.Join(dbPath, name)
// If the database file doesn't exist, this indicates we must initialize
@ -65,12 +65,7 @@ func createDBIfNotExist(dbPath, name string) (*bbolt.DB, bool, error) {
// Specify bbolt freelist options to reduce heap pressure in case the
// freelist grows to be very large.
options := &bbolt.Options{
NoFreelistSync: true,
FreelistType: bbolt.FreelistMapType,
}
bdb, err := bbolt.Open(path, dbFilePermission, options)
bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true)
if err != nil {
return nil, false, err
}
@ -82,8 +77,8 @@ func createDBIfNotExist(dbPath, name string) (*bbolt.DB, bool, error) {
// set firstInit to true so that we can treat it as needing initialization.
if !firstInit {
var metadataExists bool
err = bdb.View(func(tx *bbolt.Tx) error {
metadataExists = tx.Bucket(metadataBkt) != nil
err = kvdb.View(bdb, func(tx kvdb.ReadTx) error {
metadataExists = tx.ReadBucket(metadataBkt) != nil
return nil
})
if err != nil {

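A compact restatement of the new open/initialize flow in createDBIfNotExist, under the assumption (suggested by the removed bbolt.Options block) that the trailing boolean of kvdb.Create stands in for the old NoFreelistSync option; the path and bucket key are placeholders.

package kvdbexample

import (
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// openDB sketches the creation path: the explicit bbolt.Options block is
// gone, and kvdb.Create selects the bolt backend by name.
func openDB(path string) (kvdb.Backend, error) {
	return kvdb.Create(kvdb.BoltBackendName, path, true)
}

// hasMetadata mirrors the firstInit check above: a read-only transaction
// probes for the metadata bucket.
func hasMetadata(db kvdb.Backend) (bool, error) {
	var exists bool
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		exists = tx.ReadBucket([]byte("metadata")) != nil
		return nil
	})
	return exists, err
}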
@ -5,8 +5,8 @@ import (
"errors"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/watchtower/blob"
)
@ -55,7 +55,7 @@ var (
// TowerDB is a single database providing a persistent storage engine for the
// wtserver and lookout subsystems.
type TowerDB struct {
db *bbolt.DB
db kvdb.Backend
dbPath string
}
@ -88,7 +88,7 @@ func OpenTowerDB(dbPath string) (*TowerDB, error) {
// initialized. This allows us to assume their presence throughout all
// operations. If a known top-level bucket is expected to exist but is
// missing, this will trigger an ErrUninitializedDB error.
err = towerDB.db.Update(initTowerDBBuckets)
err = kvdb.Update(towerDB.db, initTowerDBBuckets)
if err != nil {
bdb.Close()
return nil, err
@ -99,7 +99,7 @@ func OpenTowerDB(dbPath string) (*TowerDB, error) {
// initTowerDBBuckets creates all top-level buckets required to handle the
// database operations of the latest version.
func initTowerDBBuckets(tx *bbolt.Tx) error {
func initTowerDBBuckets(tx kvdb.RwTx) error {
buckets := [][]byte{
sessionsBkt,
updateIndexBkt,
@ -108,7 +108,7 @@ func initTowerDBBuckets(tx *bbolt.Tx) error {
}
for _, bucket := range buckets {
_, err := tx.CreateBucketIfNotExists(bucket)
_, err := tx.CreateTopLevelBucket(bucket)
if err != nil {
return err
}
@ -120,7 +120,7 @@ func initTowerDBBuckets(tx *bbolt.Tx) error {
// bdb returns the backing kvdb.Backend instance.
//
// NOTE: Part of the versionedDB interface.
func (t *TowerDB) bdb() *bbolt.DB {
func (t *TowerDB) bdb() kvdb.Backend {
return t.db
}
@ -129,7 +129,7 @@ func (t *TowerDB) bdb() *bbolt.DB {
// NOTE: Part of the versionedDB interface.
func (t *TowerDB) Version() (uint32, error) {
var version uint32
err := t.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(t.db, func(tx kvdb.ReadTx) error {
var err error
version, err = getDBVersion(tx)
return err
@ -150,8 +150,8 @@ func (t *TowerDB) Close() error {
// returned if the session could not be found.
func (t *TowerDB) GetSessionInfo(id *SessionID) (*SessionInfo, error) {
var session *SessionInfo
err := t.db.View(func(tx *bbolt.Tx) error {
sessions := tx.Bucket(sessionsBkt)
err := kvdb.View(t.db, func(tx kvdb.ReadTx) error {
sessions := tx.ReadBucket(sessionsBkt)
if sessions == nil {
return ErrUninitializedDB
}
@ -170,13 +170,13 @@ func (t *TowerDB) GetSessionInfo(id *SessionID) (*SessionInfo, error) {
// InsertSessionInfo records a negotiated session in the tower database. An
// error is returned if the session already exists.
func (t *TowerDB) InsertSessionInfo(session *SessionInfo) error {
return t.db.Update(func(tx *bbolt.Tx) error {
sessions := tx.Bucket(sessionsBkt)
return kvdb.Update(t.db, func(tx kvdb.RwTx) error {
sessions := tx.ReadWriteBucket(sessionsBkt)
if sessions == nil {
return ErrUninitializedDB
}
updateIndex := tx.Bucket(updateIndexBkt)
updateIndex := tx.ReadWriteBucket(updateIndexBkt)
if updateIndex == nil {
return ErrUninitializedDB
}
@ -219,18 +219,18 @@ func (t *TowerDB) InsertSessionInfo(session *SessionInfo) error {
// properly and the last applied values echoed by the client are sane.
func (t *TowerDB) InsertStateUpdate(update *SessionStateUpdate) (uint16, error) {
var lastApplied uint16
err := t.db.Update(func(tx *bbolt.Tx) error {
sessions := tx.Bucket(sessionsBkt)
err := kvdb.Update(t.db, func(tx kvdb.RwTx) error {
sessions := tx.ReadWriteBucket(sessionsBkt)
if sessions == nil {
return ErrUninitializedDB
}
updates := tx.Bucket(updatesBkt)
updates := tx.ReadWriteBucket(updatesBkt)
if updates == nil {
return ErrUninitializedDB
}
updateIndex := tx.Bucket(updateIndexBkt)
updateIndex := tx.ReadWriteBucket(updateIndexBkt)
if updateIndex == nil {
return ErrUninitializedDB
}
@ -303,18 +303,18 @@ func (t *TowerDB) InsertStateUpdate(update *SessionStateUpdate) (uint16, error)
// DeleteSession removes all data associated with a particular session id from
// the tower's database.
func (t *TowerDB) DeleteSession(target SessionID) error {
return t.db.Update(func(tx *bbolt.Tx) error {
sessions := tx.Bucket(sessionsBkt)
return kvdb.Update(t.db, func(tx kvdb.RwTx) error {
sessions := tx.ReadWriteBucket(sessionsBkt)
if sessions == nil {
return ErrUninitializedDB
}
updates := tx.Bucket(updatesBkt)
updates := tx.ReadWriteBucket(updatesBkt)
if updates == nil {
return ErrUninitializedDB
}
updateIndex := tx.Bucket(updateIndexBkt)
updateIndex := tx.ReadWriteBucket(updateIndexBkt)
if updateIndex == nil {
return ErrUninitializedDB
}
@ -341,7 +341,7 @@ func (t *TowerDB) DeleteSession(target SessionID) error {
for _, hint := range hints {
// Remove the state updates for any blobs stored under
// the target session identifier.
updatesForHint := updates.Bucket(hint[:])
updatesForHint := updates.NestedReadWriteBucket(hint[:])
if updatesForHint == nil {
continue
}
@ -371,7 +371,7 @@ func (t *TowerDB) DeleteSession(target SessionID) error {
// No more updates for this hint, prune hint bucket.
default:
err = updates.DeleteBucket(hint[:])
err = updates.DeleteNestedBucket(hint[:])
if err != nil {
return err
}
@ -389,13 +389,13 @@ func (t *TowerDB) DeleteSession(target SessionID) error {
// they exist in the database.
func (t *TowerDB) QueryMatches(breachHints []blob.BreachHint) ([]Match, error) {
var matches []Match
err := t.db.View(func(tx *bbolt.Tx) error {
sessions := tx.Bucket(sessionsBkt)
err := kvdb.View(t.db, func(tx kvdb.ReadTx) error {
sessions := tx.ReadBucket(sessionsBkt)
if sessions == nil {
return ErrUninitializedDB
}
updates := tx.Bucket(updatesBkt)
updates := tx.ReadBucket(updatesBkt)
if updates == nil {
return ErrUninitializedDB
}
@ -405,7 +405,7 @@ func (t *TowerDB) QueryMatches(breachHints []blob.BreachHint) ([]Match, error) {
for _, hint := range breachHints {
// If a bucket does not exist for this hint, no matches
// are known.
updatesForHint := updates.Bucket(hint[:])
updatesForHint := updates.NestedReadBucket(hint[:])
if updatesForHint == nil {
continue
}
@ -471,8 +471,8 @@ func (t *TowerDB) QueryMatches(breachHints []blob.BreachHint) ([]Match, error) {
// SetLookoutTip stores the provided epoch as the latest lookout tip epoch in
// the tower database.
func (t *TowerDB) SetLookoutTip(epoch *chainntnfs.BlockEpoch) error {
return t.db.Update(func(tx *bbolt.Tx) error {
lookoutTip := tx.Bucket(lookoutTipBkt)
return kvdb.Update(t.db, func(tx kvdb.RwTx) error {
lookoutTip := tx.ReadWriteBucket(lookoutTipBkt)
if lookoutTip == nil {
return ErrUninitializedDB
}
@ -485,8 +485,8 @@ func (t *TowerDB) SetLookoutTip(epoch *chainntnfs.BlockEpoch) error {
// database.
func (t *TowerDB) GetLookoutTip() (*chainntnfs.BlockEpoch, error) {
var epoch *chainntnfs.BlockEpoch
err := t.db.View(func(tx *bbolt.Tx) error {
lookoutTip := tx.Bucket(lookoutTipBkt)
err := kvdb.View(t.db, func(tx kvdb.ReadTx) error {
lookoutTip := tx.ReadBucket(lookoutTipBkt)
if lookoutTip == nil {
return ErrUninitializedDB
}
@ -505,7 +505,7 @@ func (t *TowerDB) GetLookoutTip() (*chainntnfs.BlockEpoch, error) {
// getSession retrieves the session info from the sessions bucket identified by
// its session id. An error is returned if the session is not found or a
// deserialization error occurs.
func getSession(sessions *bbolt.Bucket, id []byte) (*SessionInfo, error) {
func getSession(sessions kvdb.ReadBucket, id []byte) (*SessionInfo, error) {
sessionBytes := sessions.Get(id)
if sessionBytes == nil {
return nil, ErrSessionNotFound
@ -522,7 +522,7 @@ func getSession(sessions *bbolt.Bucket, id []byte) (*SessionInfo, error) {
// putSession stores the session info in the sessions bucket identified by its
// session id. An error is returned if a serialization error occurs.
func putSession(sessions *bbolt.Bucket, session *SessionInfo) error {
func putSession(sessions kvdb.RwBucket, session *SessionInfo) error {
var b bytes.Buffer
err := session.Encode(&b)
if err != nil {
@ -536,7 +536,7 @@ func putSession(sessions *bbolt.Bucket, session *SessionInfo) error {
// session id. This ensures that future calls to getHintsForSession or
// putHintForSession can rely on the bucket already being created, and fail if
// the index has not been initialized, as this points to improper usage.
func touchSessionHintBkt(updateIndex *bbolt.Bucket, id *SessionID) error {
func touchSessionHintBkt(updateIndex kvdb.RwBucket, id *SessionID) error {
_, err := updateIndex.CreateBucketIfNotExists(id[:])
return err
}
@ -544,17 +544,17 @@ func touchSessionHintBkt(updateIndex *bbolt.Bucket, id *SessionID) error {
// removeSessionHintBkt prunes the session-hint bucket for the given session id
// and all of the hints contained inside. This should be used to clean up the
// index upon session deletion.
func removeSessionHintBkt(updateIndex *bbolt.Bucket, id *SessionID) error {
return updateIndex.DeleteBucket(id[:])
func removeSessionHintBkt(updateIndex kvdb.RwBucket, id *SessionID) error {
return updateIndex.DeleteNestedBucket(id[:])
}
// getHintsForSession returns all known hints belonging to the given session id.
// If the index for the session has not been initialized, this method returns
// ErrNoSessionHintIndex.
func getHintsForSession(updateIndex *bbolt.Bucket,
func getHintsForSession(updateIndex kvdb.ReadBucket,
id *SessionID) ([]blob.BreachHint, error) {
sessionHints := updateIndex.Bucket(id[:])
sessionHints := updateIndex.NestedReadBucket(id[:])
if sessionHints == nil {
return nil, ErrNoSessionHintIndex
}
@ -582,10 +582,10 @@ func getHintsForSession(updateIndex *bbolt.Bucket,
// session id, and used to perform efficient removal of updates. If the index
// for the session has not been initialized, this method returns
// ErrNoSessionHintIndex.
func putHintForSession(updateIndex *bbolt.Bucket, id *SessionID,
func putHintForSession(updateIndex kvdb.RwBucket, id *SessionID,
hint blob.BreachHint) error {
sessionHints := updateIndex.Bucket(id[:])
sessionHints := updateIndex.NestedReadWriteBucket(id[:])
if sessionHints == nil {
return ErrNoSessionHintIndex
}
@ -594,7 +594,7 @@ func putHintForSession(updateIndex *bbolt.Bucket, id *SessionID,
}
// putLookoutEpoch stores the given lookout tip block epoch in the provided bucket.
func putLookoutEpoch(bkt *bbolt.Bucket, epoch *chainntnfs.BlockEpoch) error {
func putLookoutEpoch(bkt kvdb.RwBucket, epoch *chainntnfs.BlockEpoch) error {
epochBytes := make([]byte, 36)
copy(epochBytes, epoch.Hash[:])
byteOrder.PutUint32(epochBytes[32:], uint32(epoch.Height))
@ -604,7 +604,7 @@ func putLookoutEpoch(bkt *bbolt.Bucket, epoch *chainntnfs.BlockEpoch) error {
// getLookoutEpoch retrieves the lookout tip block epoch from the given bucket.
// A nil epoch is returned if no update exists.
func getLookoutEpoch(bkt *bbolt.Bucket) *chainntnfs.BlockEpoch {
func getLookoutEpoch(bkt kvdb.ReadBucket) *chainntnfs.BlockEpoch {
epochBytes := bkt.Get(lookoutTipKey)
if len(epochBytes) != 36 {
return nil
@ -625,7 +625,7 @@ func getLookoutEpoch(bkt *bbolt.Bucket) *chainntnfs.BlockEpoch {
var errBucketNotEmpty = errors.New("bucket not empty")
// isBucketEmpty returns errBucketNotEmpty if the bucket is not empty.
func isBucketEmpty(bkt *bbolt.Bucket) error {
func isBucketEmpty(bkt kvdb.ReadBucket) error {
return bkt.ForEach(func(_, _ []byte) error {
return errBucketNotEmpty
})

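The tower-side hint index exercises the nested-bucket half of the new API: per-session sub-buckets are fetched with NestedReadWriteBucket and pruned with DeleteNestedBucket. A short sketch of that usage with hypothetical names; the empty value stored per hint is an assumption about the layout.

package kvdbexample

import (
	"errors"

	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

var errNoIndex = errors.New("session hint index not initialized")

// addHint follows the putHintForSession shape above: the per-session
// sub-bucket is fetched with NestedReadWriteBucket and the hint is stored
// under its own key.
func addHint(updateIndex kvdb.RwBucket, sessionID, hint []byte) error {
	sessionHints := updateIndex.NestedReadWriteBucket(sessionID)
	if sessionHints == nil {
		return errNoIndex
	}
	return sessionHints.Put(hint, []byte{})
}

// dropSession mirrors removeSessionHintBkt: nested buckets are removed with
// DeleteNestedBucket, the kvdb counterpart of bbolt's DeleteBucket.
func dropSession(updateIndex kvdb.RwBucket, sessionID []byte) error {
	return updateIndex.DeleteNestedBucket(sessionID)
}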
@ -1,14 +1,14 @@
package wtdb
import (
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// migration is a function which takes a prior, outdated version of the database
// and mutates the key/bucket structure to arrive at a more up-to-date version
// of the database.
type migration func(tx *bbolt.Tx) error
type migration func(tx kvdb.RwTx) error
// version pairs a version number with the migration that would need to be
// applied from the prior version to upgrade.
@ -46,8 +46,8 @@ func getMigrations(versions []version, curVersion uint32) []version {
// getDBVersion retrieves the current database version from the metadata bucket
// using the dbVersionKey.
func getDBVersion(tx *bbolt.Tx) (uint32, error) {
metadata := tx.Bucket(metadataBkt)
func getDBVersion(tx kvdb.ReadTx) (uint32, error) {
metadata := tx.ReadBucket(metadataBkt)
if metadata == nil {
return 0, ErrUninitializedDB
}
@ -62,8 +62,8 @@ func getDBVersion(tx *bbolt.Tx) (uint32, error) {
// initDBVersion initializes the top-level metadata bucket and writes the passed
// version number as the current version.
func initDBVersion(tx *bbolt.Tx, version uint32) error {
_, err := tx.CreateBucketIfNotExists(metadataBkt)
func initDBVersion(tx kvdb.RwTx, version uint32) error {
_, err := tx.CreateTopLevelBucket(metadataBkt)
if err != nil {
return err
}
@ -73,8 +73,8 @@ func initDBVersion(tx *bbolt.Tx, version uint32) error {
// putDBVersion stores the passed database version in the metadata bucket under
// the dbVersionKey.
func putDBVersion(tx *bbolt.Tx, version uint32) error {
metadata := tx.Bucket(metadataBkt)
func putDBVersion(tx kvdb.RwTx, version uint32) error {
metadata := tx.ReadWriteBucket(metadataBkt)
if metadata == nil {
return ErrUninitializedDB
}
@ -89,7 +89,7 @@ func putDBVersion(tx *bbolt.Tx, version uint32) error {
// on either.
type versionedDB interface {
// bdb returns the underlying database backend.
bdb() *bbolt.DB
bdb() kvdb.Backend
// Version returns the current version stored in the database.
Version() (uint32, error)
@ -105,7 +105,7 @@ func initOrSyncVersions(db versionedDB, init bool, versions []version) error {
// If the database has not yet been created, we'll initialize the
// database version with the latest known version.
if init {
return db.bdb().Update(func(tx *bbolt.Tx) error {
return kvdb.Update(db.bdb(), func(tx kvdb.RwTx) error {
return initDBVersion(tx, getLatestDBVersion(versions))
})
}
@ -141,7 +141,7 @@ func syncVersions(db versionedDB, versions []version) error {
// Otherwise, apply any migrations in order to bring the database
// version up to the highest known version.
updates := getMigrations(versions, curVersion)
return db.bdb().Update(func(tx *bbolt.Tx) error {
return kvdb.Update(db.bdb(), func(tx kvdb.RwTx) error {
for i, update := range updates {
if update.migration == nil {
continue
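To close, a sketch of how the retyped migration plumbing is meant to be driven; everything below is illustrative, uses a made-up bucket name, and omits the version bookkeeping shown in the hunks above.

package kvdbexample

import (
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// exampleMigration has the new migration signature: it receives a kvdb.RwTx
// and reshapes buckets directly. The bucket it creates is purely illustrative.
func exampleMigration(tx kvdb.RwTx) error {
	_, err := tx.CreateTopLevelBucket([]byte("example-new-bucket"))
	return err
}

// applyMigrations shows how such migrations run inside one read-write
// transaction via kvdb.Update, as syncVersions does above.
func applyMigrations(db kvdb.Backend, migrations []func(kvdb.RwTx) error) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		for _, m := range migrations {
			if m == nil {
				continue
			}
			if err := m(tx); err != nil {
				return err
			}
		}
		return nil
	})
}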