channeldb: convert to uniformly use new kvdb abstractions

In this commit, we migrate all the code in `channeldb` to only reference
the new `kvdb` package rather than `bbolt` directly.

In many instances, we need two versions of a bucket-fetching helper: one
for read transactions and one for write transactions. As an example, we
add a new `fetchChanBucketRw` function. It is identical to
`fetchChanBucket`, but is used to fetch the main channel bucket within
all _write_ transactions. A separate method is needed because a write
transaction can be passed where a read transaction is accepted, but not
the other way around, due to the stronger typing of the new `kvdb`
package.
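
For illustration only (not part of this change), a minimal sketch of the
resulting calling pattern inside package `channeldb` could look like the
following. The wrapper names `readChanInfoSketch` and `updateChanSketch`
are hypothetical; `kvdb.View`, `kvdb.Update`, `fetchChanBucket`,
`fetchChanBucketRw`, `putOpenChannel`, `chanInfoKey`, and
`ErrNoChanInfoFound` are the identifiers used in the diff below.

// Hypothetical sketch, not part of this commit: a read path and a write
// path for the main channel bucket under the new kvdb typing.

// readChanInfoSketch only needs read access, so it uses kvdb.View with a
// kvdb.ReadTx and the read-only fetchChanBucket helper.
func readChanInfoSketch(c *OpenChannel) error {
	return kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
		chanBucket, err := fetchChanBucket(
			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
		)
		if err != nil {
			return err
		}

		// A kvdb.ReadBucket only exposes read operations such as Get.
		if chanBucket.Get(chanInfoKey) == nil {
			return ErrNoChanInfoFound
		}
		return nil
	})
}

// updateChanSketch mutates channel state, so it uses kvdb.Update with a
// kvdb.RwTx and fetchChanBucketRw to obtain a kvdb.RwBucket. The RwTx it
// receives could also be passed to fetchChanBucket, but a ReadTx can
// never be passed to fetchChanBucketRw.
func updateChanSketch(c *OpenChannel) error {
	return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
		chanBucket, err := fetchChanBucketRw(
			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
		)
		if err != nil {
			return err
		}

		// A kvdb.RwBucket additionally allows Put/Delete, as used by
		// helpers like putOpenChannel.
		return putOpenChannel(chanBucket, c)
	})
}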
Olaoluwa Osuntokun 2019-12-12 18:22:19 -08:00
parent fc808ac538
commit f0911765af
36 changed files with 804 additions and 752 deletions


@ -15,7 +15,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire"
@ -692,7 +692,7 @@ func (c *OpenChannel) RefreshShortChanID() error {
c.Lock()
defer c.Unlock()
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -718,27 +718,30 @@ func (c *OpenChannel) RefreshShortChanID() error {
// fetchChanBucket is a helper function that returns the bucket where a
// channel's data resides in given: the public key for the node, the outpoint,
// and the chainhash that the channel resides on.
func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey,
outPoint *wire.OutPoint, chainHash chainhash.Hash) (*bbolt.Bucket, error) {
func fetchChanBucket(tx kvdb.ReadTx, nodeKey *btcec.PublicKey,
outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.ReadBucket, error) {
// First fetch the top level bucket which stores all data related to
// current, active channels.
openChanBucket := tx.Bucket(openChannelBucket)
openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil {
return nil, ErrNoChanDBExists
}
// TODO(roasbeef): CreateTopLevelBucket on the interface isn't like
// CreateIfNotExists, will return error
// Within this top level bucket, fetch the bucket dedicated to storing
// open channel data specific to the remote node.
nodePub := nodeKey.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(nodePub)
nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
if nodeChanBucket == nil {
return nil, ErrNoActiveChannels
}
// We'll then recurse down an additional layer in order to fetch the
// bucket for this particular chain.
chainBucket := nodeChanBucket.Bucket(chainHash[:])
chainBucket := nodeChanBucket.NestedReadBucket(chainHash[:])
if chainBucket == nil {
return nil, ErrNoActiveChannels
}
@ -749,7 +752,7 @@ func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey,
if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
return nil, err
}
chanBucket := chainBucket.Bucket(chanPointBuf.Bytes())
chanBucket := chainBucket.NestedReadBucket(chanPointBuf.Bytes())
if chanBucket == nil {
return nil, ErrChannelNotFound
}
@ -757,12 +760,27 @@ func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey,
return chanBucket, nil
}
// fetchChanBucketRw is a helper function that returns the bucket where a
// channel's data resides in given: the public key for the node, the outpoint,
// and the chainhash that the channel resides on. This differs from
// fetchChanBucket in that it returns a writeable bucket.
func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey,
outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket, error) {
readBucket, err := fetchChanBucket(tx, nodeKey, outPoint, chainHash)
if err != nil {
return nil, err
}
return readBucket.(kvdb.RwBucket), nil
}
// fullSync syncs the contents of an OpenChannel while re-using an existing
// database transaction.
func (c *OpenChannel) fullSync(tx *bbolt.Tx) error {
func (c *OpenChannel) fullSync(tx kvdb.RwTx) error {
// First fetch the top level bucket which stores all data related to
// current, active channels.
openChanBucket, err := tx.CreateBucketIfNotExists(openChannelBucket)
openChanBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
if err != nil {
return err
}
@ -792,7 +810,7 @@ func (c *OpenChannel) fullSync(tx *bbolt.Tx) error {
chanPointBuf.Bytes(),
)
switch {
case err == bbolt.ErrBucketExists:
case err == kvdb.ErrBucketExists:
// If this channel already exists, then in order to avoid
// overriding it, we'll return an error back up to the caller.
return ErrChanAlreadyExists
@ -809,7 +827,7 @@ func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error {
c.Lock()
defer c.Unlock()
if err := c.Db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -825,7 +843,7 @@ func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error {
channel.IsPending = false
channel.ShortChannelID = openLoc
return putOpenChannel(chanBucket, channel)
return putOpenChannel(chanBucket.(kvdb.RwBucket), channel)
}); err != nil {
return err
}
@ -849,7 +867,7 @@ func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error {
return err
}
putCommitPoint := func(chanBucket *bbolt.Bucket) error {
putCommitPoint := func(chanBucket kvdb.RwBucket) error {
return chanBucket.Put(dataLossCommitPointKey, b.Bytes())
}
@ -861,7 +879,7 @@ func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error {
func (c *OpenChannel) DataLossCommitPoint() (*btcec.PublicKey, error) {
var commitPoint *btcec.PublicKey
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -990,7 +1008,7 @@ func (c *OpenChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) {
// active.
//
// NOTE: The primary mutex should already be held before this method is called.
func (c *OpenChannel) isBorked(chanBucket *bbolt.Bucket) (bool, error) {
func (c *OpenChannel) isBorked(chanBucket kvdb.ReadBucket) (bool, error) {
channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
if err != nil {
return false, err
@ -1042,14 +1060,14 @@ func (c *OpenChannel) markBroadcasted(status ChannelStatus, key []byte,
// If a closing tx is provided, we'll generate a closure to write the
// transaction in the appropriate bucket under the given key.
var putClosingTx func(*bbolt.Bucket) error
var putClosingTx func(kvdb.RwBucket) error
if closeTx != nil {
var b bytes.Buffer
if err := WriteElement(&b, closeTx); err != nil {
return err
}
putClosingTx = func(chanBucket *bbolt.Bucket) error {
putClosingTx = func(chanBucket kvdb.RwBucket) error {
return chanBucket.Put(key, b.Bytes())
}
}
@ -1083,7 +1101,7 @@ func (c *OpenChannel) BroadcastedCooperative() (*wire.MsgTx, error) {
func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) {
var closeTx *wire.MsgTx
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -1113,10 +1131,10 @@ func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) {
// list of closures that are given the chanBucket in order to atomically add
// extra information together with the new status.
func (c *OpenChannel) putChanStatus(status ChannelStatus,
fs ...func(*bbolt.Bucket) error) error {
fs ...func(kvdb.RwBucket) error) error {
if err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1159,8 +1177,8 @@ func (c *OpenChannel) putChanStatus(status ChannelStatus,
}
func (c *OpenChannel) clearChanStatus(status ChannelStatus) error {
if err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1189,7 +1207,7 @@ func (c *OpenChannel) clearChanStatus(status ChannelStatus) error {
// putOpenChannel serializes and stores the current state of the channel in its
// entirety.
func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func putOpenChannel(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
// First, we'll write out all the relatively static fields, that are
// decided upon initial channel creation.
if err := putChanInfo(chanBucket, channel); err != nil {
@ -1213,7 +1231,7 @@ func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
// fetchOpenChannel retrieves and deserializes (including decrypting
// sensitive fields) the complete channel currently active with the passed nodeID.
func fetchOpenChannel(chanBucket *bbolt.Bucket,
func fetchOpenChannel(chanBucket kvdb.ReadBucket,
chanPoint *wire.OutPoint) (*OpenChannel, error) {
channel := &OpenChannel{
@ -1260,20 +1278,20 @@ func (c *OpenChannel) SyncPending(addr net.Addr, pendingHeight uint32) error {
c.FundingBroadcastHeight = pendingHeight
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return syncNewChannel(tx, c, []net.Addr{addr})
})
}
// syncNewChannel will write the passed channel to disk, and also create a
// LinkNode (if needed) for the channel peer.
func syncNewChannel(tx *bbolt.Tx, c *OpenChannel, addrs []net.Addr) error {
func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) error {
// First, sync all the persistent channel state to disk.
if err := c.fullSync(tx); err != nil {
return err
}
nodeInfoBucket, err := tx.CreateBucketIfNotExists(nodeInfoBucket)
nodeInfoBucket, err := tx.CreateTopLevelBucket(nodeInfoBucket)
if err != nil {
return err
}
@ -1316,8 +1334,8 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
return ErrNoRestoredChannelMutation
}
err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1786,10 +1804,10 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
return ErrNoRestoredChannelMutation
}
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
// First, we'll grab the writable bucket where this channel's
// data resides.
chanBucket, err := fetchChanBucket(
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1854,7 +1872,7 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
// these pointers, causing the tip and the tail to point to the same entry.
func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) {
var cd *CommitDiff
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -1891,7 +1909,7 @@ func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) {
// updates that still need to be signed for.
func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, error) {
var updates []LogUpdate
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -1932,8 +1950,8 @@ func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) error {
c.RemoteNextRevocation = revKey
err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -1969,8 +1987,8 @@ func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg) error {
var newRemoteCommit *ChannelCommitment
err := c.Db.Update(func(tx *bbolt.Tx) error {
chanBucket, err := fetchChanBucket(
err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
if err != nil {
@ -2089,7 +2107,7 @@ func (c *OpenChannel) LoadFwdPkgs() ([]*FwdPkg, error) {
defer c.RUnlock()
var fwdPkgs []*FwdPkg
if err := c.Db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
var err error
fwdPkgs, err = c.Packager.LoadFwdPkgs(tx)
return err
@ -2107,7 +2125,7 @@ func (c *OpenChannel) AckAddHtlcs(addRefs ...AddRef) error {
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.AckAddHtlcs(tx, addRefs...)
})
}
@ -2120,7 +2138,7 @@ func (c *OpenChannel) AckSettleFails(settleFailRefs ...SettleFailRef) error {
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.AckSettleFails(tx, settleFailRefs...)
})
}
@ -2131,7 +2149,7 @@ func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) error {
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.SetFwdFilter(tx, height, fwdFilter)
})
}
@ -2144,7 +2162,7 @@ func (c *OpenChannel) RemoveFwdPkg(height uint64) error {
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.RemovePkg(tx, height)
})
}
@ -2165,7 +2183,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
}
var commit ChannelCommitment
if err := c.Db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -2173,7 +2191,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
return err
}
logBucket := chanBucket.Bucket(revocationLogBucket)
logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
if logBucket == nil {
return ErrNoPastDeltas
}
@ -2182,7 +2200,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
// this channel, we'll jump to the _last_ key in bucket. As we
// store the update number on disk in a big-endian format,
// this will retrieve the latest entry.
cursor := logBucket.Cursor()
cursor := logBucket.ReadCursor()
_, tailLogEntry := cursor.Last()
logEntryReader := bytes.NewReader(tailLogEntry)
@ -2212,7 +2230,7 @@ func (c *OpenChannel) CommitmentHeight() (uint64, error) {
defer c.RUnlock()
var height uint64
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open
// channels.
chanBucket, err := fetchChanBucket(
@ -2247,7 +2265,7 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e
defer c.RUnlock()
var commit ChannelCommitment
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -2255,7 +2273,7 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e
return err
}
logBucket := chanBucket.Bucket(revocationLogBucket)
logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
if logBucket == nil {
return ErrNoPastDeltas
}
@ -2405,19 +2423,19 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
c.Lock()
defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error {
openChanBucket := tx.Bucket(openChannelBucket)
return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
openChanBucket := tx.ReadWriteBucket(openChannelBucket)
if openChanBucket == nil {
return ErrNoChanDBExists
}
nodePub := c.IdentityPub.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(nodePub)
nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
if nodeChanBucket == nil {
return ErrNoActiveChannels
}
chainBucket := nodeChanBucket.Bucket(c.ChainHash[:])
chainBucket := nodeChanBucket.NestedReadWriteBucket(c.ChainHash[:])
if chainBucket == nil {
return ErrNoActiveChannels
}
@ -2428,7 +2446,9 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
return err
}
chanKey := chanPointBuf.Bytes()
chanBucket := chainBucket.Bucket(chanKey)
chanBucket := chainBucket.NestedReadWriteBucket(
chanKey,
)
if chanBucket == nil {
return ErrNoActiveChannels
}
@ -2452,21 +2472,21 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
// With the base channel data deleted, attempt to delete the
// information stored within the revocation log.
logBucket := chanBucket.Bucket(revocationLogBucket)
logBucket := chanBucket.NestedReadWriteBucket(revocationLogBucket)
if logBucket != nil {
err = chanBucket.DeleteBucket(revocationLogBucket)
err = chanBucket.DeleteNestedBucket(revocationLogBucket)
if err != nil {
return err
}
}
err = chainBucket.DeleteBucket(chanPointBuf.Bytes())
err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes())
if err != nil {
return err
}
// Add channel state to the historical channel bucket.
historicalBucket, err := tx.CreateBucketIfNotExists(
historicalBucket, err := tx.CreateTopLevelBucket(
historicalChannelBucket,
)
if err != nil {
@ -2570,7 +2590,7 @@ func (c *OpenChannel) Snapshot() *ChannelSnapshot {
// latest fully committed state is returned. The first commitment returned is
// the local commitment, and the second returned is the remote commitment.
func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, error) {
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -2592,7 +2612,7 @@ func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitmen
// acting on a possible contract breach to ensure, that the caller has the most
// up to date information required to deliver justice.
func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
err := c.Db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
)
@ -2609,10 +2629,10 @@ func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
return c.RevocationStore, nil
}
func putChannelCloseSummary(tx *bbolt.Tx, chanID []byte,
func putChannelCloseSummary(tx kvdb.RwTx, chanID []byte,
summary *ChannelCloseSummary, lastChanState *OpenChannel) error {
closedChanBucket, err := tx.CreateBucketIfNotExists(closedChannelBucket)
closedChanBucket, err := tx.CreateTopLevelBucket(closedChannelBucket)
if err != nil {
return err
}
@ -2788,7 +2808,7 @@ func fundingTxPresent(channel *OpenChannel) bool {
!channel.hasChanStatus(ChanStatusRestored)
}
func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
var w bytes.Buffer
if err := WriteElements(&w,
channel.ChanType, channel.ChainHash, channel.FundingOutpoint,
@ -2835,7 +2855,7 @@ func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
// putOptionalUpfrontShutdownScript adds a shutdown script under the key
// provided if it has a non-zero length.
func putOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte,
func putOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte,
script []byte) error {
// If the script is empty, we do not need to add anything.
if len(script) == 0 {
@ -2853,7 +2873,7 @@ func putOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte,
// getOptionalUpfrontShutdownScript reads the shutdown script stored under the
// key provided if it is present. Upfront shutdown scripts are optional, so the
// function returns with no error if the key is not present.
func getOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte,
func getOptionalUpfrontShutdownScript(chanBucket kvdb.ReadBucket, key []byte,
script *lnwire.DeliveryAddress) error {
// Return early if the bucket does not exist; a shutdown script was not set.
@ -2885,7 +2905,7 @@ func serializeChanCommit(w io.Writer, c *ChannelCommitment) error {
return SerializeHtlcs(w, c.Htlcs...)
}
func putChanCommitment(chanBucket *bbolt.Bucket, c *ChannelCommitment,
func putChanCommitment(chanBucket kvdb.RwBucket, c *ChannelCommitment,
local bool) error {
var commitKey []byte
@ -2903,7 +2923,7 @@ func putChanCommitment(chanBucket *bbolt.Bucket, c *ChannelCommitment,
return chanBucket.Put(commitKey, b.Bytes())
}
func putChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
// If this is a restored channel, then we don't have any commitments to
// write.
if channel.hasChanStatus(ChanStatusRestored) {
@ -2922,7 +2942,7 @@ func putChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
)
}
func putChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
var b bytes.Buffer
err := WriteElements(
@ -2957,7 +2977,7 @@ func readChanConfig(b io.Reader, c *ChannelConfig) error {
)
}
func fetchChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func fetchChanInfo(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
infoBytes := chanBucket.Get(chanInfoKey)
if infoBytes == nil {
return ErrNoChanInfoFound
@ -3024,7 +3044,7 @@ func deserializeChanCommit(r io.Reader) (ChannelCommitment, error) {
return c, nil
}
func fetchChanCommitment(chanBucket *bbolt.Bucket, local bool) (ChannelCommitment, error) {
func fetchChanCommitment(chanBucket kvdb.ReadBucket, local bool) (ChannelCommitment, error) {
var commitKey []byte
if local {
commitKey = append(chanCommitmentKey, byte(0x00))
@ -3041,7 +3061,7 @@ func fetchChanCommitment(chanBucket *bbolt.Bucket, local bool) (ChannelCommitmen
return deserializeChanCommit(r)
}
func fetchChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func fetchChanCommitments(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
var err error
// If this is a restored channel, then we don't have any commitments to
@ -3062,7 +3082,7 @@ func fetchChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error
return nil
}
func fetchChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
func fetchChanRevocationState(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
revBytes := chanBucket.Get(revocationStateKey)
if revBytes == nil {
return ErrNoRevocationsFound
@ -3088,7 +3108,7 @@ func fetchChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) er
return ReadElements(r, &channel.RemoteNextRevocation)
}
func deleteOpenChannel(chanBucket *bbolt.Bucket, chanPointBytes []byte) error {
func deleteOpenChannel(chanBucket kvdb.RwBucket, chanPointBytes []byte) error {
if err := chanBucket.Delete(chanInfoKey); err != nil {
return err
@ -3122,7 +3142,7 @@ func makeLogKey(updateNum uint64) [8]byte {
return key
}
func appendChannelLogEntry(log *bbolt.Bucket,
func appendChannelLogEntry(log kvdb.RwBucket,
commit *ChannelCommitment) error {
var b bytes.Buffer
@ -3134,7 +3154,7 @@ func appendChannelLogEntry(log *bbolt.Bucket,
return log.Put(logEntrykey[:], b.Bytes())
}
func fetchChannelLogEntry(log *bbolt.Bucket,
func fetchChannelLogEntry(log kvdb.ReadBucket,
updateNum uint64) (ChannelCommitment, error) {
logEntrykey := makeLogKey(updateNum)


@ -11,7 +11,6 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migration12"
@ -142,7 +141,7 @@ var (
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
*bbolt.DB
kvdb.Backend
dbPath string
graph *ChannelGraph
clock clock.Clock
@ -166,20 +165,15 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
// Specify bbolt freelist options to reduce heap pressure in case the
// freelist grows to be very large.
options := &bbolt.Options{
NoFreelistSync: opts.NoFreelistSync,
FreelistType: bbolt.FreelistMapType,
}
bdb, err := bbolt.Open(path, dbFilePermission, options)
bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync)
if err != nil {
return nil, err
}
chanDB := &DB{
DB: bdb,
dbPath: dbPath,
clock: opts.clock,
Backend: bdb,
dbPath: dbPath,
clock: opts.clock,
}
chanDB.graph = newChannelGraph(
chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
@ -203,41 +197,41 @@ func (d *DB) Path() string {
// database. The deletion is done in a single transaction, therefore this
// operation is fully atomic.
func (d *DB) Wipe() error {
return d.Update(func(tx *bbolt.Tx) error {
err := tx.DeleteBucket(openChannelBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
err := tx.DeleteTopLevelBucket(openChannelBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(closedChannelBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(closedChannelBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(invoiceBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(invoiceBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(nodeInfoBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(nodeInfoBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(nodeBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(nodeBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(edgeBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(edgeBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(edgeIndexBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(edgeIndexBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
err = tx.DeleteBucket(graphMetaBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(graphMetaBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
@ -257,36 +251,36 @@ func createChannelDB(dbPath string) error {
}
path := filepath.Join(dbPath, dbName)
bdb, err := bbolt.Open(path, dbFilePermission, nil)
bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true)
if err != nil {
return err
}
err = bdb.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucket(openChannelBucket); err != nil {
err = kvdb.Update(bdb, func(tx kvdb.RwTx) error {
if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(forwardingLogBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(forwardingLogBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(fwdPackagesKey); err != nil {
if _, err := tx.CreateTopLevelBucket(fwdPackagesKey); err != nil {
return err
}
if _, err := tx.CreateBucket(invoiceBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(nodeInfoBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(nodeInfoBucket); err != nil {
return err
}
nodes, err := tx.CreateBucket(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -299,7 +293,7 @@ func createChannelDB(dbPath string) error {
return err
}
edges, err := tx.CreateBucket(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -316,7 +310,7 @@ func createChannelDB(dbPath string) error {
return err
}
graphMeta, err := tx.CreateBucket(graphMetaBucket)
graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil {
return err
}
@ -325,7 +319,7 @@ func createChannelDB(dbPath string) error {
return err
}
if _, err := tx.CreateBucket(metaBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil {
return err
}
@ -358,7 +352,7 @@ func fileExists(path string) bool {
// zero-length slice is returned.
func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
var channels []*OpenChannel
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
var err error
channels, err = d.fetchOpenChannels(tx, nodeID)
return err
@ -371,11 +365,11 @@ func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error)
// stored currently active/open channels associated with the target nodeID. In
// the case that no active channels are known to have been created with this
// node, then a zero-length slice is returned.
func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
func (d *DB) fetchOpenChannels(tx kvdb.ReadTx,
nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
// Get the bucket dedicated to storing the metadata for open channels.
openChanBucket := tx.Bucket(openChannelBucket)
openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil {
return nil, nil
}
@ -383,7 +377,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// Within this top level bucket, fetch the bucket dedicated to storing
// open channel data specific to the remote node.
pub := nodeID.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(pub)
nodeChanBucket := openChanBucket.NestedReadBucket(pub)
if nodeChanBucket == nil {
return nil, nil
}
@ -399,7 +393,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// If we've found a valid chainhash bucket, then we'll retrieve
// that so we can extract all the channels.
chainBucket := nodeChanBucket.Bucket(chainHash)
chainBucket := nodeChanBucket.NestedReadBucket(chainHash)
if chainBucket == nil {
return fmt.Errorf("unable to read bucket for chain=%x",
chainHash[:])
@ -424,7 +418,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// fetchNodeChannels retrieves all active channels from the target chainBucket
// which is under a node's dedicated channel bucket. This function is typically
// used to fetch all the active channels related to a particular node.
func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error) {
func (d *DB) fetchNodeChannels(chainBucket kvdb.ReadBucket) ([]*OpenChannel, error) {
var channels []*OpenChannel
@ -438,7 +432,7 @@ func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error
// Once we've found a valid channel bucket, we'll extract it
// from the node's chain bucket.
chanBucket := chainBucket.Bucket(chanPoint)
chanBucket := chainBucket.NestedReadBucket(chanPoint)
var outPoint wire.OutPoint
err := readOutpoint(bytes.NewReader(chanPoint), &outPoint)
@ -483,10 +477,10 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
// structure and skipping fully decoding each channel, we save a good
// bit of CPU as we don't need to do things like decompress public
// keys.
chanScan := func(tx *bbolt.Tx) error {
chanScan := func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open
// channels.
openChanBucket := tx.Bucket(openChannelBucket)
openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil {
return ErrNoActiveChannels
}
@ -501,7 +495,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
return nil
}
nodeChanBucket := openChanBucket.Bucket(nodePub)
nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
if nodeChanBucket == nil {
return nil
}
@ -515,7 +509,9 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
return nil
}
chainBucket := nodeChanBucket.Bucket(chainHash)
chainBucket := nodeChanBucket.NestedReadBucket(
chainHash,
)
if chainBucket == nil {
return fmt.Errorf("unable to read "+
"bucket for chain=%x", chainHash[:])
@ -523,7 +519,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
// Finally we reach the leaf bucket that stores
// all the chanPoints for this node.
chanBucket := chainBucket.Bucket(
chanBucket := chainBucket.NestedReadBucket(
targetChanPoint.Bytes(),
)
if chanBucket == nil {
@ -545,7 +541,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
})
}
err := d.View(chanScan)
err := kvdb.View(d, chanScan)
if err != nil {
return nil, err
}
@ -637,10 +633,10 @@ func waitingCloseFilter(waitingClose bool) fetchChannelsFilter {
func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error) {
var channels []*OpenChannel
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open
// channels.
openChanBucket := tx.Bucket(openChannelBucket)
openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil {
return ErrNoActiveChannels
}
@ -648,7 +644,7 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// Next, fetch the bucket dedicated to storing metadata related
// to all nodes. All keys within this bucket are the serialized
// public keys of all our direct counterparties.
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return fmt.Errorf("node bucket not created")
}
@ -656,7 +652,7 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// Finally for each node public key in the bucket, fetch all
// the channels related to this particular node.
return nodeMetaBucket.ForEach(func(k, v []byte) error {
nodeChanBucket := openChanBucket.Bucket(k)
nodeChanBucket := openChanBucket.NestedReadBucket(k)
if nodeChanBucket == nil {
return nil
}
@ -671,7 +667,9 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// If we've found a valid chainhash bucket,
// then we'll retrieve that so we can extract
// all the channels.
chainBucket := nodeChanBucket.Bucket(chainHash)
chainBucket := nodeChanBucket.NestedReadBucket(
chainHash,
)
if chainBucket == nil {
return fmt.Errorf("unable to read "+
"bucket for chain=%x", chainHash[:])
@ -727,8 +725,8 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
var chanSummaries []*ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil {
return ErrNoClosedChannels
}
@ -765,8 +763,8 @@ var ErrClosedChannelNotFound = errors.New("unable to find closed channel summary
// point of the channel in question.
func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, error) {
var chanSummary *ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil {
return ErrClosedChannelNotFound
}
@ -799,15 +797,15 @@ func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
*ChannelCloseSummary, error) {
var chanSummary *ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil {
return ErrClosedChannelNotFound
}
// The first 30 bytes of the channel ID and outpoint will be
// equal.
cursor := closeBucket.Cursor()
cursor := closeBucket.ReadCursor()
op, c := cursor.Seek(cid[:30])
// We scan over all possible candidates for this channel ID.
@ -847,7 +845,7 @@ func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
// the pending funds in a channel that has been forcibly closed have been
// swept.
func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
return d.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
var b bytes.Buffer
if err := writeOutpoint(&b, chanPoint); err != nil {
return err
@ -855,7 +853,7 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
chanID := b.Bytes()
closedChanBucket, err := tx.CreateBucketIfNotExists(
closedChanBucket, err := tx.CreateTopLevelBucket(
closedChannelBucket,
)
if err != nil {
@ -900,7 +898,7 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
// pruneLinkNode determines whether we should garbage collect a link node from
// the database due to no longer having any open channels with it. If there are
// any left, then this acts as a no-op.
func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error {
func (d *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error {
openChannels, err := d.fetchOpenChannels(tx, remotePub)
if err != nil {
return fmt.Errorf("unable to fetch open channels for peer %x: "+
@ -920,7 +918,7 @@ func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error {
// PruneLinkNodes attempts to prune all link nodes found within the database
// with whom we no longer have any open channels.
func (d *DB) PruneLinkNodes() error {
return d.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
linkNodes, err := d.fetchAllLinkNodes(tx)
if err != nil {
return err
@ -964,7 +962,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
defer chanGraph.cacheMu.Unlock()
var chansRestored []uint64
err := d.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
for _, channelShell := range channelShells {
channel := channelShell.Chan
@ -1001,7 +999,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
Capacity: channel.Capacity,
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -1075,7 +1073,7 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
graphNode LightningNode
)
dbErr := d.View(func(tx *bbolt.Tx) error {
dbErr := kvdb.View(d, func(tx kvdb.ReadTx) error {
var err error
linkNode, err = fetchLinkNode(tx, nodePub)
@ -1086,7 +1084,7 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
// We'll also query the graph for this peer to see if they have
// any addresses that we don't currently have stored within the
// link node database.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -1213,7 +1211,7 @@ func (d *DB) syncVersions(versions []version) error {
migrations, migrationVersions := getMigrationsToApply(
versions, meta.DbVersionNumber,
)
return d.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
for i, migration := range migrations {
if migration == nil {
continue
@ -1261,12 +1259,12 @@ func getMigrationsToApply(versions []version, version uint32) ([]migration, []ui
// fetchHistoricalChanBucket returns the channel bucket for a given outpoint
// from the historical channel bucket. If the bucket does not exist,
// ErrNoHistoricalBucket is returned.
func fetchHistoricalChanBucket(tx *bbolt.Tx,
outPoint *wire.OutPoint) (*bbolt.Bucket, error) {
func fetchHistoricalChanBucket(tx kvdb.ReadTx,
outPoint *wire.OutPoint) (kvdb.ReadBucket, error) {
// First fetch the top level bucket which stores all data related to
// historically stored channels.
historicalChanBucket := tx.Bucket(historicalChannelBucket)
historicalChanBucket := tx.ReadBucket(historicalChannelBucket)
if historicalChanBucket == nil {
return nil, ErrNoHistoricalBucket
}
@ -1277,7 +1275,7 @@ func fetchHistoricalChanBucket(tx *bbolt.Tx,
if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
return nil, err
}
chanBucket := historicalChanBucket.Bucket(chanPointBuf.Bytes())
chanBucket := historicalChanBucket.NestedReadBucket(chanPointBuf.Bytes())
if chanBucket == nil {
return nil, ErrChannelNotFound
}
@ -1289,7 +1287,7 @@ func fetchHistoricalChanBucket(tx *bbolt.Tx,
// bucket.
func (d *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) {
var channel *OpenChannel
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchHistoricalChanBucket(tx, outPoint)
if err != nil {
return err


@ -8,7 +8,7 @@ import (
"time"
"github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
@ -61,7 +61,7 @@ type duplicateHTLCAttemptInfo struct {
// fetchDuplicatePaymentStatus fetches the payment status of the payment. If the
// payment isn't found, it will default to "StatusUnknown".
func fetchDuplicatePaymentStatus(bucket *bbolt.Bucket) PaymentStatus {
func fetchDuplicatePaymentStatus(bucket kvdb.ReadBucket) PaymentStatus {
if bucket.Get(duplicatePaymentSettleInfoKey) != nil {
return StatusSucceeded
}
@ -129,7 +129,7 @@ func deserializeDuplicatePaymentCreationInfo(r io.Reader) (
return c, nil
}
func fetchDuplicatePayment(bucket *bbolt.Bucket) (*MPPayment, error) {
func fetchDuplicatePayment(bucket kvdb.ReadBucket) (*MPPayment, error) {
seqBytes := bucket.Get(duplicatePaymentSequenceKey)
if seqBytes == nil {
return nil, fmt.Errorf("sequence number not found")
@ -209,7 +209,7 @@ func fetchDuplicatePayment(bucket *bbolt.Bucket) (*MPPayment, error) {
return payment, nil
}
func fetchDuplicatePayments(paymentHashBucket *bbolt.Bucket) ([]*MPPayment,
func fetchDuplicatePayments(paymentHashBucket kvdb.ReadBucket) ([]*MPPayment,
error) {
var payments []*MPPayment
@ -217,13 +217,13 @@ func fetchDuplicatePayments(paymentHashBucket *bbolt.Bucket) ([]*MPPayment,
// For older versions of lnd, duplicate payments to a payment hash were
// possible. These will be found in a sub-bucket indexed by their
// sequence number if available.
dup := paymentHashBucket.Bucket(duplicatePaymentsBucket)
dup := paymentHashBucket.NestedReadBucket(duplicatePaymentsBucket)
if dup == nil {
return nil, nil
}
err := dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k)
subBucket := dup.NestedReadBucket(k)
if subBucket == nil {
// We expect one bucket for each duplicate to be found.
return fmt.Errorf("non bucket element" +


@ -6,7 +6,7 @@ import (
"sort"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -111,10 +111,10 @@ func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error {
var timestamp [8]byte
return f.db.Batch(func(tx *bbolt.Tx) error {
return kvdb.Batch(f.db.Backend, func(tx kvdb.RwTx) error {
// First, we'll fetch the bucket that stores our time series
// log.
logBucket, err := tx.CreateBucketIfNotExists(
logBucket, err := tx.CreateTopLevelBucket(
forwardingLogBucket,
)
if err != nil {
@ -204,10 +204,10 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e
recordsToSkip := q.IndexOffset
recordOffset := q.IndexOffset
err := f.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(f.db, func(tx kvdb.ReadTx) error {
// If the bucket wasn't found, then there aren't any events to
// be returned.
logBucket := tx.Bucket(forwardingLogBucket)
logBucket := tx.ReadBucket(forwardingLogBucket)
if logBucket == nil {
return ErrNoForwardingEvents
}
@ -223,7 +223,7 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e
// our seek through the log in order to satisfy the query.
// We'll continue until either we reach the end of the range,
// or reach our max number of events.
logCursor := logBucket.Cursor()
logCursor := logBucket.ReadCursor()
timestamp, events := logCursor.Seek(startTime[:])
for ; timestamp != nil && bytes.Compare(timestamp, endTime[:]) <= 0; timestamp, events = logCursor.Next() {
// If our current return payload exceeds the max number


@ -7,7 +7,7 @@ import (
"fmt"
"io"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -318,7 +318,7 @@ type SettleFailRef struct {
type SettleFailAcker interface {
// AckSettleFails atomically updates the settle-fail filters in *other*
// channels' forwarding packages.
AckSettleFails(tx *bbolt.Tx, settleFailRefs ...SettleFailRef) error
AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error
}
// GlobalFwdPkgReader is an interface used to retrieve the forwarding packages
@ -326,7 +326,7 @@ type SettleFailAcker interface {
type GlobalFwdPkgReader interface {
// LoadChannelFwdPkgs loads all known forwarding packages for the given
// channel.
LoadChannelFwdPkgs(tx *bbolt.Tx,
LoadChannelFwdPkgs(tx kvdb.RwTx,
source lnwire.ShortChannelID) ([]*FwdPkg, error)
}
@ -357,14 +357,14 @@ func NewSwitchPackager() *SwitchPackager {
// AckSettleFails atomically updates the settle-fail filters in *other*
// channels' forwarding packages, to mark that the switch has received a settle
// or fail residing in the forwarding package of a link.
func (*SwitchPackager) AckSettleFails(tx *bbolt.Tx,
func (*SwitchPackager) AckSettleFails(tx kvdb.RwTx,
settleFailRefs ...SettleFailRef) error {
return ackSettleFails(tx, settleFailRefs)
}
// LoadChannelFwdPkgs loads all forwarding packages for a particular channel.
func (*SwitchPackager) LoadChannelFwdPkgs(tx *bbolt.Tx,
func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RwTx,
source lnwire.ShortChannelID) ([]*FwdPkg, error) {
return loadChannelFwdPkgs(tx, source)
@ -376,19 +376,19 @@ func (*SwitchPackager) LoadChannelFwdPkgs(tx *bbolt.Tx,
type FwdPackager interface {
// AddFwdPkg serializes and writes a FwdPkg for this channel at the
// remote commitment height included in the forwarding package.
AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error
AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error
// SetFwdFilter looks up the forwarding package at the remote `height`
// and sets the `fwdFilter`, marking the Adds for which:
// 1) We are not the exit node
// 2) Passed all validation
// 3) Should be forwarded to the switch immediately after a failure
SetFwdFilter(tx *bbolt.Tx, height uint64, fwdFilter *PkgFilter) error
SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *PkgFilter) error
// AckAddHtlcs atomically updates the add filters in this channel's
// forwarding packages to mark the resolution of an Add that was
// received from the remote party.
AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error
AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error
// SettleFailAcker allows a link to acknowledge settle/fail HTLCs
// belonging to other channels.
@ -396,11 +396,11 @@ type FwdPackager interface {
// LoadFwdPkgs loads all known forwarding packages owned by this
// channel.
LoadFwdPkgs(tx *bbolt.Tx) ([]*FwdPkg, error)
LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error)
// RemovePkg deletes a forwarding package owned by this channel at
// the provided remote `height`.
RemovePkg(tx *bbolt.Tx, height uint64) error
RemovePkg(tx kvdb.RwTx, height uint64) error
}
// ChannelPackager is used by a channel to manage the lifecycle of its forwarding
@ -420,8 +420,8 @@ func NewChannelPackager(source lnwire.ShortChannelID) *ChannelPackager {
}
// AddFwdPkg writes a newly locked in forwarding package to disk.
func (*ChannelPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error {
fwdPkgBkt, err := tx.CreateBucketIfNotExists(fwdPackagesKey)
func (*ChannelPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error {
fwdPkgBkt, err := tx.CreateTopLevelBucket(fwdPackagesKey)
if err != nil {
return err
}
@ -485,7 +485,7 @@ func (*ChannelPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error {
}
// putLogUpdate writes an htlc to the provided `bkt`, using `index` as the key.
func putLogUpdate(bkt *bbolt.Bucket, idx uint16, htlc *LogUpdate) error {
func putLogUpdate(bkt kvdb.RwBucket, idx uint16, htlc *LogUpdate) error {
var b bytes.Buffer
if err := htlc.Encode(&b); err != nil {
return err
@ -497,19 +497,19 @@ func putLogUpdate(bkt *bbolt.Bucket, idx uint16, htlc *LogUpdate) error {
// LoadFwdPkgs scans the forwarding log for any packages that haven't been
// processed, and returns their deserialized log updates in a map indexed by the
// remote commitment height at which the updates were locked in.
func (p *ChannelPackager) LoadFwdPkgs(tx *bbolt.Tx) ([]*FwdPkg, error) {
func (p *ChannelPackager) LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error) {
return loadChannelFwdPkgs(tx, p.source)
}
// loadChannelFwdPkgs loads all forwarding packages owned by `source`.
func loadChannelFwdPkgs(tx *bbolt.Tx, source lnwire.ShortChannelID) ([]*FwdPkg, error) {
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
func loadChannelFwdPkgs(tx kvdb.ReadTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) {
fwdPkgBkt := tx.ReadBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return nil, nil
}
sourceKey := makeLogKey(source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:])
sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
if sourceBkt == nil {
return nil, nil
}
@ -543,23 +543,23 @@ func loadChannelFwdPkgs(tx *bbolt.Tx, source lnwire.ShortChannelID) ([]*FwdPkg,
// loadFwdPkg reads the packager's fwd pkg at a given height, and determines the
// appropriate FwdState.
func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID,
func loadFwdPkg(fwdPkgBkt kvdb.ReadBucket, source lnwire.ShortChannelID,
height uint64) (*FwdPkg, error) {
sourceKey := makeLogKey(source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:])
sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
if sourceBkt == nil {
return nil, ErrCorruptedFwdPkg
}
heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:])
heightBkt := sourceBkt.NestedReadBucket(heightKey[:])
if heightBkt == nil {
return nil, ErrCorruptedFwdPkg
}
// Load ADDs from disk.
addBkt := heightBkt.Bucket(addBucketKey)
addBkt := heightBkt.NestedReadBucket(addBucketKey)
if addBkt == nil {
return nil, ErrCorruptedFwdPkg
}
@ -582,7 +582,7 @@ func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID,
}
// Load SETTLE/FAILs from disk.
failSettleBkt := heightBkt.Bucket(failSettleBucketKey)
failSettleBkt := heightBkt.NestedReadBucket(failSettleBucketKey)
if failSettleBkt == nil {
return nil, ErrCorruptedFwdPkg
}
@ -649,7 +649,7 @@ func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID,
// loadHtlcs retrieves all serialized htlcs in a bucket, returning
// them in order of the indexes they were written under.
func loadHtlcs(bkt *bbolt.Bucket) ([]LogUpdate, error) {
func loadHtlcs(bkt kvdb.ReadBucket) ([]LogUpdate, error) {
var htlcs []LogUpdate
if err := bkt.ForEach(func(_, v []byte) error {
var htlc LogUpdate
@ -674,22 +674,22 @@ func loadHtlcs(bkt *bbolt.Bucket) ([]LogUpdate, error) {
// leaving this channel. After a restart, we skip validation of these Adds,
// since they are assumed to have already been validated, and make the switch or
// outgoing link responsible for handling replays.
func (p *ChannelPackager) SetFwdFilter(tx *bbolt.Tx, height uint64,
func (p *ChannelPackager) SetFwdFilter(tx kvdb.RwTx, height uint64,
fwdFilter *PkgFilter) error {
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg
}
source := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(source[:])
sourceBkt := fwdPkgBkt.NestedReadWriteBucket(source[:])
if sourceBkt == nil {
return ErrCorruptedFwdPkg
}
heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:])
heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil {
return ErrCorruptedFwdPkg
}
@ -713,18 +713,18 @@ func (p *ChannelPackager) SetFwdFilter(tx *bbolt.Tx, height uint64,
// AckAddHtlcs accepts a list of references to add htlcs, and updates the
// AckAddFilter of those forwarding packages to indicate that a settle or fail
// has been received in response to the add.
func (p *ChannelPackager) AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error {
func (p *ChannelPackager) AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error {
if len(addRefs) == 0 {
return nil
}
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg
}
sourceKey := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:])
sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceKey[:])
if sourceBkt == nil {
return ErrCorruptedFwdPkg
}
@ -753,11 +753,11 @@ func (p *ChannelPackager) AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error {
// ackAddHtlcsAtHeight updates the AddAckFilter of a single forwarding package
// with a list of indexes, writing the resulting filter back in its place.
func ackAddHtlcsAtHeight(sourceBkt *bbolt.Bucket, height uint64,
func ackAddHtlcsAtHeight(sourceBkt kvdb.RwBucket, height uint64,
indexes []uint16) error {
heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:])
heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil {
// If the height bucket isn't found, this could be because the
// forwarding package was already removed. We'll return nil to
@ -796,17 +796,17 @@ func ackAddHtlcsAtHeight(sourceBkt *bbolt.Bucket, height uint64,
// package. This should only be called after the source of the Add has locked in
// the settle/fail, or it becomes otherwise safe to forgo retransmitting the
// settle/fail after a restart.
func (p *ChannelPackager) AckSettleFails(tx *bbolt.Tx, settleFailRefs ...SettleFailRef) error {
func (p *ChannelPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error {
return ackSettleFails(tx, settleFailRefs)
}
// ackSettleFails persistently acknowledges a batch of settle fail references.
func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error {
func ackSettleFails(tx kvdb.RwTx, settleFailRefs []SettleFailRef) error {
if len(settleFailRefs) == 0 {
return nil
}
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg
}
@ -832,7 +832,7 @@ func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error {
// settle/fail htlcs.
for dest, destHeights := range destHeightDiffs {
destKey := makeLogKey(dest.ToUint64())
destBkt := fwdPkgBkt.Bucket(destKey[:])
destBkt := fwdPkgBkt.NestedReadWriteBucket(destKey[:])
if destBkt == nil {
// If the destination bucket is not found, this is
// likely the result of the destination channel being
@ -855,11 +855,11 @@ func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error {
// ackSettleFailsAtHeight, given a destination bucket, acks the provided indexes
// at a particular height by updating the settle fail filter.
func ackSettleFailsAtHeight(destBkt *bbolt.Bucket, height uint64,
func ackSettleFailsAtHeight(destBkt kvdb.RwBucket, height uint64,
indexes []uint16) error {
heightKey := makeLogKey(height)
heightBkt := destBkt.Bucket(heightKey[:])
heightBkt := destBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil {
// If the height bucket isn't found, this could be because the
// forwarding package was already removed. We'll return nil to
@ -895,21 +895,21 @@ func ackSettleFailsAtHeight(destBkt *bbolt.Bucket, height uint64,
// RemovePkg deletes the forwarding package at the given height from the
// packager's source bucket.
func (p *ChannelPackager) RemovePkg(tx *bbolt.Tx, height uint64) error {
fwdPkgBkt := tx.Bucket(fwdPackagesKey)
func (p *ChannelPackager) RemovePkg(tx kvdb.RwTx, height uint64) error {
fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil {
return nil
}
sourceBytes := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceBytes[:])
sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceBytes[:])
if sourceBkt == nil {
return ErrCorruptedFwdPkg
}
heightKey := makeLogKey(height)
return sourceBkt.DeleteBucket(heightKey[:])
return sourceBkt.DeleteNestedBucket(heightKey[:])
}
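For reference, the write-side traversal that RemovePkg now performs can be reduced to a small sketch. The helper below is not part of this change; removeHeightBucket is an invented name, and the snippet assumes it lives in the channeldb package so it can reference fwdPackagesKey, makeLogKey and ErrCorruptedFwdPkg. It uses only the kvdb calls introduced in this diff.

// removeHeightBucket sketches the read/write bucket navigation used by
// RemovePkg: fetch the top-level bucket with ReadWriteBucket, descend with
// NestedReadWriteBucket, and remove the per-height child bucket with
// DeleteNestedBucket.
func removeHeightBucket(db kvdb.Backend, source, height uint64) error {
    return kvdb.Update(db, func(tx kvdb.RwTx) error {
        // Top-level forwarding package bucket; nothing to do if it
        // was never created.
        fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
        if fwdPkgBkt == nil {
            return nil
        }

        // Descend into the per-source bucket.
        sourceKey := makeLogKey(source)
        sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceKey[:])
        if sourceBkt == nil {
            return ErrCorruptedFwdPkg
        }

        // Remove the per-height child bucket in its entirety.
        heightKey := makeLogKey(height)
        return sourceBkt.DeleteNestedBucket(heightKey[:])
    })
}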
// uint16Key writes the provided 16-bit unsigned integer to a 2-byte slice.

View File

@ -8,8 +8,8 @@ import (
"testing"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -207,7 +207,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
// Next, create and write a new forwarding package with no htlcs.
fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, nil)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -226,7 +226,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
// Now, write the forwarding decision. In this case, it's just an empty
// fwd filter.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err)
@ -244,7 +244,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -279,7 +279,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
nAdds := len(adds)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -300,7 +300,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
// added any adds to the fwdfilter, this would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err)
@ -324,7 +324,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef)
}); err != nil {
t.Fatalf("unable to ack add htlc: %v", err)
@ -343,7 +343,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -381,7 +381,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -402,7 +402,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
// added any adds to the fwdfilter, this would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err)
@ -428,7 +428,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef)
}); err != nil {
t.Fatalf("unable to ack add htlc: %v", err)
@ -448,7 +448,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -486,7 +486,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
nAdds := len(adds)
nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -507,7 +507,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
// added any adds to the fwdfilter, this would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err)
@ -532,7 +532,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef)
}); err != nil {
t.Fatalf("unable to ack add htlc: %v", err)
@ -559,7 +559,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef)
}); err != nil {
t.Fatalf("unable to remove settle/fail htlc: %v", err)
@ -579,7 +579,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -619,7 +619,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
nAdds := len(adds)
nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err)
@ -640,7 +640,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
// added any adds to the fwdfilter, this would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err)
@ -669,7 +669,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef)
}); err != nil {
t.Fatalf("unable to remove settle/fail htlc: %v", err)
@ -696,7 +696,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
Index: uint16(i),
}
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef)
}); err != nil {
t.Fatalf("unable to ack add htlc: %v", err)
@ -716,7 +716,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error {
if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err)
@ -778,11 +778,11 @@ func assertSettleFailFilterIsFull(t *testing.T, fwdPkg *channeldb.FwdPkg, expect
// loadFwdPkgs is a helper method that reads all forwarding packages for a
// particular packager.
func loadFwdPkgs(t *testing.T, db *bbolt.DB,
func loadFwdPkgs(t *testing.T, db kvdb.Backend,
packager channeldb.FwdPackager) []*channeldb.FwdPkg {
var fwdPkgs []*channeldb.FwdPkg
if err := db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(db, func(tx kvdb.ReadTx) error {
var err error
fwdPkgs, err = packager.LoadFwdPkgs(tx)
return err
@ -795,7 +795,7 @@ func loadFwdPkgs(t *testing.T, db *bbolt.DB,
// makeFwdPkgDB initializes a test database for forwarding packages. If the
// provided path is empty, it will create a temp dir/file to use.
func makeFwdPkgDB(t *testing.T, path string) *bbolt.DB {
func makeFwdPkgDB(t *testing.T, path string) kvdb.Backend {
if path == "" {
var err error
path, err = ioutil.TempDir("", "fwdpkgdb")
@ -806,10 +806,10 @@ func makeFwdPkgDB(t *testing.T, path string) *bbolt.DB {
path = filepath.Join(path, "fwdpkg.db")
}
db, err := bbolt.Open(path, 0600, nil)
bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true)
if err != nil {
t.Fatalf("unable to open boltdb: %v", err)
}
return db
return bdb
}
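To see the new constructor in isolation, here is a minimal, self-contained sketch (not taken from this change) that opens a bolt-backed kvdb.Backend the way makeFwdPkgDB now does, writes one key and reads it back. The bucket and key names are arbitrary; only API calls that appear in this diff, plus the backend's standard Close method, are used.

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

func main() {
    dir, err := ioutil.TempDir("", "kvdbdemo")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    // Create a bolt-backed kvdb.Backend, mirroring makeFwdPkgDB above.
    db, err := kvdb.Create(
        kvdb.BoltBackendName, filepath.Join(dir, "demo.db"), true,
    )
    if err != nil {
        panic(err)
    }
    defer db.Close()

    bucketKey := []byte("demo-bucket")

    // Write a single key/value pair inside a read/write transaction.
    err = kvdb.Update(db, func(tx kvdb.RwTx) error {
        bkt, err := tx.CreateTopLevelBucket(bucketKey)
        if err != nil {
            return err
        }
        return bkt.Put([]byte("k"), []byte("v"))
    })
    if err != nil {
        panic(err)
    }

    // Read the value back inside a read-only transaction.
    err = kvdb.View(db, func(tx kvdb.ReadTx) error {
        bkt := tx.ReadBucket(bucketKey)
        if bkt == nil {
            return fmt.Errorf("bucket not found")
        }
        if !bytes.Equal(bkt.Get([]byte("k")), []byte("v")) {
            return fmt.Errorf("unexpected value")
        }
        return nil
    })
    if err != nil {
        panic(err)
    }

    fmt.Println("round trip OK")
}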

View File

@ -18,7 +18,7 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -206,10 +206,10 @@ func (c *ChannelGraph) Database() *DB {
func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
// TODO(roasbeef): ptr map to reduce # of allocs? no duplicates
return c.db.View(func(tx *bbolt.Tx) error {
return kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First, grab the node bucket. This will be used to populate
// the Node pointers in each edge read from disk.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -217,11 +217,11 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli
// Next, grab the edge bucket which stores the edges, and also
// the index itself so we can group the directed edges together
// logically.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
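The read-only shape above repeats across this file: kvdb.View replaces db.View, tx.ReadBucket replaces tx.Bucket for top-level buckets, and NestedReadBucket replaces Bucket for children. A hedged, package-local sketch of a lookup built from those pieces (fetchRawEdgeInfo is an invented name; byteOrder, edgeBucket, edgeIndexBucket and the error values are the package's own):

// fetchRawEdgeInfo resolves the raw serialized edge info for a channel ID
// using the same nested read-bucket layout as the traversal above.
func fetchRawEdgeInfo(db *DB, chanID uint64) ([]byte, error) {
    var raw []byte
    err := kvdb.View(db, func(tx kvdb.ReadTx) error {
        edges := tx.ReadBucket(edgeBucket)
        if edges == nil {
            return ErrGraphNoEdgesFound
        }

        edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
        if edgeIndex == nil {
            return ErrGraphNoEdgesFound
        }

        var key [8]byte
        byteOrder.PutUint64(key[:], chanID)

        data := edgeIndex.Get(key[:])
        if data == nil {
            return ErrEdgeNotFound
        }

        // Copy the value out of the transaction before returning.
        raw = append(raw[:0], data...)
        return nil
    })
    if err != nil {
        return nil, err
    }

    return raw, nil
}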
@ -265,8 +265,8 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli
// should be passed as the first argument. Otherwise the first argument should
// be nil and a fresh transaction will be created to execute the graph
// traversal.
func (c *ChannelGraph) ForEachNodeChannel(tx *bbolt.Tx, nodePub []byte,
cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy,
func (c *ChannelGraph) ForEachNodeChannel(tx kvdb.ReadTx, nodePub []byte,
cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy,
*ChannelEdgePolicy) error) error {
db := c.db
@ -281,13 +281,15 @@ func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) {
var disabledChanIDs []uint64
chanEdgeFound := make(map[uint64]struct{})
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
disabledEdgePolicyIndex := edges.Bucket(disabledEdgePolicyBucket)
disabledEdgePolicyIndex := edges.NestedReadBucket(
disabledEdgePolicyBucket,
)
if disabledEdgePolicyIndex == nil {
return nil
}
@ -326,11 +328,11 @@ func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) {
//
// TODO(roasbeef): add iterator interface to allow for memory efficient graph
// traversal when graph gets mega
func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNode) error) error {
traversal := func(tx *bbolt.Tx) error {
func (c *ChannelGraph) ForEachNode(tx kvdb.RwTx, cb func(kvdb.ReadTx, *LightningNode) error) error {
traversal := func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -359,7 +361,7 @@ func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNo
// If no transaction was provided, then we'll create a new transaction
// to execute the transaction within.
if tx == nil {
return c.db.View(traversal)
return kvdb.View(c.db, traversal)
}
// Otherwise, we re-use the existing transaction to execute the graph
@ -373,10 +375,10 @@ func (c *ChannelGraph) ForEachNode(tx *bbolt.Tx, cb func(*bbolt.Tx, *LightningNo
// node based off the source node.
func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
var source *LightningNode
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -400,7 +402,7 @@ func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
// of the graph. The source node is treated as the center node within a
// star-graph. This method may be used to kick off a path finding algorithm in
// order to explore the reachability of another node based off the source node.
func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) sourceNode(nodes kvdb.ReadBucket) (*LightningNode, error) {
selfPub := nodes.Get(sourceKey)
if selfPub == nil {
return nil, ErrSourceNodeNotSet
@ -423,10 +425,10 @@ func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
nodePubBytes := node.PubKeyBytes[:]
return c.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -452,13 +454,13 @@ func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
//
// TODO(roasbeef): also need sig of announcement
func (c *ChannelGraph) AddLightningNode(node *LightningNode) error {
return c.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
return addLightningNode(tx, node)
})
}
func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
func addLightningNode(tx kvdb.RwTx, node *LightningNode) error {
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -483,13 +485,13 @@ func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, error) {
var alias string
err := c.db.View(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
aliases := nodes.Bucket(aliasIndexBucket)
aliases := nodes.NestedReadBucket(aliasIndexBucket)
if aliases == nil {
return ErrGraphNodesNotFound
}
@ -516,8 +518,8 @@ func (c *ChannelGraph) LookupAlias(pub *btcec.PublicKey) (string, error) {
// from the database according to the node's public key.
func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
// TODO(roasbeef): ensure dangling edges are removed...
return c.db.Update(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodeNotFound
}
@ -528,10 +530,10 @@ func (c *ChannelGraph) DeleteLightningNode(nodePub route.Vertex) error {
// deleteLightningNode uses an existing database transaction to remove a
// vertex/node from the database according to the node's public key.
func (c *ChannelGraph) deleteLightningNode(nodes *bbolt.Bucket,
func (c *ChannelGraph) deleteLightningNode(nodes kvdb.RwBucket,
compressedPubKey []byte) error {
aliases := nodes.Bucket(aliasIndexBucket)
aliases := nodes.NestedReadWriteBucket(aliasIndexBucket)
if aliases == nil {
return ErrGraphNodesNotFound
}
@ -556,7 +558,7 @@ func (c *ChannelGraph) deleteLightningNode(nodes *bbolt.Bucket,
// Finally, we'll delete the index entry for the node within the
// nodeUpdateIndexBucket as this node is no longer active, so we don't
// need to track its last update.
nodeUpdateIndex := nodes.Bucket(nodeUpdateIndexBucket)
nodeUpdateIndex := nodes.NestedReadWriteBucket(nodeUpdateIndexBucket)
if nodeUpdateIndex == nil {
return ErrGraphNodesNotFound
}
@ -581,7 +583,7 @@ func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo) error {
c.cacheMu.Lock()
defer c.cacheMu.Unlock()
err := c.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
return c.addChannelEdge(tx, edge)
})
if err != nil {
@ -596,16 +598,16 @@ func (c *ChannelGraph) AddChannelEdge(edge *ChannelEdgeInfo) error {
// addChannelEdge is the private form of AddChannelEdge that allows callers to
// utilize an existing db transaction.
func (c *ChannelGraph) addChannelEdge(tx *bbolt.Tx, edge *ChannelEdgeInfo) error {
func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) error {
// Construct the channel's primary key which is the 8-byte channel ID.
var chanKey [8]byte
binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
edges, err := tx.CreateBucketIfNotExists(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -731,12 +733,12 @@ func (c *ChannelGraph) HasChannelEdge(
return upd1Time, upd2Time, exists, isZombie, nil
}
if err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -748,7 +750,7 @@ func (c *ChannelGraph) HasChannelEdge(
// index.
if edgeIndex.Get(channelID[:]) == nil {
exists = false
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
if zombieIndex != nil {
isZombie, _, _ = isZombieEdge(
zombieIndex, chanID,
@ -764,7 +766,7 @@ func (c *ChannelGraph) HasChannelEdge(
// If the channel has been found in the graph, then retrieve
// the edges itself so we can return the last updated
// timestamps.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodeNotFound
}
@ -808,13 +810,13 @@ func (c *ChannelGraph) UpdateChannelEdge(edge *ChannelEdgeInfo) error {
var chanKey [8]byte
binary.BigEndian.PutUint64(chanKey[:], edge.ChannelID)
return c.db.Update(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrEdgeNotFound
}
@ -851,10 +853,10 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
var chansClosed []*ChannelEdgeInfo
err := c.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
// First grab the edges bucket which houses the information
// we'd like to delete
edges, err := tx.CreateBucketIfNotExists(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -868,7 +870,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
if err != nil {
return err
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrSourceNodeNotSet
}
@ -919,7 +921,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
chansClosed = append(chansClosed, &edgeInfo)
}
metaBucket, err := tx.CreateBucketIfNotExists(graphMetaBucket)
metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil {
return err
}
@ -965,16 +967,16 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
// that we only maintain a graph of reachable nodes. In the event that a pruned
// node gains more channels, it will be re-added back to the graph.
func (c *ChannelGraph) PruneGraphNodes() error {
return c.db.Update(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
edges := tx.Bucket(edgeBucket)
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrGraphNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -986,8 +988,8 @@ func (c *ChannelGraph) PruneGraphNodes() error {
// pruneGraphNodes attempts to remove any nodes from the graph who have had a
// channel closed within the current block. If the node still has existing
// channels in the graph, this will act as a no-op.
func (c *ChannelGraph) pruneGraphNodes(nodes *bbolt.Bucket,
edgeIndex *bbolt.Bucket) error {
func (c *ChannelGraph) pruneGraphNodes(nodes kvdb.RwBucket,
edgeIndex kvdb.RwBucket) error {
log.Trace("Pruning nodes from graph with no open channels")
@ -1113,8 +1115,8 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// Keep track of the channels that are removed from the graph.
var removedChans []*ChannelEdgeInfo
if err := c.db.Update(func(tx *bbolt.Tx) error {
edges, err := tx.CreateBucketIfNotExists(edgeBucket)
if err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -1130,7 +1132,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
if err != nil {
return err
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -1140,7 +1142,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// NOTE: we must delete the edges after the cursor loop, since
// modifying the bucket while traversing is not safe.
var keys [][]byte
cursor := edgeIndex.Cursor()
cursor := edgeIndex.ReadWriteCursor()
for k, v := cursor.Seek(chanIDStart[:]); k != nil &&
bytes.Compare(k, chanIDEnd[:]) <= 0; k, v = cursor.Next() {
@ -1166,7 +1168,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// Delete all the entries in the prune log having a height
// greater or equal to the block disconnected.
metaBucket, err := tx.CreateBucketIfNotExists(graphMetaBucket)
metaBucket, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil {
return err
}
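Both loops in this function keep the rule spelled out in the comment above: keys are collected while the cursor runs and deleted only afterwards, since mutating a bucket during traversal is unsafe. A standalone sketch of that pattern (deleteRange is an invented name; it assumes the bytes import already present in this file and the bucket's standard Delete method):

// deleteRange removes every key in [start, end] from bkt. Keys are
// gathered during the cursor walk and deleted afterwards, since modifying
// the bucket while traversing it is not safe.
func deleteRange(bkt kvdb.RwBucket, start, end []byte) error {
    var keys [][]byte

    cursor := bkt.ReadWriteCursor()
    for k, _ := cursor.Seek(start); k != nil &&
        bytes.Compare(k, end) <= 0; k, _ = cursor.Next() {

        // Copy the key before the bucket is modified below.
        keyCopy := make([]byte, len(k))
        copy(keyCopy, k)
        keys = append(keys, keyCopy)
    }

    for _, key := range keys {
        if err := bkt.Delete(key); err != nil {
            return err
        }
    }

    return nil
}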
@ -1185,7 +1187,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
// To avoid modifying the bucket while traversing, we delete
// the keys in a second loop.
var pruneKeys [][]byte
pruneCursor := pruneBucket.Cursor()
pruneCursor := pruneBucket.ReadWriteCursor()
for k, _ := pruneCursor.Seek(pruneKeyStart[:]); k != nil &&
bytes.Compare(k, pruneKeyEnd[:]) <= 0; k, _ = pruneCursor.Next() {
@ -1221,17 +1223,17 @@ func (c *ChannelGraph) PruneTip() (*chainhash.Hash, uint32, error) {
tipHeight uint32
)
err := c.db.View(func(tx *bbolt.Tx) error {
graphMeta := tx.Bucket(graphMetaBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
graphMeta := tx.ReadBucket(graphMetaBucket)
if graphMeta == nil {
return ErrGraphNotFound
}
pruneBucket := graphMeta.Bucket(pruneLogBucket)
pruneBucket := graphMeta.NestedReadBucket(pruneLogBucket)
if pruneBucket == nil {
return ErrGraphNeverPruned
}
pruneCursor := pruneBucket.Cursor()
pruneCursor := pruneBucket.ReadCursor()
// The prune key with the largest block height will be our
// prune tip.
@ -1266,20 +1268,20 @@ func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) error {
c.cacheMu.Lock()
defer c.cacheMu.Unlock()
err := c.db.Update(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrEdgeNotFound
}
chanIndex := edges.Bucket(channelPointBucket)
chanIndex := edges.NestedReadWriteBucket(channelPointBucket)
if chanIndex == nil {
return ErrEdgeNotFound
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodeNotFound
}
@ -1319,7 +1321,7 @@ func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) error {
// the database, then ErrEdgeNotFound is returned.
func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
var chanID uint64
if err := c.db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
var err error
chanID, err = getChanID(tx, chanPoint)
return err
@ -1331,17 +1333,17 @@ func (c *ChannelGraph) ChannelID(chanPoint *wire.OutPoint) (uint64, error) {
}
// getChanID returns the assigned channel ID for a given channel point.
func getChanID(tx *bbolt.Tx, chanPoint *wire.OutPoint) (uint64, error) {
func getChanID(tx kvdb.ReadTx, chanPoint *wire.OutPoint) (uint64, error) {
var b bytes.Buffer
if err := writeOutpoint(&b, chanPoint); err != nil {
return 0, err
}
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return 0, ErrGraphNoEdgesFound
}
chanIndex := edges.Bucket(channelPointBucket)
chanIndex := edges.NestedReadBucket(channelPointBucket)
if chanIndex == nil {
return 0, ErrGraphNoEdgesFound
}
@ -1364,19 +1366,19 @@ func getChanID(tx *bbolt.Tx, chanPoint *wire.OutPoint) (uint64, error) {
func (c *ChannelGraph) HighestChanID() (uint64, error) {
var cid uint64
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
// In order to find the highest chan ID, we'll fetch a cursor
// and use that to seek to the "end" of our known range.
cidCursor := edgeIndex.Cursor()
cidCursor := edgeIndex.ReadCursor()
lastChanID, _ := cidCursor.Last()
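The cursor trick used here, jumping straight to the last entry, works for any bucket whose keys sort the way you want, as the 8-byte big-endian channel IDs do. A hedged sketch outside of any particular bucket (highestKey is an invented name):

// highestKey returns a copy of the lexicographically largest key in the
// given top-level bucket, or nil if the bucket is missing or empty.
func highestKey(db kvdb.Backend, bucketKey []byte) ([]byte, error) {
    var highest []byte
    err := kvdb.View(db, func(tx kvdb.ReadTx) error {
        bkt := tx.ReadBucket(bucketKey)
        if bkt == nil {
            return nil
        }

        k, _ := bkt.ReadCursor().Last()
        if k == nil {
            return nil
        }

        highest = append(highest[:0], k...)
        return nil
    })

    return highest, err
}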
@ -1428,28 +1430,28 @@ func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, endTime time.Time) ([]Cha
defer c.cacheMu.Unlock()
var hits int
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket)
edgeUpdateIndex := edges.NestedReadBucket(edgeUpdateIndexBucket)
if edgeUpdateIndex == nil {
return ErrGraphNoEdgesFound
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
// We'll now obtain a cursor to perform a range query within
// the index to find all channels within the horizon.
updateCursor := edgeUpdateIndex.Cursor()
updateCursor := edgeUpdateIndex.ReadCursor()
var startTimeBytes, endTimeBytes [8 + 8]byte
byteOrder.PutUint64(
@ -1548,20 +1550,20 @@ func (c *ChannelGraph) ChanUpdatesInHorizon(startTime, endTime time.Time) ([]Cha
func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, endTime time.Time) ([]LightningNode, error) {
var nodesInHorizon []LightningNode
err := c.db.View(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
nodeUpdateIndex := nodes.Bucket(nodeUpdateIndexBucket)
nodeUpdateIndex := nodes.NestedReadBucket(nodeUpdateIndexBucket)
if nodeUpdateIndex == nil {
return ErrGraphNodesNotFound
}
// We'll now obtain a cursor to perform a range query within
// the index to find all node announcements within the horizon.
updateCursor := nodeUpdateIndex.Cursor()
updateCursor := nodeUpdateIndex.ReadCursor()
var startTimeBytes, endTimeBytes [8 + 33]byte
byteOrder.PutUint64(
@ -1610,12 +1612,12 @@ func (c *ChannelGraph) NodeUpdatesInHorizon(startTime, endTime time.Time) ([]Lig
func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, error) {
var newChanIDs []uint64
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -1623,7 +1625,7 @@ func (c *ChannelGraph) FilterKnownChanIDs(chanIDs []uint64) ([]uint64, error) {
// Fetch the zombie index, it may not exist if no edges have
// ever been marked as zombies. If the index has been
// initialized, we will use it later to skip known zombie edges.
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
// We'll run through the set of chanIDs and collate only the
// set of channels that cannot be found within our db.
@ -1686,17 +1688,17 @@ func (c *ChannelGraph) FilterChannelRange(startHeight, endHeight uint32) ([]uint
byteOrder.PutUint64(chanIDStart[:], startChanID.ToUint64())
byteOrder.PutUint64(chanIDEnd[:], endChanID.ToUint64())
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
cursor := edgeIndex.Cursor()
cursor := edgeIndex.ReadCursor()
// We'll now iterate through the database, and find each
// channel ID that resides within the specified range.
@ -1739,16 +1741,16 @@ func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
cidBytes [8]byte
)
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -1794,12 +1796,12 @@ func (c *ChannelGraph) FetchChanInfos(chanIDs []uint64) ([]ChannelEdge, error) {
return chanEdges, nil
}
func delEdgeUpdateIndexEntry(edgesBucket *bbolt.Bucket, chanID uint64,
func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
edge1, edge2 *ChannelEdgePolicy) error {
// First, we'll fetch the edge update index bucket which currently
// stores an entry for the channel we're about to delete.
updateIndex := edgesBucket.Bucket(edgeUpdateIndexBucket)
updateIndex := edgesBucket.NestedReadWriteBucket(edgeUpdateIndexBucket)
if updateIndex == nil {
// No edges in bucket, return early.
return nil
@ -1833,7 +1835,7 @@ func delEdgeUpdateIndexEntry(edgesBucket *bbolt.Bucket, chanID uint64,
}
func delChannelEdge(edges, edgeIndex, chanIndex, zombieIndex,
nodes *bbolt.Bucket, chanID []byte, isZombie bool) error {
nodes kvdb.RwBucket, chanID []byte, isZombie bool) error {
edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
if err != nil {
@ -1919,7 +1921,7 @@ func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy) error {
defer c.cacheMu.Unlock()
var isUpdate1 bool
err := c.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
var err error
isUpdate1, err = updateEdgePolicy(tx, edge)
return err
@ -1961,17 +1963,17 @@ func (c *ChannelGraph) UpdateEdgePolicy(edge *ChannelEdgePolicy) error {
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2.
func updateEdgePolicy(tx *bbolt.Tx, edge *ChannelEdgePolicy) (bool, error) {
edges := tx.Bucket(edgeBucket)
func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, error) {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return false, ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return false, ErrEdgeNotFound
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return false, err
}
@ -2138,14 +2140,14 @@ func (l *LightningNode) NodeAnnouncement(signed bool) (*lnwire.NodeAnnouncement,
// isPublic determines whether the node is seen as public within the graph from
// the source node's point of view. An existing database transaction can also be
// specified.
func (l *LightningNode) isPublic(tx *bbolt.Tx, sourcePubKey []byte) (bool, error) {
func (l *LightningNode) isPublic(tx kvdb.ReadTx, sourcePubKey []byte) (bool, error) {
// In order to determine whether this node is publicly advertised within
// the graph, we'll need to look at all of its edges and check whether
// they extend to any other node than the source node. errDone will be
// used to terminate the check early.
nodeIsPublic := false
errDone := errors.New("done")
err := l.ForEachChannel(tx, func(_ *bbolt.Tx, info *ChannelEdgeInfo,
err := l.ForEachChannel(tx, func(_ kvdb.ReadTx, info *ChannelEdgeInfo,
_, _ *ChannelEdgePolicy) error {
// If this edge doesn't extend to the source node, we'll
@ -2184,15 +2186,15 @@ func (l *LightningNode) isPublic(tx *bbolt.Tx, sourcePubKey []byte) (bool, error
// should be passed as the first argument. Otherwise the first argument should
// be nil and a fresh transaction will be created to execute the graph
// traversal.
func (c *ChannelGraph) FetchLightningNode(tx *bbolt.Tx, nodePub route.Vertex) (
func (c *ChannelGraph) FetchLightningNode(tx kvdb.ReadTx, nodePub route.Vertex) (
*LightningNode, error) {
var node *LightningNode
fetchNode := func(tx *bbolt.Tx) error {
fetchNode := func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2220,7 +2222,7 @@ func (c *ChannelGraph) FetchLightningNode(tx *bbolt.Tx, nodePub route.Vertex) (
var err error
if tx == nil {
err = c.db.View(fetchNode)
err = kvdb.View(c.db, fetchNode)
} else {
err = fetchNode(tx)
}
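FetchLightningNode keeps the package's convention of accepting an optional transaction: with a nil tx a fresh read transaction is opened through kvdb.View, otherwise the caller's transaction is reused. A minimal sketch of that dispatch (viewOrReuse is an invented name):

// viewOrReuse runs fn inside the supplied read transaction, or inside a
// fresh read transaction opened against db when tx is nil.
func viewOrReuse(db kvdb.Backend, tx kvdb.ReadTx,
    fn func(kvdb.ReadTx) error) error {

    if tx == nil {
        return kvdb.View(db, fn)
    }

    return fn(tx)
}

ForEachChannel and FetchOtherNode in this file follow the same shape.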
@ -2242,10 +2244,10 @@ func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, erro
exists bool
)
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2280,19 +2282,19 @@ func (c *ChannelGraph) HasLightningNode(nodePub [33]byte) (time.Time, bool, erro
// nodeTraversal is used to traverse all channels of a node given by its
// public key and passes channel information into the specified callback.
func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB,
cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
func nodeTraversal(tx kvdb.ReadTx, nodePub []byte, db *DB,
cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
traversal := func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
traversal := func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -2312,7 +2314,7 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB,
// bucket until the retrieved key no longer has the public key
// as its prefix. This indicates that we've stepped over into
// another node's edges, so we can terminate our scan.
edgeCursor := edges.Cursor()
edgeCursor := edges.ReadCursor()
for nodeEdge, _ := edgeCursor.Seek(nodeStart[:]); bytes.HasPrefix(nodeEdge, nodePub); nodeEdge, _ = edgeCursor.Next() {
// If the prefix still matches, the channel id is
// returned in nodeEdge. Channel id is used to lookup
@ -2357,7 +2359,7 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB,
// If no transaction was provided, then we'll create a new transaction
// to execute the transaction within.
if tx == nil {
return db.View(traversal)
return kvdb.View(db, traversal)
}
// Otherwise, we re-use the existing transaction to execute the graph
@ -2378,8 +2380,8 @@ func nodeTraversal(tx *bbolt.Tx, nodePub []byte, db *DB,
// should be passed as the first argument. Otherwise the first argument should
// be nil and a fresh transaction will be created to execute the graph
// traversal.
func (l *LightningNode) ForEachChannel(tx *bbolt.Tx,
cb func(*bbolt.Tx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
func (l *LightningNode) ForEachChannel(tx kvdb.ReadTx,
cb func(kvdb.ReadTx, *ChannelEdgeInfo, *ChannelEdgePolicy, *ChannelEdgePolicy) error) error {
nodePub := l.PubKeyBytes[:]
db := l.db
@ -2570,7 +2572,7 @@ func (c *ChannelEdgeInfo) OtherNodeKeyBytes(thisNodeKey []byte) (
// the target node in the channel. This is useful when one knows the pubkey of
// one of the nodes, and wishes to obtain the full LightningNode for the other
// end of the channel.
func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*LightningNode, error) {
func (c *ChannelEdgeInfo) FetchOtherNode(tx kvdb.ReadTx, thisNodeKey []byte) (*LightningNode, error) {
// Ensure that the node passed in is actually a member of the channel.
var targetNodeBytes [33]byte
@ -2584,10 +2586,10 @@ func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*Lig
}
var targetNode *LightningNode
fetchNodeFunc := func(tx *bbolt.Tx) error {
fetchNodeFunc := func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2607,7 +2609,7 @@ func (c *ChannelEdgeInfo) FetchOtherNode(tx *bbolt.Tx, thisNodeKey []byte) (*Lig
// otherwise we can use the existing db transaction.
var err error
if tx == nil {
err = c.db.View(fetchNodeFunc)
err = kvdb.View(c.db, fetchNodeFunc)
} else {
err = fetchNodeFunc(tx)
}
@ -2883,10 +2885,10 @@ func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint,
policy2 *ChannelEdgePolicy
)
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First, grab the node bucket. This will be used to populate
// the Node pointers in each edge read from disk.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2894,18 +2896,18 @@ func (c *ChannelGraph) FetchChannelEdgesByOutpoint(op *wire.OutPoint,
// Next, grab the edge bucket which stores the edges, and also
// the index itself so we can group the directed edges together
// logically.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
// If the channel's outpoint doesn't exist within the outpoint
// index, then the edge does not exist.
chanIndex := edges.Bucket(channelPointBucket)
chanIndex := edges.NestedReadBucket(channelPointBucket)
if chanIndex == nil {
return ErrGraphNoEdgesFound
}
@ -2967,10 +2969,10 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
channelID [8]byte
)
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First, grab the node bucket. This will be used to populate
// the Node pointers in each edge read from disk.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2978,11 +2980,11 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
// Next, grab the edge bucket which stores the edges, and also
// the index itself so we can group the directed edges together
// logically.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -2998,7 +3000,7 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
// If the zombie index doesn't exist, or the edge is not
// marked as a zombie within it, then we'll return the
// original ErrEdgeNotFound error.
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
if zombieIndex == nil {
return ErrEdgeNotFound
}
@ -3057,8 +3059,8 @@ func (c *ChannelGraph) FetchChannelEdgesByID(chanID uint64,
// source node's point of view.
func (c *ChannelGraph) IsPublicNode(pubKey [33]byte) (bool, error) {
var nodeIsPublic bool
err := c.db.View(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNodesNotFound
}
@ -3143,19 +3145,19 @@ func (e *EdgePoint) String() string {
// closes on the resident blockchain.
func (c *ChannelGraph) ChannelView() ([]EdgePoint, error) {
var edgePoints []EdgePoint
if err := c.db.View(func(tx *bbolt.Tx) error {
if err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// We're going to iterate over the entire channel index, so
// we'll need to fetch the edgeBucket to get to the index as
// it's a sub-bucket.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
chanIndex := edges.Bucket(channelPointBucket)
chanIndex := edges.NestedReadBucket(channelPointBucket)
if chanIndex == nil {
return ErrGraphNoEdgesFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrGraphNoEdgesFound
}
@ -3209,7 +3211,7 @@ func (c *ChannelGraph) NewChannelEdgePolicy() *ChannelEdgePolicy {
// markEdgeZombie marks an edge as a zombie within our zombie index. The public
// keys should represent the node public keys of the two parties involved in the
// edge.
func markEdgeZombie(zombieIndex *bbolt.Bucket, chanID uint64, pubKey1,
func markEdgeZombie(zombieIndex kvdb.RwBucket, chanID uint64, pubKey1,
pubKey2 [33]byte) error {
var k [8]byte
@ -3227,12 +3229,12 @@ func (c *ChannelGraph) MarkEdgeLive(chanID uint64) error {
c.cacheMu.Lock()
defer c.cacheMu.Unlock()
err := c.db.Update(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.Update(c.db, func(tx kvdb.RwTx) error {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadWriteBucket(zombieBucket)
if zombieIndex == nil {
return nil
}
@ -3260,12 +3262,12 @@ func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
pubKey1, pubKey2 [33]byte
)
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
if zombieIndex == nil {
return nil
}
@ -3283,7 +3285,7 @@ func (c *ChannelGraph) IsZombieEdge(chanID uint64) (bool, [33]byte, [33]byte) {
// isZombieEdge returns whether an entry exists for the given channel in the
// zombie index. If an entry exists, then the two node public keys corresponding
// to this edge are also returned.
func isZombieEdge(zombieIndex *bbolt.Bucket,
func isZombieEdge(zombieIndex kvdb.ReadBucket,
chanID uint64) (bool, [33]byte, [33]byte) {
var k [8]byte
@ -3304,12 +3306,12 @@ func isZombieEdge(zombieIndex *bbolt.Bucket,
// NumZombies returns the current number of zombie channels in the graph.
func (c *ChannelGraph) NumZombies() (uint64, error) {
var numZombies uint64
err := c.db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return nil
}
zombieIndex := edges.Bucket(zombieBucket)
zombieIndex := edges.NestedReadBucket(zombieBucket)
if zombieIndex == nil {
return nil
}
@ -3326,8 +3328,8 @@ func (c *ChannelGraph) NumZombies() (uint64, error) {
return numZombies, nil
}
func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
updateIndex *bbolt.Bucket, node *LightningNode) error {
func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket,
updateIndex kvdb.RwBucket, node *LightningNode) error {
var (
scratch [16]byte
@ -3455,7 +3457,7 @@ func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
return nodeBucket.Put(nodePub, b.Bytes())
}
func fetchLightningNode(nodeBucket *bbolt.Bucket,
func fetchLightningNode(nodeBucket kvdb.ReadBucket,
nodePub []byte) (LightningNode, error) {
nodeBytes := nodeBucket.Get(nodePub)
@ -3563,7 +3565,7 @@ func deserializeLightningNode(r io.Reader) (LightningNode, error) {
return node, nil
}
func putChanEdgeInfo(edgeIndex *bbolt.Bucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) error {
func putChanEdgeInfo(edgeIndex kvdb.RwBucket, edgeInfo *ChannelEdgeInfo, chanID [8]byte) error {
var b bytes.Buffer
if _, err := b.Write(edgeInfo.NodeKey1Bytes[:]); err != nil {
@ -3629,7 +3631,7 @@ func putChanEdgeInfo(edgeIndex *bbolt.Bucket, edgeInfo *ChannelEdgeInfo, chanID
return edgeIndex.Put(chanID[:], b.Bytes())
}
func fetchChanEdgeInfo(edgeIndex *bbolt.Bucket,
func fetchChanEdgeInfo(edgeIndex kvdb.ReadBucket,
chanID []byte) (ChannelEdgeInfo, error) {
edgeInfoBytes := edgeIndex.Get(chanID)
@ -3718,7 +3720,7 @@ func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
return edgeInfo, nil
}
func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
from, to []byte) error {
var edgeKey [33 + 8]byte
@ -3798,7 +3800,7 @@ func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when prune is needed.
func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
direction bool, disabled bool) error {
var disabledEdgeKey [8 + 1]byte
@ -3823,7 +3825,7 @@ func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
from []byte) error {
var edgeKey [33 + 8]byte
@ -3838,8 +3840,8 @@ func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
return edges.Put(edgeKey[:], unknownPolicy)
}
func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte,
nodePub []byte, nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
func fetchChanEdgePolicy(edges kvdb.ReadBucket, chanID []byte,
nodePub []byte, nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
var edgeKey [33 + 8]byte
copy(edgeKey[:], nodePub)
@ -3871,8 +3873,8 @@ func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte,
return ep, nil
}
func fetchChanEdgePolicies(edgeIndex *bbolt.Bucket, edges *bbolt.Bucket,
nodes *bbolt.Bucket, chanID []byte,
func fetchChanEdgePolicies(edgeIndex kvdb.ReadBucket, edges kvdb.ReadBucket,
nodes kvdb.ReadBucket, chanID []byte,
db *DB) (*ChannelEdgePolicy, *ChannelEdgePolicy, error) {
edgeInfo := edgeIndex.Get(chanID)
@ -3980,7 +3982,7 @@ func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
}
func deserializeChanEdgePolicy(r io.Reader,
nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
edge := &ChannelEdgePolicy{}

View File

@ -17,8 +17,8 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route"
)
@ -882,7 +882,7 @@ func TestGraphTraversal(t *testing.T) {
// Iterate over each node as returned by the graph, if all nodes are
// reached, then the map created above should be empty.
err = graph.ForEachNode(nil, func(_ *bbolt.Tx, node *LightningNode) error {
err = graph.ForEachNode(nil, func(_ kvdb.ReadTx, node *LightningNode) error {
delete(nodeIndex, node.Alias)
return nil
})
@ -978,7 +978,7 @@ func TestGraphTraversal(t *testing.T) {
// Finally, we want to test the ability to iterate over all the
// outgoing channels for a particular node.
numNodeChans := 0
err = firstNode.ForEachChannel(nil, func(_ *bbolt.Tx, _ *ChannelEdgeInfo,
err = firstNode.ForEachChannel(nil, func(_ kvdb.ReadTx, _ *ChannelEdgeInfo,
outEdge, inEdge *ChannelEdgePolicy) error {
// All channels between first and second node should have fully
@ -1051,7 +1051,7 @@ func assertNumChans(t *testing.T, graph *ChannelGraph, n int) {
func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) {
numNodes := 0
err := graph.ForEachNode(nil, func(_ *bbolt.Tx, _ *LightningNode) error {
err := graph.ForEachNode(nil, func(_ kvdb.ReadTx, _ *LightningNode) error {
numNodes++
return nil
})
@ -2097,10 +2097,9 @@ func TestIncompleteChannelPolicies(t *testing.T) {
}
// Ensure that channel is reported with unknown policies.
checkPolicies := func(node *LightningNode, expectedIn, expectedOut bool) {
calls := 0
node.ForEachChannel(nil, func(_ *bbolt.Tx, _ *ChannelEdgeInfo,
err := node.ForEachChannel(nil, func(_ kvdb.ReadTx, _ *ChannelEdgeInfo,
outEdge, inEdge *ChannelEdgePolicy) error {
if !expectedOut && outEdge != nil {
@ -2123,6 +2122,9 @@ func TestIncompleteChannelPolicies(t *testing.T) {
return nil
})
if err != nil {
t.Fatalf("unable to scan channels: %v", err)
}
if calls != 1 {
t.Fatalf("Expected only one callback call")
@ -2233,17 +2235,27 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) {
timestampSet[t] = struct{}{}
}
err := db.View(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
edges := tx.ReadBucket(edgeBucket)
if edges == nil {
return ErrGraphNoEdgesFound
}
edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket)
edgeUpdateIndex := edges.NestedReadBucket(
edgeUpdateIndexBucket,
)
if edgeUpdateIndex == nil {
return ErrGraphNoEdgesFound
}
numEntries := edgeUpdateIndex.Stats().KeyN
var numEntries int
err := edgeUpdateIndex.ForEach(func(k, v []byte) error {
numEntries++
return nil
})
if err != nil {
return err
}
expectedEntries := len(timestampSet)
if numEntries != expectedEntries {
return fmt.Errorf("expected %v entries in the "+
@ -2832,8 +2844,8 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
// Attempting to deserialize these bytes should return an error.
r := bytes.NewReader(stripped)
err = db.View(func(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
err = kvdb.View(db, func(tx kvdb.ReadTx) error {
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -2852,13 +2864,13 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
}
// Put the stripped bytes in the DB.
err = db.Update(func(tx *bbolt.Tx) error {
edges := tx.Bucket(edgeBucket)
err = kvdb.Update(db, func(tx kvdb.RwTx) error {
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return ErrEdgeNotFound
}
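Because the abstracted bucket interface used here does not expose bbolt's Stats(), TestChannelEdgePruningUpdateIndexDeletion above counts index entries with ForEach instead. That loop generalizes into a small helper; numBucketKeys is an invented name, and for a flat index bucket such as the edge update index the count matches the old Stats().KeyN:

// numBucketKeys counts the entries in a bucket by iterating them with
// ForEach, standing in for bbolt's Stats().KeyN.
func numBucketKeys(bkt kvdb.ReadBucket) (int, error) {
    var n int
    err := bkt.ForEach(func(k, v []byte) error {
        n++
        return nil
    })

    return n, err
}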

View File

@ -8,7 +8,7 @@ import (
"io"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/htlcswitch/hop"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
@ -401,8 +401,8 @@ func (d *DB) AddInvoice(newInvoice *Invoice, paymentHash lntypes.Hash) (
}
var invoiceAddIndex uint64
err := d.Update(func(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil {
return err
}
@ -479,13 +479,13 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
var startIndex [8]byte
byteOrder.PutUint64(startIndex[:], sinceAddIndex)
err := d.DB.View(func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
addIndex := invoices.Bucket(addIndexBucket)
addIndex := invoices.NestedReadBucket(addIndexBucket)
if addIndex == nil {
return ErrNoInvoicesCreated
}
@ -493,7 +493,7 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
// We'll now run through each entry in the add index starting
// at our starting index. We'll continue until we reach the
// very end of the current key space.
invoiceCursor := addIndex.Cursor()
invoiceCursor := addIndex.ReadCursor()
// We'll seek to the starting index, then manually advance the
// cursor in order to skip the entry with the since add index.
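The seek-then-advance idiom described in the comment above is easier to see on its own. The sketch below (entriesAfter is an invented name) walks an index bucket strictly past a given 8-byte index using only ReadCursor calls; it assumes the package's byteOrder helper and the standard bytes package:

// entriesAfter returns copies of every value stored under a key strictly
// greater than sinceIndex in the given index bucket.
func entriesAfter(index kvdb.ReadBucket, sinceIndex uint64) [][]byte {
    var startIndex [8]byte
    byteOrder.PutUint64(startIndex[:], sinceIndex)

    var values [][]byte
    cursor := index.ReadCursor()
    for k, v := cursor.Seek(startIndex[:]); k != nil; k, v = cursor.Next() {
        // Skip the entry for sinceIndex itself so that only strictly
        // newer entries are returned.
        if bytes.Equal(k, startIndex[:]) {
            continue
        }

        valCopy := make([]byte, len(v))
        copy(valCopy, v)
        values = append(values, valCopy)
    }

    return values
}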
@ -534,12 +534,12 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
// terms of the payment.
func (d *DB) LookupInvoice(paymentHash [32]byte) (Invoice, error) {
var invoice Invoice
err := d.View(func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
invoiceIndex := invoices.Bucket(invoiceIndexBucket)
invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket)
if invoiceIndex == nil {
return ErrNoInvoicesCreated
}
@ -589,13 +589,13 @@ func (d *DB) FetchAllInvoicesWithPaymentHash(pendingOnly bool) (
var result []InvoiceWithPaymentHash
err := d.View(func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
invoiceIndex := invoices.Bucket(invoiceIndexBucket)
invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket)
if invoiceIndex == nil {
// Mask the error if there's no invoice
// index as that simply means there are no
@ -695,21 +695,21 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
InvoiceQuery: q,
}
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
// If the bucket wasn't found, then there aren't any invoices
// within the database yet, so we can simply exit.
invoices := tx.Bucket(invoiceBucket)
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
invoiceAddIndex := invoices.Bucket(addIndexBucket)
invoiceAddIndex := invoices.NestedReadBucket(addIndexBucket)
if invoiceAddIndex == nil {
return ErrNoInvoicesCreated
}
// keyForIndex is a helper closure that retrieves the invoice
// key for the given add index of an invoice.
keyForIndex := func(c *bbolt.Cursor, index uint64) []byte {
keyForIndex := func(c kvdb.ReadCursor, index uint64) []byte {
var keyIndex [8]byte
byteOrder.PutUint64(keyIndex[:], index)
_, invoiceKey := c.Seek(keyIndex[:])
@ -718,7 +718,7 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
// nextKey is a helper closure to determine what the next
// invoice key is when iterating over the invoice add index.
nextKey := func(c *bbolt.Cursor) ([]byte, []byte) {
nextKey := func(c kvdb.ReadCursor) ([]byte, []byte) {
if q.Reversed {
return c.Prev()
}
@ -728,7 +728,7 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
// We'll be using a cursor to seek into the database and return
// a slice of invoices. We'll need to determine where to start
// our cursor depending on the parameters set within the query.
c := invoiceAddIndex.Cursor()
c := invoiceAddIndex.ReadCursor()
invoiceKey := keyForIndex(c, q.IndexOffset+1)
// If the query is specifying reverse iteration, then we must
@ -822,8 +822,8 @@ func (d *DB) UpdateInvoice(paymentHash lntypes.Hash,
callback InvoiceUpdateCallback) (*Invoice, error) {
var updatedInvoice *Invoice
err := d.Update(func(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil {
return err
}
@ -877,13 +877,13 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
var startIndex [8]byte
byteOrder.PutUint64(startIndex[:], sinceSettleIndex)
err := d.DB.View(func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil {
return ErrNoInvoicesCreated
}
settleIndex := invoices.Bucket(settleIndexBucket)
settleIndex := invoices.NestedReadBucket(settleIndexBucket)
if settleIndex == nil {
return ErrNoInvoicesCreated
}
@ -891,7 +891,7 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
// We'll now run through each entry in the settle index starting
// at our starting index. We'll continue until we reach the
// very end of the current key space.
invoiceCursor := settleIndex.Cursor()
invoiceCursor := settleIndex.ReadCursor()
// We'll seek to the starting index, then manually advance the
// cursor in order to skip the entry with the since settle index.
@ -919,7 +919,7 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
return settledInvoices, nil
}
func putInvoice(invoices, invoiceIndex, addIndex *bbolt.Bucket,
func putInvoice(invoices, invoiceIndex, addIndex kvdb.RwBucket,
i *Invoice, invoiceNum uint32, paymentHash lntypes.Hash) (
uint64, error) {
@ -1112,7 +1112,7 @@ func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error {
return nil
}
func fetchInvoice(invoiceNum []byte, invoices *bbolt.Bucket) (Invoice, error) {
func fetchInvoice(invoiceNum []byte, invoices kvdb.ReadBucket) (Invoice, error) {
invoiceBytes := invoices.Get(invoiceNum)
if invoiceBytes == nil {
return Invoice{}, ErrInvoiceNotFound
@ -1325,7 +1325,7 @@ func copyInvoice(src *Invoice) *Invoice {
// updateInvoice fetches the invoice, obtains the update descriptor from the
// callback and applies the updates in a single db transaction.
func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex *bbolt.Bucket,
func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex kvdb.RwBucket,
invoiceNum []byte, callback InvoiceUpdateCallback) (*Invoice, error) {
invoice, err := fetchInvoice(invoiceNum, invoices)
@ -1572,7 +1572,7 @@ func updateHtlc(resolveTime time.Time, htlc *InvoiceHTLC,
// setSettleMetaFields updates the metadata associated with settlement of an
// invoice.
func setSettleMetaFields(settleIndex *bbolt.Bucket, invoiceNum []byte,
func setSettleMetaFields(settleIndex kvdb.RwBucket, invoiceNum []byte,
invoice *Invoice, now time.Time) error {
// Now that we know the invoice hasn't already been settled, we'll
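
As a reference for the pattern used throughout this file, here is a minimal sketch of a read-only index scan built from the kvdb calls visible above (kvdb.View, ReadBucket, NestedReadBucket, ReadCursor). The package name, function name, and bucket values are illustrative placeholders, not the real channeldb identifiers:

package invoicesketch

import (
    "encoding/binary"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// Placeholder bucket names; the real values live in the channeldb package.
var (
    invoiceBucket  = []byte("invoices")
    addIndexBucket = []byte("invoice-add-index")
)

// scanAddIndexSince walks the invoice add index starting at sinceAddIndex
// inside a single read-only transaction, mirroring InvoicesAddedSince above.
func scanAddIndexSince(db kvdb.Backend, sinceAddIndex uint64) ([][]byte, error) {
    var invoiceKeys [][]byte
    err := kvdb.View(db, func(tx kvdb.ReadTx) error {
        invoices := tx.ReadBucket(invoiceBucket)
        if invoices == nil {
            return nil
        }
        addIndex := invoices.NestedReadBucket(addIndexBucket)
        if addIndex == nil {
            return nil
        }

        var startIndex [8]byte
        binary.BigEndian.PutUint64(startIndex[:], sinceAddIndex)

        // A cursor obtained from a ReadBucket is itself read-only.
        c := addIndex.ReadCursor()
        for k, v := c.Seek(startIndex[:]); k != nil; k, v = c.Next() {
            invoiceKeys = append(invoiceKeys, v)
        }
        return nil
    })
    return invoiceKeys, err
}

Keeping the whole scan on the read-only interfaces means an accidental write inside the closure should fail to compile, since ReadBucket is not expected to expose any write methods.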

View File

@ -1,6 +1,8 @@
package channeldb
import "github.com/coreos/bbolt"
import (
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
// metaBucket stores all the meta information concerning the state of
@ -20,10 +22,10 @@ type Meta struct {
// FetchMeta fetches the meta data from boltdb and returns a filled meta
// structure.
func (d *DB) FetchMeta(tx *bbolt.Tx) (*Meta, error) {
func (d *DB) FetchMeta(tx kvdb.ReadTx) (*Meta, error) {
meta := &Meta{}
err := d.View(func(tx *bbolt.Tx) error {
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
return fetchMeta(meta, tx)
})
if err != nil {
@ -36,8 +38,8 @@ func (d *DB) FetchMeta(tx *bbolt.Tx) (*Meta, error) {
// fetchMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported FetchMeta method
// for more information.
func fetchMeta(meta *Meta, tx *bbolt.Tx) error {
metaBucket := tx.Bucket(metaBucket)
func fetchMeta(meta *Meta, tx kvdb.ReadTx) error {
metaBucket := tx.ReadBucket(metaBucket)
if metaBucket == nil {
return ErrMetaNotFound
}
@ -54,7 +56,7 @@ func fetchMeta(meta *Meta, tx *bbolt.Tx) error {
// PutMeta writes the passed instance of the database meta-data struct to disk.
func (d *DB) PutMeta(meta *Meta) error {
return d.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(d, func(tx kvdb.RwTx) error {
return putMeta(meta, tx)
})
}
@ -62,8 +64,8 @@ func (d *DB) PutMeta(meta *Meta) error {
// putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for
// more information.
func putMeta(meta *Meta, tx *bbolt.Tx) error {
metaBucket, err := tx.CreateBucketIfNotExists(metaBucket)
func putMeta(meta *Meta, tx kvdb.RwTx) error {
metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
if err != nil {
return err
}
@ -71,7 +73,7 @@ func putMeta(meta *Meta, tx *bbolt.Tx) error {
return putDbVersion(metaBucket, meta)
}
func putDbVersion(metaBucket *bbolt.Bucket, meta *Meta) error {
func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error {
scratch := make([]byte, 4)
byteOrder.PutUint32(scratch, meta.DbVersionNumber)
return metaBucket.Put(dbVersionKey, scratch)
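
A compact sketch of the paired write/read flow this file now uses, assuming the kvdb.Update/kvdb.View signatures shown above; CreateTopLevelBucket takes the place of the earlier CreateBucketIfNotExists call. The bucket name and key below are placeholders:

package metasketch

import (
    "encoding/binary"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// Placeholder bucket name and key.
var (
    metaBucket   = []byte("metadata")
    dbVersionKey = []byte("dbp")
)

// writeVersion persists a version number inside a read-write transaction.
func writeVersion(db kvdb.Backend, version uint32) error {
    return kvdb.Update(db, func(tx kvdb.RwTx) error {
        // Used here where bbolt's CreateBucketIfNotExists was used before.
        meta, err := tx.CreateTopLevelBucket(metaBucket)
        if err != nil {
            return err
        }
        scratch := make([]byte, 4)
        binary.BigEndian.PutUint32(scratch, version)
        return meta.Put(dbVersionKey, scratch)
    })
}

// readVersion reads the value back under a read-only transaction.
func readVersion(db kvdb.Backend) (uint32, error) {
    var version uint32
    err := kvdb.View(db, func(tx kvdb.ReadTx) error {
        meta := tx.ReadBucket(metaBucket)
        if meta == nil {
            // No metadata written yet.
            return nil
        }
        if v := meta.Get(dbVersionKey); len(v) == 4 {
            version = binary.BigEndian.Uint32(v)
        }
        return nil
    })
    return version, err
}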

View File

@ -5,8 +5,8 @@ import (
"io/ioutil"
"testing"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// applyMigration is a helper test function that encapsulates the general steps
@ -121,11 +121,11 @@ func TestOrderOfMigrations(t *testing.T) {
versions := []version{
{0, nil},
{1, nil},
{2, func(tx *bbolt.Tx) error {
{2, func(tx kvdb.RwTx) error {
appliedMigration = 2
return nil
}},
{3, func(tx *bbolt.Tx) error {
{3, func(tx kvdb.RwTx) error {
appliedMigration = 3
return nil
}},
@ -197,21 +197,23 @@ func TestMigrationWithPanic(t *testing.T) {
beforeMigrationFunc := func(d *DB) {
// Insert data in the database and then make sure that the
// key isn't changed in case of a panic or failure.
d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
bucket.Put(keyPrefix, beforeMigration)
return nil
return bucket.Put(keyPrefix, beforeMigration)
})
if err != nil {
t.Fatalf("unable to insert: %v", err)
}
}
// Create a migration function which changes the initially created data and
// throws a panic, in this case pretending that something goes wrong.
migrationWithPanic := func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
migrationWithPanic := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -231,8 +233,8 @@ func TestMigrationWithPanic(t *testing.T) {
t.Fatal("migration panicked but version is changed")
}
err = d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -268,22 +270,24 @@ func TestMigrationWithFatal(t *testing.T) {
afterMigration := []byte("aftermigration")
beforeMigrationFunc := func(d *DB) {
d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
bucket.Put(keyPrefix, beforeMigration)
return nil
return bucket.Put(keyPrefix, beforeMigration)
})
if err != nil {
t.Fatalf("unable to insert pre migration key: %v", err)
}
}
// Create a migration function which changes the initially created data and
// returns an error, in this case pretending that something goes
// wrong.
migrationWithFatal := func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
migrationWithFatal := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -303,8 +307,8 @@ func TestMigrationWithFatal(t *testing.T) {
t.Fatal("migration failed but version is changed")
}
err = d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -341,8 +345,8 @@ func TestMigrationWithoutErrors(t *testing.T) {
// Populate database with initial data.
beforeMigrationFunc := func(d *DB) {
d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -353,8 +357,8 @@ func TestMigrationWithoutErrors(t *testing.T) {
}
// Create a migration function which changes the initially created data.
migrationWithoutErrors := func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
migrationWithoutErrors := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -375,8 +379,8 @@ func TestMigrationWithoutErrors(t *testing.T) {
"successfully applied migration")
}
err = d.Update(func(tx *bbolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil {
return err
}
@ -419,7 +423,7 @@ func TestMigrationReversion(t *testing.T) {
// Update the database metadata to point to one more than the highest
// known version.
err = cdb.Update(func(tx *bbolt.Tx) error {
err = kvdb.Update(cdb, func(tx kvdb.RwTx) error {
newMeta := &Meta{
DbVersionNumber: getLatestDBVersion(dbVersions) + 1,
}

View File

@ -3,7 +3,7 @@ package migration12
import (
"bytes"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -12,11 +12,11 @@ var emptyFeatures = lnwire.NewFeatureVector(nil, nil)
// MigrateInvoiceTLV migrates all existing invoice bodies over to be serialized
// in a single TLV stream. In the process, we drop the Receipt field and add
// PaymentAddr and Features to the invoice Terms.
func MigrateInvoiceTLV(tx *bbolt.Tx) error {
func MigrateInvoiceTLV(tx kvdb.RwTx) error {
log.Infof("Migrating invoice bodies to TLV, " +
"adding payment addresses and feature vectors.")
invoiceB := tx.Bucket(invoiceBucket)
invoiceB := tx.ReadWriteBucket(invoiceBucket)
if invoiceB == nil {
return nil
}

View File

@ -5,7 +5,7 @@ import (
"fmt"
"testing"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migration12"
"github.com/lightningnetwork/lnd/channeldb/migtest"
"github.com/lightningnetwork/lnd/lntypes"
@ -121,15 +121,15 @@ var (
type migrationTest struct {
name string
beforeMigration func(*bbolt.Tx) error
afterMigration func(*bbolt.Tx) error
beforeMigration func(kvdb.RwTx) error
afterMigration func(kvdb.RwTx) error
}
var migrationTests = []migrationTest{
{
name: "no invoices",
beforeMigration: func(*bbolt.Tx) error { return nil },
afterMigration: func(*bbolt.Tx) error { return nil },
beforeMigration: func(kvdb.RwTx) error { return nil },
afterMigration: func(kvdb.RwTx) error { return nil },
},
{
name: "zero htlcs",
@ -145,9 +145,9 @@ var migrationTests = []migrationTest{
// genBeforeMigration creates a closure that inserts an invoice serialized under
// the old format under the test payment hash.
func genBeforeMigration(beforeBytes []byte) func(*bbolt.Tx) error {
return func(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(
func genBeforeMigration(beforeBytes []byte) func(kvdb.RwTx) error {
return func(tx kvdb.RwTx) error {
invoices, err := tx.CreateTopLevelBucket(
invoiceBucket,
)
if err != nil {
@ -162,9 +162,9 @@ func genBeforeMigration(beforeBytes []byte) func(*bbolt.Tx) error {
// succeeded, by comparing the resulting encoding of the invoice to the
// expected serialization. In addition, the decoded invoice is compared against
// the expected invoice for equality.
func genAfterMigration(afterBytes []byte) func(*bbolt.Tx) error {
return func(tx *bbolt.Tx) error {
invoices := tx.Bucket(invoiceBucket)
func genAfterMigration(afterBytes []byte) func(kvdb.RwTx) error {
return func(tx kvdb.RwTx) error {
invoices := tx.ReadWriteBucket(invoiceBucket)
if invoices == nil {
return fmt.Errorf("invoice bucket not found")
}

View File

@ -4,7 +4,7 @@ import (
"encoding/binary"
"fmt"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
@ -48,13 +48,13 @@ var (
// MigrateMPP migrates the payments to a new structure that accommodates mpp
// payments.
func MigrateMPP(tx *bbolt.Tx) error {
func MigrateMPP(tx kvdb.RwTx) error {
log.Infof("Migrating payments to mpp structure")
// Iterate over all payments and store their indexing keys. This is
// needed, because no modifications are allowed inside a Bucket.ForEach
// loop.
paymentsBucket := tx.Bucket(paymentsRootBucket)
paymentsBucket := tx.ReadWriteBucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
@ -70,7 +70,7 @@ func MigrateMPP(tx *bbolt.Tx) error {
// With all keys retrieved, start the migration.
for _, k := range paymentKeys {
bucket := paymentsBucket.Bucket(k)
bucket := paymentsBucket.NestedReadWriteBucket(k)
// We only expect sub-buckets to be found in
// this top-level bucket.
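
The comment above notes that no modifications are allowed inside a Bucket.ForEach loop, which is why MigrateMPP collects keys first and mutates afterwards. A hedged sketch of that two-pass shape, with a placeholder bucket name and a caller-supplied mutate callback:

package mppsketch

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// Placeholder bucket name.
var paymentsRootBucket = []byte("payments-root-bucket")

// collectThenMutate gathers the keys of all nested payment buckets inside
// ForEach, and only re-opens them for writing once iteration has finished.
func collectThenMutate(tx kvdb.RwTx, mutate func(kvdb.RwBucket) error) error {
    payments := tx.ReadWriteBucket(paymentsRootBucket)
    if payments == nil {
        return nil
    }

    // First pass: record the keys. In the bolt backend a nil value is
    // expected to indicate a nested bucket rather than a plain key.
    var keys [][]byte
    err := payments.ForEach(func(k, v []byte) error {
        if v == nil {
            keys = append(keys, k)
        }
        return nil
    })
    if err != nil {
        return err
    }

    // Second pass: open each sub-bucket read-write and apply the change.
    for _, k := range keys {
        bucket := payments.NestedReadWriteBucket(k)
        if bucket == nil {
            continue
        }
        if err := mutate(bucket); err != nil {
            return err
        }
    }
    return nil
}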

View File

@ -3,7 +3,7 @@ package migration13
import (
"testing"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migtest"
)
@ -111,10 +111,10 @@ func TestMigrateMpp(t *testing.T) {
migtest.ApplyMigration(
t,
func(tx *bbolt.Tx) error {
func(tx kvdb.RwTx) error {
return migtest.RestoreDB(tx, paymentsRootBucket, pre)
},
func(tx *bbolt.Tx) error {
func(tx kvdb.RwTx) error {
return migtest.VerifyDB(tx, paymentsRootBucket, post)
},
MigrateMPP,

View File

@ -8,7 +8,7 @@ import (
"path/filepath"
"time"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
const (
@ -19,7 +19,7 @@ const (
// migration is a function which takes a prior outdated version of the database
// instances and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database.
type migration func(tx *bbolt.Tx) error
type migration func(tx kvdb.RwTx) error
var (
// Big endian is the preferred byte order, due to cursor scans over
@ -31,7 +31,7 @@ var (
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
*bbolt.DB
kvdb.Backend
dbPath string
graph *ChannelGraph
now func() time.Time
@ -55,20 +55,15 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
// Specify bbolt freelist options to reduce heap pressure in case the
// freelist grows to be very large.
options := &bbolt.Options{
NoFreelistSync: opts.NoFreelistSync,
FreelistType: bbolt.FreelistMapType,
}
bdb, err := bbolt.Open(path, dbFilePermission, options)
bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync)
if err != nil {
return nil, err
}
chanDB := &DB{
DB: bdb,
dbPath: dbPath,
now: time.Now,
Backend: bdb,
dbPath: dbPath,
now: time.Now,
}
chanDB.graph = newChannelGraph(
chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
@ -89,28 +84,28 @@ func createChannelDB(dbPath string) error {
}
path := filepath.Join(dbPath, dbName)
bdb, err := bbolt.Open(path, dbFilePermission, nil)
bdb, err := kvdb.Create(kvdb.BoltBackendName, path, false)
if err != nil {
return err
}
err = bdb.Update(func(tx *bbolt.Tx) error {
if _, err := tx.CreateBucket(openChannelBucket); err != nil {
err = kvdb.Update(bdb, func(tx kvdb.RwTx) error {
if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(invoiceBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil {
return err
}
if _, err := tx.CreateBucket(paymentBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(paymentBucket); err != nil {
return err
}
nodes, err := tx.CreateBucket(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -123,7 +118,7 @@ func createChannelDB(dbPath string) error {
return err
}
edges, err := tx.CreateBucket(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return err
}
@ -140,7 +135,7 @@ func createChannelDB(dbPath string) error {
return err
}
graphMeta, err := tx.CreateBucket(graphMetaBucket)
graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil {
return err
}
@ -149,7 +144,7 @@ func createChannelDB(dbPath string) error {
return err
}
if _, err := tx.CreateBucket(metaBucket); err != nil {
if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil {
return err
}
@ -185,8 +180,8 @@ func fileExists(path string) bool {
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
var chanSummaries []*ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error {
closeBucket := tx.Bucket(closedChannelBucket)
if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil {
return ErrNoClosedChannels
}
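
For context, a sketch of how a fresh database might be created and seeded with one top-level bucket, using only kvdb.Create, kvdb.Update and CreateTopLevelBucket as they appear in the hunks above. The helper name and bucket value are placeholders; later opens would go through kvdb.Open with the same backend name, as the Open function above does:

package dbsketch

import (
    "path/filepath"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// Placeholder bucket name.
var openChannelBucket = []byte("open-chan-bucket")

// initChannelDB creates a bolt-backed kvdb instance at dbPath/fileName and
// seeds a single top-level bucket inside one write transaction.
func initChannelDB(dbPath, fileName string, noFreelistSync bool) (kvdb.Backend, error) {
    path := filepath.Join(dbPath, fileName)

    db, err := kvdb.Create(kvdb.BoltBackendName, path, noFreelistSync)
    if err != nil {
        return nil, err
    }

    err = kvdb.Update(db, func(tx kvdb.RwTx) error {
        _, err := tx.CreateTopLevelBucket(openChannelBucket)
        return err
    })
    if err != nil {
        return nil, err
    }
    return db, nil
}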

View File

@ -13,7 +13,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -175,10 +175,10 @@ func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph {
// node based off the source node.
func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
var source *LightningNode
err := c.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadBucket(nodeBucket)
if nodes == nil {
return ErrGraphNotFound
}
@ -202,7 +202,7 @@ func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
// of the graph. The source node is treated as the center node within a
// star-graph. This method may be used to kick off a path finding algorithm in
// order to explore the reachability of another node based off the source node.
func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) sourceNode(nodes kvdb.ReadBucket) (*LightningNode, error) {
selfPub := nodes.Get(sourceKey)
if selfPub == nil {
return nil, ErrSourceNodeNotSet
@ -225,10 +225,10 @@ func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
nodePubBytes := node.PubKeyBytes[:]
return c.db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
// First grab the nodes bucket which stores the mapping from
// pubKey to node information.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -245,8 +245,8 @@ func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
})
}
func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
func addLightningNode(tx kvdb.RwTx, node *LightningNode) error {
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return err
}
@ -270,17 +270,17 @@ func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
// buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged
// to node2.
func updateEdgePolicy(tx *bbolt.Tx, edge *ChannelEdgePolicy) (bool, error) {
edges := tx.Bucket(edgeBucket)
if edges == nil {
func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, error) {
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return false, ErrEdgeNotFound
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return false, ErrEdgeNotFound
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return false, err
}
@ -551,8 +551,8 @@ func (c *ChannelEdgePolicy) IsDisabled() bool {
lnwire.ChanUpdateDisabled
}
func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
updateIndex *bbolt.Bucket, node *LightningNode) error {
func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket,
updateIndex kvdb.RwBucket, node *LightningNode) error {
var (
scratch [16]byte
@ -680,7 +680,7 @@ func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
return nodeBucket.Put(nodePub, b.Bytes())
}
func fetchLightningNode(nodeBucket *bbolt.Bucket,
func fetchLightningNode(nodeBucket kvdb.ReadBucket,
nodePub []byte) (LightningNode, error) {
nodeBytes := nodeBucket.Get(nodePub)
@ -863,7 +863,7 @@ func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
return edgeInfo, nil
}
func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
from, to []byte) error {
var edgeKey [33 + 8]byte
@ -943,7 +943,7 @@ func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
// in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when prune is needed.
func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
direction bool, disabled bool) error {
var disabledEdgeKey [8 + 1]byte
@ -968,7 +968,7 @@ func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
// putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket.
func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
from []byte) error {
var edgeKey [33 + 8]byte
@ -983,8 +983,8 @@ func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
return edges.Put(edgeKey[:], unknownPolicy)
}
func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte,
nodePub []byte, nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
func fetchChanEdgePolicy(edges kvdb.ReadBucket, chanID []byte,
nodePub []byte, nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
var edgeKey [33 + 8]byte
copy(edgeKey[:], nodePub)
@ -1084,7 +1084,7 @@ func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
}
func deserializeChanEdgePolicy(r io.Reader,
nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) {
nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
edge := &ChannelEdgePolicy{}
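
The helpers in this file now declare the weakest bucket type they need (kvdb.ReadBucket for fetch helpers, kvdb.RwBucket for put helpers). A sketch of why that split is useful, under the assumption, hypothetical here, that kvdb.RwBucket embeds kvdb.ReadBucket so read helpers remain callable from write transactions; names and bucket values are placeholders:

package graphsketch

import (
    "bytes"
    "errors"

    "github.com/lightningnetwork/lnd/channeldb/kvdb"
)

// Placeholder bucket and key names.
var (
    nodeBucket = []byte("graph-node")
    sourceKey  = []byte("source")
)

// sourcePubKey only needs read access, so it asks for kvdb.ReadBucket and can
// be called from both read and write paths (assuming RwBucket satisfies
// ReadBucket).
func sourcePubKey(nodes kvdb.ReadBucket) ([]byte, error) {
    selfPub := nodes.Get(sourceKey)
    if selfPub == nil {
        return nil, errors.New("source node not set")
    }
    return selfPub, nil
}

// setSourcePubKey needs write access, so it asks for kvdb.RwBucket; a bucket
// obtained from a ReadTx cannot be passed here.
func setSourcePubKey(nodes kvdb.RwBucket, pub []byte) error {
    return nodes.Put(sourceKey, pub)
}

// updateSource reuses the read helper inside a write transaction before
// mutating the bucket.
func updateSource(db kvdb.Backend, pub []byte) error {
    return kvdb.Update(db, func(tx kvdb.RwTx) error {
        nodes, err := tx.CreateTopLevelBucket(nodeBucket)
        if err != nil {
            return err
        }
        if existing, _ := sourcePubKey(nodes); bytes.Equal(existing, pub) {
            // Already stored, nothing to do.
            return nil
        }
        return setSourcePubKey(nodes, pub)
    })
}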

View File

@ -8,7 +8,7 @@ import (
"time"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tlv"
@ -252,8 +252,8 @@ func validateInvoice(i *Invoice) error {
func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, error) {
var invoices []Invoice
err := d.View(func(tx *bbolt.Tx) error {
invoiceB := tx.Bucket(invoiceBucket)
err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoiceB := tx.ReadBucket(invoiceBucket)
if invoiceB == nil {
return ErrNoInvoicesCreated
}

View File

@ -1,6 +1,8 @@
package migration_01_to_11
import "github.com/coreos/bbolt"
import (
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
// metaBucket stores all the meta information concerning the state of
@ -21,8 +23,8 @@ type Meta struct {
// putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for
// more information.
func putMeta(meta *Meta, tx *bbolt.Tx) error {
metaBucket, err := tx.CreateBucketIfNotExists(metaBucket)
func putMeta(meta *Meta, tx kvdb.RwTx) error {
metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
if err != nil {
return err
}
@ -30,7 +32,7 @@ func putMeta(meta *Meta, tx *bbolt.Tx) error {
return putDbVersion(metaBucket, meta)
}
func putDbVersion(metaBucket *bbolt.Bucket, meta *Meta) error {
func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error {
scratch := make([]byte, 4)
byteOrder.PutUint32(scratch, meta.DbVersionNumber)
return metaBucket.Put(dbVersionKey, scratch)

View File

@ -3,8 +3,8 @@ package migration_01_to_11
import (
"testing"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// applyMigration is a helper test function that encapsulates the general steps
@ -49,7 +49,7 @@ func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB),
}()
// Apply migration.
err = cdb.Update(func(tx *bbolt.Tx) error {
err = kvdb.Update(cdb, func(tx kvdb.RwTx) error {
return migrationFunc(tx)
})
if err != nil {

View File

@ -7,7 +7,7 @@ import (
"io"
"sort"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -76,8 +76,8 @@ func (db *DB) addPayment(payment *outgoingPayment) error {
}
paymentBytes := b.Bytes()
return db.Batch(func(tx *bbolt.Tx) error {
payments, err := tx.CreateBucketIfNotExists(paymentBucket)
return kvdb.Update(db, func(tx kvdb.RwTx) error {
payments, err := tx.CreateTopLevelBucket(paymentBucket)
if err != nil {
return err
}
@ -104,8 +104,8 @@ func (db *DB) addPayment(payment *outgoingPayment) error {
func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) {
var payments []*outgoingPayment
err := db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(paymentBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
bucket := tx.ReadBucket(paymentBucket)
if bucket == nil {
return ErrNoPaymentsCreated
}
@ -140,7 +140,7 @@ func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) {
// NOTE: Deprecated. Kept around for migration purposes.
func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) {
var paymentStatus = StatusUnknown
err := db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
var err error
paymentStatus, err = fetchPaymentStatusTx(tx, paymentHash)
return err
@ -158,11 +158,11 @@ func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) {
// can be composed into other atomic operations.
//
// NOTE: Deprecated. Kept around for migration purposes.
func fetchPaymentStatusTx(tx *bbolt.Tx, paymentHash [32]byte) (PaymentStatus, error) {
func fetchPaymentStatusTx(tx kvdb.ReadTx, paymentHash [32]byte) (PaymentStatus, error) {
// The default status for all payments that aren't recorded in database.
var paymentStatus = StatusUnknown
bucket := tx.Bucket(paymentStatusBucket)
bucket := tx.ReadBucket(paymentStatusBucket)
if bucket == nil {
return paymentStatus, nil
}
@ -375,14 +375,14 @@ func deserializeHopMigration9(r io.Reader) (*Hop, error) {
func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
var payments []*Payment
err := db.View(func(tx *bbolt.Tx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k)
bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
@ -401,13 +401,13 @@ func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
// payment hash was possible. These will be found in a
// sub-bucket indexed by their sequence number if
// available.
dup := bucket.Bucket(paymentDuplicateBucket)
dup := bucket.NestedReadBucket(paymentDuplicateBucket)
if dup == nil {
return nil
}
return dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k)
subBucket := dup.NestedReadBucket(k)
if subBucket == nil {
// We expect one bucket for each duplicate to
// be found.
@ -437,7 +437,7 @@ func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
return payments, nil
}
func fetchPaymentMigration9(bucket *bbolt.Bucket) (*Payment, error) {
func fetchPaymentMigration9(bucket kvdb.ReadBucket) (*Payment, error) {
var (
err error
p = &Payment{}

View File

@ -4,15 +4,15 @@ import (
"bytes"
"io"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// MigrateRouteSerialization migrates the way we serialize routes across the
// entire database. At the time of writing of this migration, this includes our
// payment attempts, as well as the payment results in mission control.
func MigrateRouteSerialization(tx *bbolt.Tx) error {
func MigrateRouteSerialization(tx kvdb.RwTx) error {
// First, we'll do all the payment attempts.
rootPaymentBucket := tx.Bucket(paymentsRootBucket)
rootPaymentBucket := tx.ReadWriteBucket(paymentsRootBucket)
if rootPaymentBucket == nil {
return nil
}
@ -36,7 +36,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now that we have all the payment hashes, we can carry out the
// migration itself.
for _, payHash := range payHashes {
payHashBucket := rootPaymentBucket.Bucket(payHash)
payHashBucket := rootPaymentBucket.NestedReadWriteBucket(payHash)
// First, we'll migrate the main (non duplicate) payment to
// this hash.
@ -47,7 +47,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now that we've migrated the main payment, we'll also check
// for any duplicate payments to the same payment hash.
dupBucket := payHashBucket.Bucket(paymentDuplicateBucket)
dupBucket := payHashBucket.NestedReadWriteBucket(paymentDuplicateBucket)
// If there's no dup bucket, then we can move on to the next
// payment.
@ -69,7 +69,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now in this second pass, we'll re-serialize their duplicate
// payment attempts under the new encoding.
for _, seqNo := range dupSeqNos {
dupPayHashBucket := dupBucket.Bucket(seqNo)
dupPayHashBucket := dupBucket.NestedReadWriteBucket(seqNo)
err := migrateAttemptEncoding(tx, dupPayHashBucket)
if err != nil {
return err
@ -83,8 +83,8 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
"existing data")
resultsKey := []byte("missioncontrol-results")
err = tx.DeleteBucket(resultsKey)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(resultsKey)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
@ -95,7 +95,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// migrateAttemptEncoding migrates payment attempts using the legacy format to
// the new format.
func migrateAttemptEncoding(tx *bbolt.Tx, payHashBucket *bbolt.Bucket) error {
func migrateAttemptEncoding(tx kvdb.RwTx, payHashBucket kvdb.RwBucket) error {
payAttemptBytes := payHashBucket.Get(paymentAttemptInfoKey)
if payAttemptBytes == nil {
return nil

View File

@ -8,7 +8,7 @@ import (
bitcoinCfg "github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/zpay32"
litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
@ -16,10 +16,10 @@ import (
// MigrateInvoices adds invoice htlcs and a separate cltv delta field to the
// invoices.
func MigrateInvoices(tx *bbolt.Tx) error {
func MigrateInvoices(tx kvdb.RwTx) error {
log.Infof("Migrating invoices to new invoice format")
invoiceB := tx.Bucket(invoiceBucket)
invoiceB := tx.ReadWriteBucket(invoiceBucket)
if invoiceB == nil {
return nil
}

View File

@ -8,7 +8,7 @@ import (
"github.com/btcsuite/btcd/btcec"
bitcoinCfg "github.com/btcsuite/btcd/chaincfg"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/zpay32"
litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
)
@ -26,8 +26,8 @@ var (
// beforeMigrationFuncV11 insert the test invoices in the database.
func beforeMigrationFuncV11(t *testing.T, d *DB, invoices []Invoice) {
err := d.Update(func(tx *bbolt.Tx) error {
invoicesBucket, err := tx.CreateBucketIfNotExists(
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoicesBucket, err := tx.CreateTopLevelBucket(
invoiceBucket,
)
if err != nil {

View File

@ -7,7 +7,7 @@ import (
"fmt"
"github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -16,11 +16,11 @@ import (
// (one for nodes and one for edges) to keep track of the last time a node or
// edge was updated on the network. These new indexes allow us to implement the
// newly added graph sync protocol.
func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
func MigrateNodeAndEdgeUpdateIndex(tx kvdb.RwTx) error {
// First, we'll populate the node portion of the new index. Before we
// can add new values to the index, we'll first create the new bucket
// where these items will be housed.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return fmt.Errorf("unable to create node bucket: %v", err)
}
@ -64,7 +64,7 @@ func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
// With the set of nodes updated, we'll now update all edges to have a
// corresponding entry in the edge update index.
edges, err := tx.CreateBucketIfNotExists(edgeBucket)
edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil {
return fmt.Errorf("unable to create edge bucket: %v", err)
}
@ -121,8 +121,8 @@ func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
// invoices an index in the add and/or the settle index. Additionally, all
// existing invoices will have their bytes padded out in order to encode the
// add+settle index as well as the amount paid.
func MigrateInvoiceTimeSeries(tx *bbolt.Tx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket)
func MigrateInvoiceTimeSeries(tx kvdb.RwTx) error {
invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil {
return err
}
@ -258,8 +258,8 @@ func MigrateInvoiceTimeSeries(tx *bbolt.Tx) error {
// migrateInvoiceTimeSeries migration. At the time of writing, the
// OutgoingPayment struct embeds an instance of the Invoice struct. As a
// result, we also need to migrate the internal invoice to the new format.
func MigrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
payBucket := tx.Bucket(paymentBucket)
func MigrateInvoiceTimeSeriesOutgoingPayments(tx kvdb.RwTx) error {
payBucket := tx.ReadWriteBucket(paymentBucket)
if payBucket == nil {
return nil
}
@ -339,18 +339,18 @@ func MigrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
// bucket. It ensures that edges with unknown policies will also have an entry
// in the bucket. After the migration, there will be two edge entries for
// every channel, regardless of whether the policies are known.
func MigrateEdgePolicies(tx *bbolt.Tx) error {
nodes := tx.Bucket(nodeBucket)
func MigrateEdgePolicies(tx kvdb.RwTx) error {
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return nil
}
edges := tx.Bucket(edgeBucket)
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return nil
}
edgeIndex := edges.Bucket(edgeIndexBucket)
edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil {
return nil
}
@ -411,10 +411,10 @@ func MigrateEdgePolicies(tx *bbolt.Tx) error {
// PaymentStatusesMigration is a database migration intended for adding payment
// statuses for each existing payment entity in the bucket, to be able to
// control transitions of statuses and prevent cases such as double payment
func PaymentStatusesMigration(tx *bbolt.Tx) error {
func PaymentStatusesMigration(tx kvdb.RwTx) error {
// Get the bucket dedicated to storing statuses of payments,
// where a key is payment hash, value is payment status.
paymentStatuses, err := tx.CreateBucketIfNotExists(paymentStatusBucket)
paymentStatuses, err := tx.CreateTopLevelBucket(paymentStatusBucket)
if err != nil {
return err
}
@ -422,7 +422,7 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
log.Infof("Migrating database to support payment statuses")
circuitAddKey := []byte("circuit-adds")
circuits := tx.Bucket(circuitAddKey)
circuits := tx.ReadWriteBucket(circuitAddKey)
if circuits != nil {
log.Infof("Marking all known circuits with status InFlight")
@ -455,7 +455,7 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
log.Infof("Marking all existing payments with status Completed")
// Get the bucket dedicated to storing payments
bucket := tx.Bucket(paymentBucket)
bucket := tx.ReadWriteBucket(paymentBucket)
if bucket == nil {
return nil
}
@ -498,14 +498,14 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
// migration also fixes the case where the public keys within edge policies were
// being serialized with an extra byte, causing an even greater error when
// attempting to perform the offset calculation described earlier.
func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
func MigratePruneEdgeUpdateIndex(tx kvdb.RwTx) error {
// To begin the migration, we'll retrieve the update index bucket. If it
// does not exist, we have nothing left to do so we can simply exit.
edges := tx.Bucket(edgeBucket)
edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil {
return nil
}
edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket)
edgeUpdateIndex := edges.NestedReadWriteBucket(edgeUpdateIndexBucket)
if edgeUpdateIndex == nil {
return nil
}
@ -521,7 +521,7 @@ func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
return fmt.Errorf("unable to create/fetch edge index " +
"bucket")
}
nodes, err := tx.CreateBucketIfNotExists(nodeBucket)
nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil {
return fmt.Errorf("unable to make node bucket")
}
@ -612,8 +612,8 @@ func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of
// ChannelCloseSummary to a format where optional fields' presence is indicated
// with boolean markers.
func MigrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error {
closedChanBucket := tx.Bucket(closedChannelBucket)
func MigrateOptionalChannelCloseSummaryFields(tx kvdb.RwTx) error {
closedChanBucket := tx.ReadWriteBucket(closedChannelBucket)
if closedChanBucket == nil {
return nil
}
@ -671,11 +671,11 @@ var messageStoreBucket = []byte("message-store")
// MigrateGossipMessageStoreKeys migrates the key format for gossip messages
// found in the message store to a new one that takes into consideration the
// type of the message being stored.
func MigrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
func MigrateGossipMessageStoreKeys(tx kvdb.RwTx) error {
// We'll start by retrieving the bucket in which these messages are
// stored. If there isn't one, there's nothing left for us to do
// so we can avoid the migration.
messageStore := tx.Bucket(messageStoreBucket)
messageStore := tx.ReadWriteBucket(messageStoreBucket)
if messageStore == nil {
return nil
}
@ -747,10 +747,10 @@ func MigrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
// InFlight (we have no PaymentAttemptInfo available for pre-migration
// payments) we delete those statuses, so only Completed payments remain in the
// new bucket structure.
func MigrateOutgoingPayments(tx *bbolt.Tx) error {
func MigrateOutgoingPayments(tx kvdb.RwTx) error {
log.Infof("Migrating outgoing payments to new bucket structure")
oldPayments := tx.Bucket(paymentBucket)
oldPayments := tx.ReadWriteBucket(paymentBucket)
// Return early if there are no payments to migrate.
if oldPayments == nil {
@ -758,7 +758,7 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
return nil
}
newPayments, err := tx.CreateBucket(paymentsRootBucket)
newPayments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil {
return err
}
@ -767,7 +767,7 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// only attempt to fetch it if needed.
sourcePub := func() ([33]byte, error) {
var pub [33]byte
nodes := tx.Bucket(nodeBucket)
nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil {
return pub, ErrGraphNotFound
}
@ -862,8 +862,8 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// from a database containing duplicate payments to a payment
// hash. To keep this information, we store such duplicate
// payments in a sub-bucket.
if err == bbolt.ErrBucketExists {
pHashBucket := newPayments.Bucket(paymentHash[:])
if err == kvdb.ErrBucketExists {
pHashBucket := newPayments.NestedReadWriteBucket(paymentHash[:])
// Create a bucket for duplicate payments within this
// payment hash's bucket.
@ -922,14 +922,14 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// Now we delete the old buckets. Deleting the payment status buckets
// deletes all payment statuses other than Complete.
err = tx.DeleteBucket(paymentStatusBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(paymentStatusBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
// Finally delete the old payment bucket.
err = tx.DeleteBucket(paymentBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = tx.DeleteTopLevelBucket(paymentBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
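
Two small idioms recur in the migration above: deleting a top-level bucket while tolerating kvdb.ErrBucketNotFound, and falling back to NestedReadWriteBucket when CreateBucket reports kvdb.ErrBucketExists. A sketch of both, using only calls shown in this file; the helper names are illustrative:

package migsketch

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// dropBucketIfPresent deletes a top-level bucket but tolerates it being
// absent, the same shape used when removing the legacy payment buckets above.
func dropBucketIfPresent(tx kvdb.RwTx, key []byte) error {
    err := tx.DeleteTopLevelBucket(key)
    if err != nil && err != kvdb.ErrBucketNotFound {
        return err
    }
    return nil
}

// createOrReuse creates a nested bucket, falling back to the existing one
// when a duplicate is detected, mirroring the ErrBucketExists branch above.
func createOrReuse(parent kvdb.RwBucket, key []byte) (kvdb.RwBucket, error) {
    bucket, err := parent.CreateBucket(key)
    switch {
    case err == nil:
        return bucket, nil
    case err == kvdb.ErrBucketExists:
        return parent.NestedReadWriteBucket(key), nil
    default:
        return nil, err
    }
}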

View File

@ -11,9 +11,9 @@ import (
"time"
"github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -59,8 +59,8 @@ func TestPaymentStatusesMigration(t *testing.T) {
// locally-sourced payment should end up with an InFlight
// status, while the other should remain unchanged, which
// defaults to Grounded.
err = d.Update(func(tx *bbolt.Tx) error {
circuits, err := tx.CreateBucketIfNotExists(
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
circuits, err := tx.CreateTopLevelBucket(
[]byte("circuit-adds"),
)
if err != nil {
@ -377,8 +377,8 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
// Get the old serialization format for this test's
// close summary, and add it to the closed channel bucket.
old := test.oldSerialization(test.closeSummary)
err = d.Update(func(tx *bbolt.Tx) error {
closedChanBucket, err := tx.CreateBucketIfNotExists(
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
closedChanBucket, err := tx.CreateTopLevelBucket(
closedChannelBucket,
)
if err != nil {
@ -404,8 +404,8 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
newSerialization := b.Bytes()
var dbSummary []byte
err = d.View(func(tx *bbolt.Tx) error {
closedChanBucket := tx.Bucket(closedChannelBucket)
err = kvdb.View(d, func(tx kvdb.ReadTx) error {
closedChanBucket := tx.ReadBucket(closedChannelBucket)
if closedChanBucket == nil {
return errors.New("unable to find bucket")
}
@ -482,8 +482,8 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) {
t.Fatalf("unable to serialize message: %v", err)
}
err := db.Update(func(tx *bbolt.Tx) error {
messageStore, err := tx.CreateBucketIfNotExists(
err := kvdb.Update(db, func(tx kvdb.RwTx) error {
messageStore, err := tx.CreateTopLevelBucket(
messageStoreBucket,
)
if err != nil {
@ -503,8 +503,8 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) {
// 3. The message matches the original.
afterMigration := func(db *DB) {
var rawMsg []byte
err := db.View(func(tx *bbolt.Tx) error {
messageStore := tx.Bucket(messageStoreBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
messageStore := tx.ReadBucket(messageStoreBucket)
if messageStore == nil {
return errors.New("message store bucket not " +
"found")
@ -666,8 +666,8 @@ func TestOutgoingPaymentsMigration(t *testing.T) {
// Finally, check that the payment sequence number is updated
// to reflect the migrated payments.
err = d.View(func(tx *bbolt.Tx) error {
payments := tx.Bucket(paymentsRootBucket)
err = kvdb.Update(d, func(tx kvdb.RwTx) error {
payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil {
return fmt.Errorf("payments bucket not found")
}
@ -746,8 +746,8 @@ func TestPaymentRouteSerialization(t *testing.T) {
// We'll first add a series of fake payments, using the existing legacy
// serialization format.
beforeMigrationFunc := func(d *DB) {
err := d.Update(func(tx *bbolt.Tx) error {
paymentsBucket, err := tx.CreateBucket(
err := kvdb.Update(d, func(tx kvdb.RwTx) error {
paymentsBucket, err := tx.CreateTopLevelBucket(
paymentsRootBucket,
)
if err != nil {
@ -798,7 +798,7 @@ func TestPaymentRouteSerialization(t *testing.T) {
// the proper bucket. If this is the duplicate
// payment, then we'll grab the dup bucket,
// otherwise, we'll use the top level bucket.
var payHashBucket *bbolt.Bucket
var payHashBucket kvdb.RwBucket
if i < numPayments-1 {
payHashBucket, err = paymentsBucket.CreateBucket(
payInfo.PaymentHash[:],
@ -807,7 +807,7 @@ func TestPaymentRouteSerialization(t *testing.T) {
t.Fatalf("unable to create payments bucket: %v", err)
}
} else {
payHashBucket = paymentsBucket.Bucket(
payHashBucket = paymentsBucket.NestedReadWriteBucket(
payInfo.PaymentHash[:],
)
dupPayBucket, err := payHashBucket.CreateBucket(

View File

@ -1,12 +1,10 @@
package migration_01_to_11
import (
"github.com/coreos/bbolt"
)
import "github.com/lightningnetwork/lnd/channeldb/kvdb"
// fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket *bbolt.Bucket) PaymentStatus {
func fetchPaymentStatus(bucket kvdb.ReadBucket) PaymentStatus {
if bucket.Get(paymentSettleInfoKey) != nil {
return StatusSucceeded
}

View File

@ -11,7 +11,7 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tlv"
@ -254,14 +254,14 @@ type Payment struct {
func (db *DB) FetchPayments() ([]*Payment, error) {
var payments []*Payment
err := db.View(func(tx *bbolt.Tx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k)
bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
@ -280,13 +280,13 @@ func (db *DB) FetchPayments() ([]*Payment, error) {
// payment hash was possible. These will be found in a
// sub-bucket indexed by their sequence number if
// available.
dup := bucket.Bucket(paymentDuplicateBucket)
dup := bucket.NestedReadBucket(paymentDuplicateBucket)
if dup == nil {
return nil
}
return dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k)
subBucket := dup.NestedReadBucket(k)
if subBucket == nil {
// We expect one bucket for each duplicate to
// be found.
@ -316,7 +316,7 @@ func (db *DB) FetchPayments() ([]*Payment, error) {
return payments, nil
}
func fetchPayment(bucket *bbolt.Bucket) (*Payment, error) {
func fetchPayment(bucket kvdb.ReadBucket) (*Payment, error) {
var (
err error
p = &Payment{}
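
A condensed sketch of the read-only traversal FetchPayments performs over per-payment sub-buckets and their optional duplicate sub-buckets. The bucket names and the callback-based shape are illustrative only:

package paysketch

import "github.com/lightningnetwork/lnd/channeldb/kvdb"

// Placeholder bucket names.
var (
    paymentsRootBucket     = []byte("payments-root-bucket")
    paymentDuplicateBucket = []byte("payment-duplicate-bucket")
)

// forEachPaymentBucket visits every per-payment sub-bucket, including the
// duplicate sub-buckets nested one level deeper, using read-only buckets only.
func forEachPaymentBucket(db kvdb.Backend, cb func(kvdb.ReadBucket) error) error {
    return kvdb.View(db, func(tx kvdb.ReadTx) error {
        payments := tx.ReadBucket(paymentsRootBucket)
        if payments == nil {
            return nil
        }
        return payments.ForEach(func(k, _ []byte) error {
            bucket := payments.NestedReadBucket(k)
            if bucket == nil {
                // Only sub-buckets are expected at this level.
                return nil
            }
            if err := cb(bucket); err != nil {
                return err
            }

            // Older databases may hold duplicate payments to the same
            // hash in a nested sub-bucket.
            dup := bucket.NestedReadBucket(paymentDuplicateBucket)
            if dup == nil {
                return nil
            }
            return dup.ForEach(func(k, _ []byte) error {
                sub := dup.NestedReadBucket(k)
                if sub == nil {
                    return nil
                }
                return cb(sub)
            })
        })
    })
}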

View File

@ -6,13 +6,13 @@ import (
"os"
"testing"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// MakeDB creates a new instance of the ChannelDB for testing purposes. A
// callback which cleans up the created temporary directories is also returned
// and intended to be executed after the test completes.
func MakeDB() (*bbolt.DB, func(), error) {
func MakeDB() (kvdb.Backend, func(), error) {
// Create temporary database for mission control.
file, err := ioutil.TempFile("", "*.db")
if err != nil {
@ -20,7 +20,7 @@ func MakeDB() (*bbolt.DB, func(), error) {
}
dbPath := file.Name()
db, err := bbolt.Open(dbPath, 0600, nil)
db, err := kvdb.Open(kvdb.BoltBackendName, dbPath, true)
if err != nil {
return nil, nil, err
}
@ -36,7 +36,7 @@ func MakeDB() (*bbolt.DB, func(), error) {
// ApplyMigration is a helper test function that encapsulates the general steps
// which are needed to properly check the result of applying a migration function.
func ApplyMigration(t *testing.T,
beforeMigration, afterMigration, migrationFunc func(tx *bbolt.Tx) error,
beforeMigration, afterMigration, migrationFunc func(tx kvdb.RwTx) error,
shouldFail bool) {
cdb, cleanUp, err := MakeDB()
@ -47,7 +47,7 @@ func ApplyMigration(t *testing.T,
// beforeMigration is usually used for populating the database
// with test data.
err = cdb.Update(beforeMigration)
err = kvdb.Update(cdb, beforeMigration)
if err != nil {
t.Fatal(err)
}
@ -65,14 +65,14 @@ func ApplyMigration(t *testing.T,
// afterMigration is usually used for checking the database state and
// returning an error if something went wrong.
err = cdb.Update(afterMigration)
err = kvdb.Update(cdb, afterMigration)
if err != nil {
t.Fatal(err)
}
}()
// Apply migration.
err = cdb.Update(migrationFunc)
err = kvdb.Update(cdb, migrationFunc)
if err != nil {
t.Fatal(err)
}

View File

@ -7,7 +7,7 @@ import (
"fmt"
"strings"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
// DumpDB dumps go code describing the contents of the database to stdout. This
@ -21,8 +21,8 @@ import (
// hex("1111"): hex("5783492373"),
// },
// }
func DumpDB(tx *bbolt.Tx, rootKey []byte) error {
bucket := tx.Bucket(rootKey)
func DumpDB(tx kvdb.ReadTx, rootKey []byte) error {
bucket := tx.ReadBucket(rootKey)
if bucket == nil {
return fmt.Errorf("bucket %v not found", string(rootKey))
}
@ -30,13 +30,13 @@ func DumpDB(tx *bbolt.Tx, rootKey []byte) error {
return dumpBucket(bucket)
}
func dumpBucket(bucket *bbolt.Bucket) error {
func dumpBucket(bucket kvdb.ReadBucket) error {
fmt.Printf("map[string]interface{} {\n")
err := bucket.ForEach(func(k, v []byte) error {
key := toString(k)
fmt.Printf("%v: ", key)
subBucket := bucket.Bucket(k)
subBucket := bucket.NestedReadBucket(k)
if subBucket != nil {
err := dumpBucket(subBucket)
if err != nil {
@ -58,8 +58,8 @@ func dumpBucket(bucket *bbolt.Bucket) error {
}
// RestoreDB primes the database with the given data set.
func RestoreDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error {
bucket, err := tx.CreateBucket(rootKey)
func RestoreDB(tx kvdb.RwTx, rootKey []byte, data map[string]interface{}) error {
bucket, err := tx.CreateTopLevelBucket(rootKey)
if err != nil {
return err
}
@ -67,7 +67,7 @@ func RestoreDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error
return restoreDB(bucket, data)
}
func restoreDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
func restoreDB(bucket kvdb.RwBucket, data map[string]interface{}) error {
for k, v := range data {
key := []byte(k)
@ -100,8 +100,8 @@ func restoreDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
}
// VerifyDB verifies the database against the given data set.
func VerifyDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error {
bucket := tx.Bucket(rootKey)
func VerifyDB(tx kvdb.ReadTx, rootKey []byte, data map[string]interface{}) error {
bucket := tx.ReadBucket(rootKey)
if bucket == nil {
return fmt.Errorf("bucket %v not found", string(rootKey))
}
@ -109,7 +109,7 @@ func VerifyDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error {
return verifyDB(bucket, data)
}
func verifyDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
func verifyDB(bucket kvdb.ReadBucket, data map[string]interface{}) error {
for k, v := range data {
key := []byte(k)
@ -126,7 +126,7 @@ func verifyDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
// Key contains a sub-bucket.
case map[string]interface{}:
subBucket := bucket.Bucket(key)
subBucket := bucket.NestedReadBucket(key)
if subBucket == nil {
return fmt.Errorf("bucket %v not found", k)
}

View File

@ -8,7 +8,7 @@ import (
"github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var (
@ -101,8 +101,8 @@ func (l *LinkNode) Sync() error {
// Finally update the database by storing the link node and updating
// any relevant indexes.
return l.db.Update(func(tx *bbolt.Tx) error {
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
return kvdb.Update(l.db, func(tx kvdb.RwTx) error {
nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return ErrLinkNodesNotFound
}
@ -114,7 +114,7 @@ func (l *LinkNode) Sync() error {
// putLinkNode serializes then writes the encoded version of the passed link
// node into the nodeMetaBucket. This function is provided in order to allow
// the re-use of a database transaction across many operations.
func putLinkNode(nodeMetaBucket *bbolt.Bucket, l *LinkNode) error {
func putLinkNode(nodeMetaBucket kvdb.RwBucket, l *LinkNode) error {
// First serialize the LinkNode into its raw-bytes encoding.
var b bytes.Buffer
if err := serializeLinkNode(&b, l); err != nil {
@ -130,13 +130,13 @@ func putLinkNode(nodeMetaBucket *bbolt.Bucket, l *LinkNode) error {
// DeleteLinkNode removes the link node with the given identity from the
// database.
func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) error {
return db.Update(func(tx *bbolt.Tx) error {
return kvdb.Update(db, func(tx kvdb.RwTx) error {
return db.deleteLinkNode(tx, identity)
})
}
func (db *DB) deleteLinkNode(tx *bbolt.Tx, identity *btcec.PublicKey) error {
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) error {
nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return ErrLinkNodesNotFound
}
@ -150,7 +150,7 @@ func (db *DB) deleteLinkNode(tx *bbolt.Tx, identity *btcec.PublicKey) error {
// key cannot be found, then ErrNodeNotFound is returned.
func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) {
var linkNode *LinkNode
err := db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
node, err := fetchLinkNode(tx, identity)
if err != nil {
return err
@ -163,10 +163,10 @@ func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) {
return linkNode, err
}
func fetchLinkNode(tx *bbolt.Tx, targetPub *btcec.PublicKey) (*LinkNode, error) {
func fetchLinkNode(tx kvdb.ReadTx, targetPub *btcec.PublicKey) (*LinkNode, error) {
// First fetch the bucket for storing node metadata, bailing out early
// if it hasn't been created yet.
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return nil, ErrLinkNodesNotFound
}
@ -191,7 +191,7 @@ func fetchLinkNode(tx *bbolt.Tx, targetPub *btcec.PublicKey) (*LinkNode, error)
// whom we have active channels.
func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) {
var linkNodes []*LinkNode
err := db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
nodes, err := db.fetchAllLinkNodes(tx)
if err != nil {
return err
@ -209,8 +209,8 @@ func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) {
// fetchAllLinkNodes uses an existing database transaction to fetch all nodes
// with whom we have active channels.
func (db *DB) fetchAllLinkNodes(tx *bbolt.Tx) ([]*LinkNode, error) {
nodeMetaBucket := tx.Bucket(nodeInfoBucket)
func (db *DB) fetchAllLinkNodes(tx kvdb.ReadTx) ([]*LinkNode, error) {
nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil {
return nil, ErrLinkNodesNotFound
}

View File

@ -6,7 +6,7 @@ import (
"errors"
"fmt"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
)
@ -65,7 +65,7 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
infoBytes := b.Bytes()
var updateErr error
err := p.db.Batch(func(tx *bbolt.Tx) error {
err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
// Reset the update error, to avoid carrying over an error
// from a previous execution of the batched db transaction.
updateErr = nil
@ -130,8 +130,8 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
// We'll delete any lingering HTLCs to start with, in case we
// are initializing a payment that was attempted earlier, but
// left in a state where we could retry.
err = bucket.DeleteBucket(paymentHtlcsBucket)
if err != nil && err != bbolt.ErrBucketNotFound {
err = bucket.DeleteNestedBucket(paymentHtlcsBucket)
if err != nil && err != kvdb.ErrBucketNotFound {
return err
}
@ -162,9 +162,8 @@ func (p *PaymentControl) RegisterAttempt(paymentHash lntypes.Hash,
htlcIDBytes := make([]byte, 8)
binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID)
return p.db.Update(func(tx *bbolt.Tx) error {
// Get the payment bucket to register this new attempt in.
bucket, err := fetchPaymentBucket(tx, paymentHash)
return kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
if err != nil {
return err
}
@ -234,10 +233,10 @@ func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash,
binary.BigEndian.PutUint64(htlcIDBytes, attemptID)
var payment *MPPayment
err := p.db.Batch(func(tx *bbolt.Tx) error {
// Fetch bucket that contains all information for the payment
// with this hash.
bucket, err := fetchPaymentBucket(tx, paymentHash)
err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
payment = nil
bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
if err != nil {
return err
}
@ -247,12 +246,12 @@ func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash,
return err
}
htlcsBucket := bucket.Bucket(paymentHtlcsBucket)
htlcsBucket := bucket.NestedReadWriteBucket(paymentHtlcsBucket)
if htlcsBucket == nil {
return fmt.Errorf("htlcs bucket not found")
}
htlcBucket := htlcsBucket.Bucket(htlcIDBytes)
htlcBucket := htlcsBucket.NestedReadWriteBucket(htlcIDBytes)
if htlcBucket == nil {
return fmt.Errorf("HTLC with ID %v not registered",
attemptID)
@ -286,13 +285,13 @@ func (p *PaymentControl) Fail(paymentHash lntypes.Hash,
updateErr error
payment *MPPayment
)
err := p.db.Batch(func(tx *bbolt.Tx) error {
err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
// Reset the update error, to avoid carrying over an error
// from a previous execution of the batched db transaction.
updateErr = nil
payment = nil
bucket, err := fetchPaymentBucket(tx, paymentHash)
bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
if err == ErrPaymentNotInitiated {
updateErr = ErrPaymentNotInitiated
return nil
@ -341,7 +340,7 @@ func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) (
*MPPayment, error) {
var payment *MPPayment
err := p.db.View(func(tx *bbolt.Tx) error {
err := kvdb.View(p.db, func(tx kvdb.ReadTx) error {
bucket, err := fetchPaymentBucket(tx, paymentHash)
if err != nil {
return err
@ -360,10 +359,10 @@ func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) (
// createPaymentBucket creates or fetches the sub-bucket assigned to this
// payment hash.
func createPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
*bbolt.Bucket, error) {
func createPaymentBucket(tx kvdb.RwTx, paymentHash lntypes.Hash) (
kvdb.RwBucket, error) {
payments, err := tx.CreateBucketIfNotExists(paymentsRootBucket)
payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil {
return nil, err
}
@ -373,15 +372,34 @@ func createPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
// fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If
// the bucket does not exist, it returns ErrPaymentNotInitiated.
func fetchPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
*bbolt.Bucket, error) {
func fetchPaymentBucket(tx kvdb.ReadTx, paymentHash lntypes.Hash) (
kvdb.ReadBucket, error) {
payments := tx.Bucket(paymentsRootBucket)
payments := tx.ReadBucket(paymentsRootBucket)
if payments == nil {
return nil, ErrPaymentNotInitiated
}
bucket := payments.Bucket(paymentHash[:])
bucket := payments.NestedReadBucket(paymentHash[:])
if bucket == nil {
return nil, ErrPaymentNotInitiated
}
return bucket, nil
}
// fetchPaymentBucketUpdate is identical to fetchPaymentBucket, but it returns a
// bucket that can be written to.
func fetchPaymentBucketUpdate(tx kvdb.RwTx, paymentHash lntypes.Hash) (
kvdb.RwBucket, error) {
payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil {
return nil, ErrPaymentNotInitiated
}
bucket := payments.NestedReadWriteBucket(paymentHash[:])
if bucket == nil {
return nil, ErrPaymentNotInitiated
}
@ -391,8 +409,8 @@ func fetchPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
// nextPaymentSequence returns the next sequence number to store for a new
// payment.
func nextPaymentSequence(tx *bbolt.Tx) ([]byte, error) {
payments, err := tx.CreateBucketIfNotExists(paymentsRootBucket)
func nextPaymentSequence(tx kvdb.RwTx) ([]byte, error) {
payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil {
return nil, err
}
@ -409,8 +427,8 @@ func nextPaymentSequence(tx *bbolt.Tx) ([]byte, error) {
// fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) {
htlcsBucket := bucket.Bucket(paymentHtlcsBucket)
func fetchPaymentStatus(bucket kvdb.ReadBucket) (PaymentStatus, error) {
htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
if htlcsBucket != nil {
htlcs, err := fetchHtlcAttempts(htlcsBucket)
if err != nil {
@ -424,7 +442,6 @@ func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) {
return StatusSucceeded, nil
}
}
}
if bucket.Get(paymentFailInfoKey) != nil {
@ -441,7 +458,7 @@ func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) {
// ensureInFlight checks whether the payment found in the given bucket has
// status InFlight, and returns an error otherwise. This should be used to
// ensure we only mark in-flight payments as succeeded or failed.
func ensureInFlight(bucket *bbolt.Bucket) error {
func ensureInFlight(bucket kvdb.ReadBucket) error {
paymentStatus, err := fetchPaymentStatus(bucket)
if err != nil {
return err
@ -486,14 +503,14 @@ type InFlightPayment struct {
// FetchInFlightPayments returns all payments with status InFlight.
func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) {
var inFlights []*InFlightPayment
err := p.db.View(func(tx *bbolt.Tx) error {
payments := tx.Bucket(paymentsRootBucket)
err := kvdb.View(p.db, func(tx kvdb.ReadTx) error {
payments := tx.ReadBucket(paymentsRootBucket)
if payments == nil {
return nil
}
return payments.ForEach(func(k, _ []byte) error {
bucket := payments.Bucket(k)
bucket := payments.NestedReadBucket(k)
if bucket == nil {
return fmt.Errorf("non bucket element")
}
@ -523,7 +540,9 @@ func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) {
return err
}
htlcsBucket := bucket.Bucket(paymentHtlcsBucket)
htlcsBucket := bucket.NestedReadBucket(
paymentHtlcsBucket,
)
if htlcsBucket == nil {
return nil
}
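
The payment_control.go changes introduce the read-only/read-write twin fetchPaymentBucket / fetchPaymentBucketUpdate and move the write paths onto kvdb.Batch. As a rough sketch of the resulting shape, here is a hypothetical method written as if it lived inside the channeldb package, so fetchPaymentBucketUpdate and the imports from the diff above are in scope; the method and the key it writes are purely illustrative.

// markPaymentSeen is illustrative only and not part of channeldb; it shows
// the shape of a batched write path after the conversion.
func (p *PaymentControl) markPaymentSeen(paymentHash lntypes.Hash) error {
	return kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
		// The *Update twin returns a kvdb.RwBucket, so the bucket can
		// be written within this transaction; the read-only
		// fetchPaymentBucket would not type-check here.
		bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
		if err != nil {
			return err
		}

		// Hypothetical key, used only to demonstrate a write.
		return bucket.Put([]byte("sketch-key"), []byte{1})
	})
}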

View File

@ -9,7 +9,7 @@ import (
"time"
"github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record"
@ -200,14 +200,14 @@ type PaymentCreationInfo struct {
func (db *DB) FetchPayments() ([]*MPPayment, error) {
var payments []*MPPayment
err := db.View(func(tx *bbolt.Tx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket)
err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil {
return nil
}
return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k)
bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
@ -232,7 +232,6 @@ func (db *DB) FetchPayments() ([]*MPPayment, error) {
}
payments = append(payments, duplicatePayments...)
return nil
})
})
@ -248,7 +247,7 @@ func (db *DB) FetchPayments() ([]*MPPayment, error) {
return payments, nil
}
func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) {
func fetchPayment(bucket kvdb.ReadBucket) (*MPPayment, error) {
seqBytes := bucket.Get(paymentSequenceKey)
if seqBytes == nil {
return nil, fmt.Errorf("sequence number not found")
@ -276,7 +275,7 @@ func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) {
}
var htlcs []HTLCAttempt
htlcsBucket := bucket.Bucket(paymentHtlcsBucket)
htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
if htlcsBucket != nil {
// Get the payment attempts. This can be empty.
htlcs, err = fetchHtlcAttempts(htlcsBucket)
@ -304,12 +303,12 @@ func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) {
// fetchHtlcAttempts retrieves all htlc attempts made for the payment found in
// the given bucket.
func fetchHtlcAttempts(bucket *bbolt.Bucket) ([]HTLCAttempt, error) {
func fetchHtlcAttempts(bucket kvdb.ReadBucket) ([]HTLCAttempt, error) {
htlcs := make([]HTLCAttempt, 0)
err := bucket.ForEach(func(k, _ []byte) error {
aid := byteOrder.Uint64(k)
htlcBucket := bucket.Bucket(k)
htlcBucket := bucket.NestedReadBucket(k)
attemptInfo, err := fetchHtlcAttemptInfo(
htlcBucket,
@ -347,7 +346,7 @@ func fetchHtlcAttempts(bucket *bbolt.Bucket) ([]HTLCAttempt, error) {
// fetchHtlcAttemptInfo fetches the payment attempt info for this htlc from the
// bucket.
func fetchHtlcAttemptInfo(bucket *bbolt.Bucket) (*HTLCAttemptInfo, error) {
func fetchHtlcAttemptInfo(bucket kvdb.ReadBucket) (*HTLCAttemptInfo, error) {
b := bucket.Get(htlcAttemptInfoKey)
if b == nil {
return nil, errNoAttemptInfo
@ -359,7 +358,7 @@ func fetchHtlcAttemptInfo(bucket *bbolt.Bucket) (*HTLCAttemptInfo, error) {
// fetchHtlcSettleInfo retrieves the settle info for the htlc. If the htlc isn't
// settled, nil is returned.
func fetchHtlcSettleInfo(bucket *bbolt.Bucket) (*HTLCSettleInfo, error) {
func fetchHtlcSettleInfo(bucket kvdb.ReadBucket) (*HTLCSettleInfo, error) {
b := bucket.Get(htlcSettleInfoKey)
if b == nil {
// Settle info is optional.
@ -372,7 +371,7 @@ func fetchHtlcSettleInfo(bucket *bbolt.Bucket) (*HTLCSettleInfo, error) {
// fetchHtlcFailInfo retrieves the failure info for the htlc. If the htlc hasn't
// failed, nil is returned.
func fetchHtlcFailInfo(bucket *bbolt.Bucket) (*HTLCFailInfo, error) {
func fetchHtlcFailInfo(bucket kvdb.ReadBucket) (*HTLCFailInfo, error) {
b := bucket.Get(htlcFailInfoKey)
if b == nil {
// Fail info is optional.
@ -385,15 +384,15 @@ func fetchHtlcFailInfo(bucket *bbolt.Bucket) (*HTLCFailInfo, error) {
// DeletePayments deletes all completed and failed payments from the DB.
func (db *DB) DeletePayments() error {
return db.Update(func(tx *bbolt.Tx) error {
payments := tx.Bucket(paymentsRootBucket)
return kvdb.Update(db, func(tx kvdb.RwTx) error {
payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil {
return nil
}
var deleteBuckets [][]byte
err := payments.ForEach(func(k, _ []byte) error {
bucket := payments.Bucket(k)
bucket := payments.NestedReadWriteBucket(k)
if bucket == nil {
// We only expect sub-buckets to be found in
// this top-level bucket.
@ -420,7 +419,7 @@ func (db *DB) DeletePayments() error {
}
for _, k := range deleteBuckets {
if err := payments.DeleteBucket(k); err != nil {
if err := payments.DeleteNestedBucket(k); err != nil {
return err
}
}
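
The payments.go read paths above all follow the same shape: open a read transaction, walk the nested per-payment sub-buckets with NestedReadBucket, and decode their contents. A small, hypothetical sketch of that iteration, written as if inside channeldb so paymentsRootBucket is in scope:

// countPayments is illustrative only; it counts payment sub-buckets using
// exclusively read-only kvdb types.
func countPayments(db kvdb.Backend) (int, error) {
	var n int
	err := kvdb.View(db, func(tx kvdb.ReadTx) error {
		payments := tx.ReadBucket(paymentsRootBucket)
		if payments == nil {
			// No payments have ever been made.
			return nil
		}
		return payments.ForEach(func(k, _ []byte) error {
			// Each payment lives in a nested sub-bucket keyed by
			// its payment hash; plain key/value pairs are skipped.
			if payments.NestedReadBucket(k) != nil {
				n++
			}
			return nil
		})
	})
	return n, err
}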

View File

@ -8,8 +8,8 @@ import (
"bytes"
"github.com/coreos/bbolt"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -61,12 +61,12 @@ func (s *WaitingProofStore) Add(proof *WaitingProof) error {
s.mu.Lock()
defer s.mu.Unlock()
err := s.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
var err error
var b bytes.Buffer
// Get or create the bucket.
bucket, err := tx.CreateBucketIfNotExists(waitingProofsBucketKey)
bucket, err := tx.CreateTopLevelBucket(waitingProofsBucketKey)
if err != nil {
return err
}
@ -100,9 +100,9 @@ func (s *WaitingProofStore) Remove(key WaitingProofKey) error {
return ErrWaitingProofNotFound
}
err := s.db.Update(func(tx *bbolt.Tx) error {
err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
// Get or create the top bucket.
bucket := tx.Bucket(waitingProofsBucketKey)
bucket := tx.ReadWriteBucket(waitingProofsBucketKey)
if bucket == nil {
return ErrWaitingProofNotFound
}
@ -123,8 +123,8 @@ func (s *WaitingProofStore) Remove(key WaitingProofKey) error {
// ForAll iterates through all waiting proofs and passes each waiting proof
// to the given callback.
func (s *WaitingProofStore) ForAll(cb func(*WaitingProof) error) error {
return s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(waitingProofsBucketKey)
return kvdb.View(s.db, func(tx kvdb.ReadTx) error {
bucket := tx.ReadBucket(waitingProofsBucketKey)
if bucket == nil {
return ErrWaitingProofNotFound
}
@ -158,8 +158,8 @@ func (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, error) {
return nil, ErrWaitingProofNotFound
}
err := s.db.View(func(tx *bbolt.Tx) error {
bucket := tx.Bucket(waitingProofsBucketKey)
err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
bucket := tx.ReadBucket(waitingProofsBucketKey)
if bucket == nil {
return ErrWaitingProofNotFound
}
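
In waitingproof.go, the only structural change beyond the transaction wrappers is that buckets rooted directly at the transaction are now created with CreateTopLevelBucket rather than tx.CreateBucketIfNotExists. A hypothetical write helper showing that pattern, assuming the waitingProofsBucketKey from the diff is in scope:

// storeProofBlob is illustrative only: it creates the top-level bucket if
// needed and stores an opaque blob under the given key.
func storeProofBlob(db kvdb.Backend, key, blob []byte) error {
	return kvdb.Update(db, func(tx kvdb.RwTx) error {
		bucket, err := tx.CreateTopLevelBucket(waitingProofsBucketKey)
		if err != nil {
			return err
		}
		return bucket.Put(key, blob)
	})
}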

View File

@ -3,7 +3,7 @@ package channeldb
import (
"fmt"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes"
)
@ -106,8 +106,8 @@ func (w *WitnessCache) addWitnessEntries(wType WitnessType,
return nil
}
return w.db.Batch(func(tx *bbolt.Tx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey)
return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil {
return err
}
@ -150,8 +150,8 @@ func (w *WitnessCache) LookupSha256Witness(hash lntypes.Hash) (lntypes.Preimage,
// will be returned.
func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]byte, error) {
var witness []byte
err := w.db.View(func(tx *bbolt.Tx) error {
witnessBucket := tx.Bucket(witnessBucketKey)
err := kvdb.View(w.db, func(tx kvdb.ReadTx) error {
witnessBucket := tx.ReadBucket(witnessBucketKey)
if witnessBucket == nil {
return ErrNoWitnesses
}
@ -160,7 +160,7 @@ func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]by
if err != nil {
return err
}
witnessTypeBucket := witnessBucket.Bucket(witnessTypeBucketKey)
witnessTypeBucket := witnessBucket.NestedReadBucket(witnessTypeBucketKey)
if witnessTypeBucket == nil {
return ErrNoWitnesses
}
@ -189,8 +189,8 @@ func (w *WitnessCache) DeleteSha256Witness(hash lntypes.Hash) error {
// deleteWitness attempts to delete a particular witness from the database.
func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error {
return w.db.Batch(func(tx *bbolt.Tx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey)
return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil {
return err
}
@ -213,8 +213,8 @@ func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error
// DeleteWitnessClass attempts to delete an *entire* class of witnesses. After
// this function returns with a non-nil error,
func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error {
return w.db.Batch(func(tx *bbolt.Tx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey)
return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil {
return err
}
@ -224,6 +224,6 @@ func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error {
return err
}
return witnessBucket.DeleteBucket(witnessTypeBucketKey)
return witnessBucket.DeleteNestedBucket(witnessTypeBucketKey)
})
}
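
The witness cache writes above all run through kvdb.Batch(w.db.Backend, ...), and the bucket-deletion call is renamed to DeleteNestedBucket. A hypothetical helper tying those two changes together, written as if inside channeldb and assuming DeleteNestedBucket reports a missing bucket as kvdb.ErrBucketNotFound, as the payment-control hunk earlier in this diff suggests:

// dropWitnessBucket is illustrative only; it removes an entire nested
// witness bucket inside a batched read-write transaction.
func dropWitnessBucket(db *DB, witnessTypeBucketKey []byte) error {
	return kvdb.Batch(db.Backend, func(tx kvdb.RwTx) error {
		witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
		if err != nil {
			return err
		}

		// Treat a missing nested bucket as a no-op.
		err = witnessBucket.DeleteNestedBucket(witnessTypeBucketKey)
		if err != nil && err != kvdb.ErrBucketNotFound {
			return err
		}
		return nil
	})
}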

go.mod
View File

@ -45,8 +45,9 @@ require (
github.com/rogpeppe/fastuuid v1.2.0 // indirect
github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02
github.com/urfave/cli v1.18.0
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
go.etcd.io/bbolt v1.3.3
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922
google.golang.org/grpc v1.19.0