2015-12-26 21:35:15 +03:00
|
|
|
package channeldb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"encoding/binary"
|
2016-03-24 08:11:57 +03:00
|
|
|
"fmt"
|
2016-03-23 04:46:30 +03:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2015-12-26 21:35:15 +03:00
|
|
|
"sync"
|
|
|
|
|
2018-03-11 06:00:57 +03:00
|
|
|
"github.com/coreos/bbolt"
|
2018-01-23 07:38:17 +03:00
|
|
|
"github.com/go-errors/errors"
|
2018-06-05 04:34:16 +03:00
|
|
|
"github.com/btcsuite/btcd/btcec"
|
|
|
|
"github.com/btcsuite/btcd/wire"
|
2016-03-23 04:46:30 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// dbName is the filename of the channel database within the target
	// directory.
	dbName = "channel.db"

	// dbFilePermission is the file mode the database file is opened or
	// created with: read/write for the owner only.
	dbFilePermission = 0600
)
|
|
|
|
|
2016-11-23 00:57:26 +03:00
|
|
|
// migration is a function which takes a prior outdated version of the
// database and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database. It runs inside the read-write
// transaction it is handed, so a failing migration is rolled back atomically.
type migration func(tx *bolt.Tx) error
|
|
|
|
|
|
|
|
// version pairs a database schema version number with the migration that
// upgrades the database from the prior version to it.
type version struct {
	// number is the schema version this entry represents.
	number uint32

	// migration, if non-nil, upgrades the database from the previous
	// schema version to this one.
	migration migration
}
|
|
|
|
|
2015-12-26 21:35:15 +03:00
|
|
|
var (
	// dbVersions stores all known versions of the database. If the
	// current version of the database doesn't match the latest version,
	// this list is used to retrieve all migration functions that need to
	// be applied to the current db.
	dbVersions = []version{
		{
			// The base DB version requires no migration.
			number:    0,
			migration: nil,
		},
		{
			// The version of the database where two new indexes
			// for the update time of node and channel updates were
			// added.
			number:    1,
			migration: migrateNodeAndEdgeUpdateIndex,
		},
		{
			// The DB version that added the invoice event time
			// series.
			number:    2,
			migration: migrateInvoiceTimeSeries,
		},
		{
			// The DB version that updated the embedded invoice in
			// outgoing payments to match the new format.
			number:    3,
			migration: migrateInvoiceTimeSeriesOutgoingPayments,
		},
	}

	// byteOrder is the byte order used for all serialized integer keys
	// and values. Big endian is the preferred byte order, due to cursor
	// scans over integer keys iterating in order.
	byteOrder = binary.BigEndian
)
|
|
|
|
|
|
|
|
// bufPool recycles bytes.Buffer instances to reduce allocation churn in
// serialization-heavy paths.
var bufPool = &sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}
|
|
|
|
|
2016-11-28 05:32:45 +03:00
|
|
|
// DB is the primary datastore for the lnd daemon. The database stores
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
	// The embedded bolt handle provides direct access to the underlying
	// key/value store.
	*bolt.DB

	// dbPath is the directory the database file lives in, as passed to
	// Open.
	dbPath string
}
|
|
|
|
|
2016-11-28 05:32:45 +03:00
|
|
|
// Open opens an existing channeldb. Any necessary schemas migrations due to
|
2016-12-27 06:50:23 +03:00
|
|
|
// updates will take place as necessary.
|
2016-12-22 23:09:19 +03:00
|
|
|
func Open(dbPath string) (*DB, error) {
|
2016-03-24 08:11:57 +03:00
|
|
|
path := filepath.Join(dbPath, dbName)
|
2015-12-26 21:35:15 +03:00
|
|
|
|
2016-03-25 00:31:46 +03:00
|
|
|
if !fileExists(path) {
|
|
|
|
if err := createChannelDB(dbPath); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2016-03-24 08:11:57 +03:00
|
|
|
|
2016-11-22 23:50:27 +03:00
|
|
|
bdb, err := bolt.Open(path, dbFilePermission, nil)
|
2016-03-24 08:11:57 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-11-23 00:57:26 +03:00
|
|
|
chanDB := &DB{
|
2016-12-22 23:09:19 +03:00
|
|
|
DB: bdb,
|
|
|
|
dbPath: dbPath,
|
2016-11-23 00:57:26 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Synchronize the version of database and apply migrations if needed.
|
|
|
|
if err := chanDB.syncVersions(dbVersions); err != nil {
|
|
|
|
bdb.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return chanDB, nil
|
2016-03-24 08:11:57 +03:00
|
|
|
}
|
|
|
|
|
2017-12-13 12:28:58 +03:00
|
|
|
// Path returns the file path to the channel database.
//
// NOTE(review): dbPath is the directory given to Open that contains the DB
// file, not the full path of the file itself — confirm callers expect the
// directory.
func (d *DB) Path() string {
	return d.dbPath
}
|
|
|
|
|
2016-06-23 02:17:19 +03:00
|
|
|
// Wipe completely deletes all saved state within all used buckets within the
|
|
|
|
// database. The deletion is done in a single transaction, therefore this
|
|
|
|
// operation is fully atomic.
|
2015-12-26 21:35:15 +03:00
|
|
|
func (d *DB) Wipe() error {
|
2016-11-28 05:48:57 +03:00
|
|
|
return d.Update(func(tx *bolt.Tx) error {
|
2016-07-22 02:16:13 +03:00
|
|
|
err := tx.DeleteBucket(openChannelBucket)
|
|
|
|
if err != nil && err != bolt.ErrBucketNotFound {
|
2016-06-23 02:15:07 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-07-22 02:16:13 +03:00
|
|
|
err = tx.DeleteBucket(closedChannelBucket)
|
|
|
|
if err != nil && err != bolt.ErrBucketNotFound {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-09-24 01:00:38 +03:00
|
|
|
err = tx.DeleteBucket(invoiceBucket)
|
|
|
|
if err != nil && err != bolt.ErrBucketNotFound {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-11-08 05:18:57 +03:00
|
|
|
err = tx.DeleteBucket(nodeInfoBucket)
|
|
|
|
if err != nil && err != bolt.ErrBucketNotFound {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-12-08 09:47:01 +03:00
|
|
|
err = tx.DeleteBucket(nodeBucket)
|
|
|
|
if err != nil && err != bolt.ErrBucketNotFound {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
err = tx.DeleteBucket(edgeBucket)
|
|
|
|
if err != nil && err != bolt.ErrBucketNotFound {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-27 06:50:23 +03:00
|
|
|
err = tx.DeleteBucket(edgeIndexBucket)
|
|
|
|
if err != nil && err != bolt.ErrBucketNotFound {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-20 03:58:27 +03:00
|
|
|
err = tx.DeleteBucket(graphMetaBucket)
|
|
|
|
if err != nil && err != bolt.ErrBucketNotFound {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-08 09:47:01 +03:00
|
|
|
|
2016-07-22 02:16:13 +03:00
|
|
|
return nil
|
2015-12-26 21:35:15 +03:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-06-23 02:17:19 +03:00
|
|
|
// createChannelDB creates and initializes a fresh version of channeldb. In
|
|
|
|
// the case that the target path has not yet been created or doesn't yet exist,
|
|
|
|
// then the path is created. Additionally, all required top-level buckets used
|
|
|
|
// within the database are created.
|
2016-03-25 00:31:46 +03:00
|
|
|
func createChannelDB(dbPath string) error {
|
2016-03-24 08:11:57 +03:00
|
|
|
if !fileExists(dbPath) {
|
|
|
|
if err := os.MkdirAll(dbPath, 0700); err != nil {
|
2016-03-25 00:31:46 +03:00
|
|
|
return err
|
2016-03-23 04:46:30 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
path := filepath.Join(dbPath, dbName)
|
2016-11-22 23:50:27 +03:00
|
|
|
bdb, err := bolt.Open(path, dbFilePermission, nil)
|
2016-03-23 04:46:30 +03:00
|
|
|
if err != nil {
|
2016-03-25 00:31:46 +03:00
|
|
|
return err
|
2016-03-23 04:46:30 +03:00
|
|
|
}
|
|
|
|
|
2016-03-24 08:11:57 +03:00
|
|
|
err = bdb.Update(func(tx *bolt.Tx) error {
|
|
|
|
if _, err := tx.CreateBucket(openChannelBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-12-26 21:35:15 +03:00
|
|
|
|
2016-03-24 08:11:57 +03:00
|
|
|
if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-12-26 21:35:15 +03:00
|
|
|
|
2016-10-26 00:04:42 +03:00
|
|
|
if _, err := tx.CreateBucket(invoiceBucket); err != nil {
|
2016-03-24 08:11:57 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-10-26 00:04:42 +03:00
|
|
|
if _, err := tx.CreateBucket(nodeInfoBucket); err != nil {
|
2016-09-24 01:00:38 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-12-08 09:47:01 +03:00
|
|
|
if _, err := tx.CreateBucket(nodeBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := tx.CreateBucket(edgeBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-27 06:50:23 +03:00
|
|
|
if _, err := tx.CreateBucket(edgeIndexBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-20 03:58:27 +03:00
|
|
|
if _, err := tx.CreateBucket(graphMetaBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-12-08 09:47:01 +03:00
|
|
|
|
2016-11-22 23:50:27 +03:00
|
|
|
if _, err := tx.CreateBucket(metaBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-11-28 05:48:57 +03:00
|
|
|
meta := &Meta{
|
|
|
|
DbVersionNumber: getLatestDBVersion(dbVersions),
|
|
|
|
}
|
|
|
|
return putMeta(meta, tx)
|
2016-03-24 08:11:57 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
2016-03-25 00:31:46 +03:00
|
|
|
return fmt.Errorf("unable to create new channeldb")
|
2016-03-24 08:11:57 +03:00
|
|
|
}
|
|
|
|
|
2016-03-25 00:31:46 +03:00
|
|
|
return bdb.Close()
|
2015-12-26 21:35:15 +03:00
|
|
|
}
|
2016-03-23 04:46:30 +03:00
|
|
|
|
2016-06-23 02:17:19 +03:00
|
|
|
// fileExists returns true if the file exists, and false otherwise. Only a
// confirmed "does not exist" error is treated as absence; any other stat
// failure is optimistically reported as existing.
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}
|
|
|
|
|
2017-02-23 22:56:47 +03:00
|
|
|
// FetchOpenChannels returns all stored currently active/open channels
// associated with the target nodeID. In the case that no active channels are
// known to have been created with this node, then a zero-length slice is
// returned.
func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
	var channels []*OpenChannel
	err := d.View(func(tx *bolt.Tx) error {
		// Get the bucket dedicated to storing the metadata for open
		// channels.
		openChanBucket := tx.Bucket(openChannelBucket)
		if openChanBucket == nil {
			// No open channels at all yet; return the empty slice.
			return nil
		}

		// Within this top level bucket, fetch the bucket dedicated to
		// storing open channel data specific to the remote node.
		pub := nodeID.SerializeCompressed()
		nodeChanBucket := openChanBucket.Bucket(pub)
		if nodeChanBucket == nil {
			// No channels with this particular node.
			return nil
		}

		// Next, we'll need to go down an additional layer in order to
		// retrieve the channels for each chain the node knows of.
		return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
			// If there's a value, it's not a bucket so ignore it.
			if v != nil {
				return nil
			}

			// If we've found a valid chainhash bucket, then we'll
			// retrieve that so we can extract all the channels.
			chainBucket := nodeChanBucket.Bucket(chainHash)
			if chainBucket == nil {
				return fmt.Errorf("unable to read bucket for "+
					"chain=%x", chainHash[:])
			}

			// Finally, with both of the necessary buckets
			// retrieved, fetch all the active channels related to
			// this node.
			nodeChannels, err := d.fetchNodeChannels(chainBucket)
			if err != nil {
				return fmt.Errorf("unable to read channel for "+
					"chain_hash=%x, node_key=%x: %v",
					chainHash[:], pub, err)
			}

			// NOTE(review): channels is overwritten (not appended
			// to) on each chain-hash iteration, so with multiple
			// chain buckets only the last chain's channels
			// survive — presumably one chain per node; confirm.
			channels = nodeChannels
			return nil
		})
	})

	return channels, err
}
|
|
|
|
|
2017-11-10 07:57:09 +03:00
|
|
|
// fetchNodeChannels retrieves all active channels from the target chainBucket
// which is under a node's dedicated channel bucket. This function is typically
// used to fetch all the active channels related to a particular node.
func (d *DB) fetchNodeChannels(chainBucket *bolt.Bucket) ([]*OpenChannel, error) {

	var channels []*OpenChannel

	// A node may have channels on several chains, so for each known chain,
	// we'll extract all the channels.
	err := chainBucket.ForEach(func(chanPoint, v []byte) error {
		// If there's a value, it's not a bucket so ignore it.
		if v != nil {
			return nil
		}

		// Once we've found a valid channel bucket, we'll extract it
		// from the node's chain bucket.
		chanBucket := chainBucket.Bucket(chanPoint)

		// The bucket key is the serialized funding outpoint of the
		// channel, so decode it back into structured form.
		var outPoint wire.OutPoint
		err := readOutpoint(bytes.NewReader(chanPoint), &outPoint)
		if err != nil {
			return err
		}
		oChannel, err := fetchOpenChannel(chanBucket, &outPoint)
		if err != nil {
			return fmt.Errorf("unable to read channel data for "+
				"chan_point=%v: %v", outPoint, err)
		}
		// Give the channel a handle back to the database so it can
		// persist its own updates later.
		oChannel.Db = d

		channels = append(channels, oChannel)

		return nil
	})
	if err != nil {
		return nil, err
	}

	return channels, nil
}
|
|
|
|
|
|
|
|
// FetchAllChannels attempts to retrieve all open channels currently stored
|
2018-04-12 13:49:19 +03:00
|
|
|
// within the database, including pending open, fully open and channels waiting
|
|
|
|
// for a closing transaction to confirm.
|
2016-10-27 00:53:10 +03:00
|
|
|
func (d *DB) FetchAllChannels() ([]*OpenChannel, error) {
|
2018-04-12 13:49:19 +03:00
|
|
|
var channels []*OpenChannel
|
|
|
|
|
|
|
|
// TODO(halseth): fetch all in one db tx.
|
|
|
|
openChannels, err := d.FetchAllOpenChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
channels = append(channels, openChannels...)
|
|
|
|
|
|
|
|
pendingChannels, err := d.FetchPendingChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
channels = append(channels, pendingChannels...)
|
|
|
|
|
|
|
|
waitingClose, err := d.FetchWaitingCloseChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
channels = append(channels, waitingClose...)
|
|
|
|
|
|
|
|
return channels, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FetchAllOpenChannels will return all channels that have the funding
// transaction confirmed, and is not waiting for a closing transaction to be
// confirmed.
func (d *DB) FetchAllOpenChannels() ([]*OpenChannel, error) {
	// Not pending, not waiting close.
	return fetchChannels(d, false, false)
}
|
|
|
|
|
2017-11-10 07:57:09 +03:00
|
|
|
// FetchPendingChannels will return channels that have completed the process of
// generating and broadcasting funding transactions, but whose funding
// transactions have yet to be confirmed on the blockchain.
func (d *DB) FetchPendingChannels() ([]*OpenChannel, error) {
	// Pending, not waiting close.
	return fetchChannels(d, true, false)
}
|
|
|
|
|
|
|
|
// FetchWaitingCloseChannels will return all channels that have been opened,
// but now is waiting for a closing transaction to be confirmed.
func (d *DB) FetchWaitingCloseChannels() ([]*OpenChannel, error) {
	// Not pending, waiting close.
	return fetchChannels(d, false, true)
}
|
|
|
|
|
|
|
|
// fetchChannels attempts to retrieve channels currently stored in the
// database. The pending parameter determines whether only pending channels
// will be returned, or only open channels will be returned. The waitingClose
// parameter determines whether only channels waiting for a closing transaction
// to be confirmed should be returned. If no active channels exist within the
// network, then ErrNoActiveChannels is returned.
func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) {
	var channels []*OpenChannel

	err := d.View(func(tx *bolt.Tx) error {
		// Get the bucket dedicated to storing the metadata for open
		// channels.
		openChanBucket := tx.Bucket(openChannelBucket)
		if openChanBucket == nil {
			return ErrNoActiveChannels
		}

		// Next, fetch the bucket dedicated to storing metadata related
		// to all nodes. All keys within this bucket are the serialized
		// public keys of all our direct counterparties.
		nodeMetaBucket := tx.Bucket(nodeInfoBucket)
		if nodeMetaBucket == nil {
			return fmt.Errorf("node bucket not created")
		}

		// Finally for each node public key in the bucket, fetch all
		// the channels related to this particular node.
		return nodeMetaBucket.ForEach(func(k, v []byte) error {
			nodeChanBucket := openChanBucket.Bucket(k)
			if nodeChanBucket == nil {
				// Known node, but no channels with it.
				return nil
			}

			return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
				// If there's a value, it's not a bucket so
				// ignore it.
				if v != nil {
					return nil
				}

				// If we've found a valid chainhash bucket,
				// then we'll retrieve that so we can extract
				// all the channels.
				chainBucket := nodeChanBucket.Bucket(chainHash)
				if chainBucket == nil {
					return fmt.Errorf("unable to read "+
						"bucket for chain=%x", chainHash[:])
				}

				nodeChans, err := d.fetchNodeChannels(chainBucket)
				if err != nil {
					return fmt.Errorf("unable to read "+
						"channel for chain_hash=%x, "+
						"node_key=%x: %v", chainHash[:], k, err)
				}
				for _, channel := range nodeChans {
					// Filter out channels whose pending
					// state doesn't match the query.
					if channel.IsPending != pending {
						continue
					}

					// If the channel is in any other state
					// than Default, then it means it is
					// waiting to be closed.
					channelWaitingClose :=
						channel.ChanStatus != Default

					// Only include it if we requested
					// channels with the same waitingClose
					// status.
					if channelWaitingClose != waitingClose {
						continue
					}

					channels = append(channels, channel)
				}
				return nil
			})

		})
	})
	if err != nil {
		return nil, err
	}

	return channels, nil
}
|
2016-11-22 23:50:27 +03:00
|
|
|
|
2017-05-05 01:20:16 +03:00
|
|
|
// FetchClosedChannels attempts to fetch all closed channels from the database.
// The pendingOnly bool toggles if channels that aren't yet fully closed should
// be returned in the response or not. When a channel was cooperatively closed,
// it becomes fully closed after a single confirmation. When a channel was
// forcibly closed, it will become fully closed after _all_ the pending funds
// (if any) have been swept.
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
	var chanSummaries []*ChannelCloseSummary

	if err := d.View(func(tx *bolt.Tx) error {
		closeBucket := tx.Bucket(closedChannelBucket)
		if closeBucket == nil {
			return ErrNoClosedChannels
		}

		return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) error {
			summaryReader := bytes.NewReader(summaryBytes)
			chanSummary, err := deserializeCloseChannelSummary(summaryReader)
			if err != nil {
				return err
			}

			// If the query specified to only include pending
			// channels, then we'll skip any channels which aren't
			// currently pending.
			if !chanSummary.IsPending && pendingOnly {
				return nil
			}

			chanSummaries = append(chanSummaries, chanSummary)
			return nil
		})
	}); err != nil {
		return nil, err
	}

	return chanSummaries, nil
}
|
2017-05-05 01:21:35 +03:00
|
|
|
|
2018-01-23 07:38:17 +03:00
|
|
|
// ErrClosedChannelNotFound signals that a closed channel could not be found in
// the channeldb.
var ErrClosedChannelNotFound = errors.New("unable to find closed channel summary")
|
|
|
|
|
|
|
|
// FetchClosedChannel queries for a channel close summary using the channel
// point of the channel in question. ErrClosedChannelNotFound is returned if
// no summary exists for the given outpoint.
func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, error) {
	var chanSummary *ChannelCloseSummary
	if err := d.View(func(tx *bolt.Tx) error {
		closeBucket := tx.Bucket(closedChannelBucket)
		if closeBucket == nil {
			return ErrClosedChannelNotFound
		}

		// Serialize the outpoint to obtain the key the summary is
		// stored under.
		var b bytes.Buffer
		var err error
		if err = writeOutpoint(&b, chanID); err != nil {
			return err
		}

		summaryBytes := closeBucket.Get(b.Bytes())
		if summaryBytes == nil {
			return ErrClosedChannelNotFound
		}

		summaryReader := bytes.NewReader(summaryBytes)
		chanSummary, err = deserializeCloseChannelSummary(summaryReader)

		return err
	}); err != nil {
		return nil, err
	}

	return chanSummary, nil
}
|
|
|
|
|
2017-05-05 01:21:35 +03:00
|
|
|
// MarkChanFullyClosed marks a channel as fully closed within the database. A
// channel should be marked as fully closed if the channel was initially
// cooperatively closed and it's reached a single confirmation, or after all
// the pending funds in a channel that has been forcibly closed have been
// swept.
func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
	return d.Update(func(tx *bolt.Tx) error {
		// Serialize the outpoint to obtain the key the close summary
		// is stored under.
		var b bytes.Buffer
		if err := writeOutpoint(&b, chanPoint); err != nil {
			return err
		}

		chanID := b.Bytes()

		closedChanBucket, err := tx.CreateBucketIfNotExists(
			closedChannelBucket,
		)
		if err != nil {
			return err
		}

		chanSummaryBytes := closedChanBucket.Get(chanID)
		if chanSummaryBytes == nil {
			return fmt.Errorf("no closed channel for "+
				"chan_point=%v found", chanPoint)
		}

		// Decode the existing summary, flip its pending flag, and
		// write it back in place.
		chanSummaryReader := bytes.NewReader(chanSummaryBytes)
		chanSummary, err := deserializeCloseChannelSummary(
			chanSummaryReader,
		)
		if err != nil {
			return err
		}

		chanSummary.IsPending = false

		var newSummary bytes.Buffer
		err = serializeChannelCloseSummary(&newSummary, chanSummary)
		if err != nil {
			return err
		}

		return closedChanBucket.Put(chanID, newSummary.Bytes())
	})
}
|
|
|
|
|
2017-11-11 06:36:35 +03:00
|
|
|
// syncVersions function is used for safe db version synchronization. It
// applies migration functions to the current database and recovers the
// previous state of db if at least one error/panic appeared during migration,
// since all migrations run inside a single bolt transaction.
func (d *DB) syncVersions(versions []version) error {
	meta, err := d.FetchMeta(nil)
	if err != nil {
		if err == ErrMetaNotFound {
			// No metadata yet: treat the database as version 0 so
			// every migration gets applied.
			meta = &Meta{}
		} else {
			return err
		}
	}

	// If the current database version matches the latest version number,
	// then we don't need to perform any migrations.
	latestVersion := getLatestDBVersion(versions)
	log.Infof("Checking for schema update: latest_version=%v, "+
		"db_version=%v", latestVersion, meta.DbVersionNumber)
	if meta.DbVersionNumber == latestVersion {
		return nil
	}

	log.Infof("Performing database schema migration")

	// Otherwise, we fetch the migrations which need to applied, and
	// execute them serially within a single database transaction to ensure
	// the migration is atomic.
	migrations, migrationVersions := getMigrationsToApply(
		versions, meta.DbVersionNumber,
	)
	return d.Update(func(tx *bolt.Tx) error {
		for i, migration := range migrations {
			// A nil migration (e.g. the base version) is a no-op.
			if migration == nil {
				continue
			}

			log.Infof("Applying migration #%v", migrationVersions[i])

			if err := migration(tx); err != nil {
				// Returning the error rolls back the entire
				// transaction, leaving the db untouched.
				log.Infof("Unable to apply migration #%v",
					migrationVersions[i])
				return err
			}
		}

		// Stamp the new version inside the same transaction so the
		// version only advances if every migration succeeded.
		meta.DbVersionNumber = latestVersion
		return putMeta(meta, tx)
	})
}
|
|
|
|
|
2016-12-08 09:47:01 +03:00
|
|
|
// ChannelGraph returns a new instance of the directed channel graph backed by
// this database.
func (d *DB) ChannelGraph() *ChannelGraph {
	return &ChannelGraph{d}
}
|
|
|
|
|
2016-11-22 23:50:27 +03:00
|
|
|
// getLatestDBVersion returns the number of the last entry in versions, which
// is the latest schema version given that dbVersions is ordered oldest to
// newest. Assumes versions is non-empty.
func getLatestDBVersion(versions []version) uint32 {
	return versions[len(versions)-1].number
}
|
|
|
|
|
|
|
|
// getMigrationsToApply retrieves the migration function that should be
|
|
|
|
// applied to the database.
|
2017-02-08 23:56:37 +03:00
|
|
|
func getMigrationsToApply(versions []version, version uint32) ([]migration, []uint32) {
|
2016-11-22 23:50:27 +03:00
|
|
|
migrations := make([]migration, 0, len(versions))
|
2017-02-08 23:56:37 +03:00
|
|
|
migrationVersions := make([]uint32, 0, len(versions))
|
2016-11-22 23:50:27 +03:00
|
|
|
|
|
|
|
for _, v := range versions {
|
|
|
|
if v.number > version {
|
|
|
|
migrations = append(migrations, v.migration)
|
2017-02-08 23:56:37 +03:00
|
|
|
migrationVersions = append(migrationVersions, v.number)
|
2016-11-22 23:50:27 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-08 23:56:37 +03:00
|
|
|
return migrations, migrationVersions
|
2016-11-22 23:50:27 +03:00
|
|
|
}
|