2015-12-26 21:35:15 +03:00
|
|
|
package channeldb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"encoding/binary"
|
2016-03-24 08:11:57 +03:00
|
|
|
"fmt"
|
2018-12-10 06:15:09 +03:00
|
|
|
"net"
|
2016-03-23 04:46:30 +03:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2018-12-10 06:37:47 +03:00
|
|
|
"time"
|
2015-12-26 21:35:15 +03:00
|
|
|
|
2018-06-05 04:34:16 +03:00
|
|
|
"github.com/btcsuite/btcd/btcec"
|
|
|
|
"github.com/btcsuite/btcd/wire"
|
2018-06-15 22:32:53 +03:00
|
|
|
"github.com/coreos/bbolt"
|
|
|
|
"github.com/go-errors/errors"
|
2018-11-20 17:09:46 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
2016-03-23 04:46:30 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// dbName is the file name of the serialized channel database.
	dbName = "channel.db"

	// dbFilePermission is the permission mode the database file is
	// created with (read/write for the owner only).
	dbFilePermission = 0600
)
|
|
|
|
|
2016-11-23 00:57:26 +03:00
|
|
|
// migration is a function which takes a prior outdated version of the database
// instances and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database.
type migration func(tx *bbolt.Tx) error
|
2016-11-22 23:50:27 +03:00
|
|
|
|
|
|
|
// version pairs a database schema version number with the migration
// required to reach it from the previous version.
type version struct {
	// number is the schema version this entry describes.
	number uint32

	// migration mutates the database from version number-1 to number.
	// It is nil for the base version, which needs no migration.
	migration migration
}
|
|
|
|
|
2015-12-26 21:35:15 +03:00
|
|
|
var (
	// dbVersions stores all versions of the database. If the current
	// version of the database doesn't match the latest version, this list
	// will be used to retrieve all migration functions that need to be
	// applied to the current db.
	dbVersions = []version{
		{
			// The base DB version requires no migration.
			number:    0,
			migration: nil,
		},
		{
			// The version of the database where two new indexes
			// for the update time of node and channel updates were
			// added.
			number:    1,
			migration: migrateNodeAndEdgeUpdateIndex,
		},
		{
			// The DB version that added the invoice event time
			// series.
			number:    2,
			migration: migrateInvoiceTimeSeries,
		},
		{
			// The DB version that updated the embedded invoice in
			// outgoing payments to match the new format.
			number:    3,
			migration: migrateInvoiceTimeSeriesOutgoingPayments,
		},
		{
			// The version of the database where every channel
			// always has two entries in the edges bucket. If
			// a policy is unknown, this will be represented
			// by a special byte sequence.
			number:    4,
			migration: migrateEdgePolicies,
		},
		{
			// The DB version where we persist each attempt to send
			// an HTLC to a payment hash, and track whether the
			// payment is in-flight, succeeded, or failed.
			number:    5,
			migration: paymentStatusesMigration,
		},
		{
			// The DB version that properly prunes stale entries
			// from the edge update index.
			number:    6,
			migration: migratePruneEdgeUpdateIndex,
		},
		{
			// The DB version that migrates the ChannelCloseSummary
			// to a format where optional fields are indicated with
			// boolean flags.
			number:    7,
			migration: migrateOptionalChannelCloseSummaryFields,
		},
		{
			// The DB version that changes the gossiper's message
			// store keys to account for the message's type and
			// ShortChannelID.
			number:    8,
			migration: migrateGossipMessageStoreKeys,
		},
		{
			// The DB version where the payments and payment
			// statuses are moved to being stored in a combined
			// bucket.
			number:    9,
			migration: migrateOutgoingPayments,
		},
		{
			// The DB version where we started to store legacy
			// payload information for all routes, as well as the
			// optional TLV records.
			number:    10,
			migration: migrateRouteSerialization,
		},
		{
			// Add invoice htlc and cltv delta fields.
			number:    11,
			migration: migrateInvoices,
		},
	}

	// byteOrder is the encoding used for all integer keys. Big endian is
	// the preferred byte order, due to cursor scans over integer keys
	// iterating in order.
	byteOrder = binary.BigEndian
)
|
|
|
|
|
2016-11-28 05:32:45 +03:00
|
|
|
// DB is the primary datastore for the lnd daemon. The database stores
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
	// The embedded bbolt handle gives DB all of the raw key/value
	// database operations.
	*bbolt.DB

	// dbPath is the directory the database file lives in.
	dbPath string

	// graph is the channel graph view layered on top of this database.
	graph *ChannelGraph

	// now is the clock source used for timestamping; it defaults to
	// time.Now and can be swapped out in tests.
	now func() time.Time
}
|
|
|
|
|
2016-11-28 05:32:45 +03:00
|
|
|
// Open opens an existing channeldb. Any necessary schemas migrations due to
|
2016-12-27 06:50:23 +03:00
|
|
|
// updates will take place as necessary.
|
2019-04-02 02:33:36 +03:00
|
|
|
func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
|
2016-03-24 08:11:57 +03:00
|
|
|
path := filepath.Join(dbPath, dbName)
|
2015-12-26 21:35:15 +03:00
|
|
|
|
2016-03-25 00:31:46 +03:00
|
|
|
if !fileExists(path) {
|
|
|
|
if err := createChannelDB(dbPath); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
2016-03-24 08:11:57 +03:00
|
|
|
|
2019-04-02 02:33:36 +03:00
|
|
|
opts := DefaultOptions()
|
|
|
|
for _, modifier := range modifiers {
|
|
|
|
modifier(&opts)
|
|
|
|
}
|
|
|
|
|
2019-07-09 01:41:25 +03:00
|
|
|
// Specify bbolt freelist options to reduce heap pressure in case the
|
|
|
|
// freelist grows to be very large.
|
|
|
|
options := &bbolt.Options{
|
|
|
|
NoFreelistSync: true,
|
|
|
|
FreelistType: bbolt.FreelistMapType,
|
|
|
|
}
|
|
|
|
|
|
|
|
bdb, err := bbolt.Open(path, dbFilePermission, options)
|
2016-03-24 08:11:57 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2016-11-23 00:57:26 +03:00
|
|
|
chanDB := &DB{
|
2016-12-22 23:09:19 +03:00
|
|
|
DB: bdb,
|
|
|
|
dbPath: dbPath,
|
2019-08-30 14:10:29 +03:00
|
|
|
now: time.Now,
|
2016-11-23 00:57:26 +03:00
|
|
|
}
|
2019-04-02 02:33:36 +03:00
|
|
|
chanDB.graph = newChannelGraph(
|
|
|
|
chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
|
|
|
|
)
|
2016-11-23 00:57:26 +03:00
|
|
|
|
|
|
|
// Synchronize the version of database and apply migrations if needed.
|
|
|
|
if err := chanDB.syncVersions(dbVersions); err != nil {
|
|
|
|
bdb.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return chanDB, nil
|
2016-03-24 08:11:57 +03:00
|
|
|
}
|
|
|
|
|
2017-12-13 12:28:58 +03:00
|
|
|
// Path returns the file path to the channel database.
func (d *DB) Path() string {
	return d.dbPath
}
|
|
|
|
|
2016-06-23 02:17:19 +03:00
|
|
|
// Wipe completely deletes all saved state within all used buckets within the
|
|
|
|
// database. The deletion is done in a single transaction, therefore this
|
|
|
|
// operation is fully atomic.
|
2015-12-26 21:35:15 +03:00
|
|
|
func (d *DB) Wipe() error {
|
2018-11-30 07:04:21 +03:00
|
|
|
return d.Update(func(tx *bbolt.Tx) error {
|
2016-07-22 02:16:13 +03:00
|
|
|
err := tx.DeleteBucket(openChannelBucket)
|
2018-11-30 07:04:21 +03:00
|
|
|
if err != nil && err != bbolt.ErrBucketNotFound {
|
2016-06-23 02:15:07 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-07-22 02:16:13 +03:00
|
|
|
err = tx.DeleteBucket(closedChannelBucket)
|
2018-11-30 07:04:21 +03:00
|
|
|
if err != nil && err != bbolt.ErrBucketNotFound {
|
2016-07-22 02:16:13 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-09-24 01:00:38 +03:00
|
|
|
err = tx.DeleteBucket(invoiceBucket)
|
2018-11-30 07:04:21 +03:00
|
|
|
if err != nil && err != bbolt.ErrBucketNotFound {
|
2016-09-24 01:00:38 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-11-08 05:18:57 +03:00
|
|
|
err = tx.DeleteBucket(nodeInfoBucket)
|
2018-11-30 07:04:21 +03:00
|
|
|
if err != nil && err != bbolt.ErrBucketNotFound {
|
2016-11-08 05:18:57 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-12-08 09:47:01 +03:00
|
|
|
err = tx.DeleteBucket(nodeBucket)
|
2018-11-30 07:04:21 +03:00
|
|
|
if err != nil && err != bbolt.ErrBucketNotFound {
|
2016-12-08 09:47:01 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
err = tx.DeleteBucket(edgeBucket)
|
2018-11-30 07:04:21 +03:00
|
|
|
if err != nil && err != bbolt.ErrBucketNotFound {
|
2016-12-08 09:47:01 +03:00
|
|
|
return err
|
|
|
|
}
|
2016-12-27 06:50:23 +03:00
|
|
|
err = tx.DeleteBucket(edgeIndexBucket)
|
2018-11-30 07:04:21 +03:00
|
|
|
if err != nil && err != bbolt.ErrBucketNotFound {
|
2016-12-27 06:50:23 +03:00
|
|
|
return err
|
|
|
|
}
|
2016-12-20 03:58:27 +03:00
|
|
|
err = tx.DeleteBucket(graphMetaBucket)
|
2018-11-30 07:04:21 +03:00
|
|
|
if err != nil && err != bbolt.ErrBucketNotFound {
|
2016-12-20 03:58:27 +03:00
|
|
|
return err
|
|
|
|
}
|
2016-12-08 09:47:01 +03:00
|
|
|
|
2016-07-22 02:16:13 +03:00
|
|
|
return nil
|
2015-12-26 21:35:15 +03:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2016-06-23 02:17:19 +03:00
|
|
|
// createChannelDB creates and initializes a fresh version of channeldb. In
|
|
|
|
// the case that the target path has not yet been created or doesn't yet exist,
|
|
|
|
// then the path is created. Additionally, all required top-level buckets used
|
|
|
|
// within the database are created.
|
2016-03-25 00:31:46 +03:00
|
|
|
func createChannelDB(dbPath string) error {
|
2016-03-24 08:11:57 +03:00
|
|
|
if !fileExists(dbPath) {
|
|
|
|
if err := os.MkdirAll(dbPath, 0700); err != nil {
|
2016-03-25 00:31:46 +03:00
|
|
|
return err
|
2016-03-23 04:46:30 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
path := filepath.Join(dbPath, dbName)
|
2018-11-30 07:04:21 +03:00
|
|
|
bdb, err := bbolt.Open(path, dbFilePermission, nil)
|
2016-03-23 04:46:30 +03:00
|
|
|
if err != nil {
|
2016-03-25 00:31:46 +03:00
|
|
|
return err
|
2016-03-23 04:46:30 +03:00
|
|
|
}
|
|
|
|
|
2018-11-30 07:04:21 +03:00
|
|
|
err = bdb.Update(func(tx *bbolt.Tx) error {
|
2016-03-24 08:11:57 +03:00
|
|
|
if _, err := tx.CreateBucket(openChannelBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2015-12-26 21:35:15 +03:00
|
|
|
|
2018-09-10 01:49:21 +03:00
|
|
|
if _, err := tx.CreateBucket(forwardingLogBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err := tx.CreateBucket(fwdPackagesKey); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-10-26 00:04:42 +03:00
|
|
|
if _, err := tx.CreateBucket(invoiceBucket); err != nil {
|
2016-03-24 08:11:57 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-09-10 01:49:21 +03:00
|
|
|
if _, err := tx.CreateBucket(paymentBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-10-26 00:04:42 +03:00
|
|
|
if _, err := tx.CreateBucket(nodeInfoBucket); err != nil {
|
2016-09-24 01:00:38 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-09-10 01:49:21 +03:00
|
|
|
nodes, err := tx.CreateBucket(nodeBucket)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, err = nodes.CreateBucket(aliasIndexBucket)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
_, err = nodes.CreateBucket(nodeUpdateIndexBucket)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
edges, err := tx.CreateBucket(edgeBucket)
|
|
|
|
if err != nil {
|
2016-12-08 09:47:01 +03:00
|
|
|
return err
|
|
|
|
}
|
2018-09-10 01:49:21 +03:00
|
|
|
if _, err := edges.CreateBucket(edgeIndexBucket); err != nil {
|
2016-12-08 09:47:01 +03:00
|
|
|
return err
|
|
|
|
}
|
2018-09-10 01:49:21 +03:00
|
|
|
if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if _, err := edges.CreateBucket(channelPointBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-03-27 23:06:12 +03:00
|
|
|
if _, err := edges.CreateBucket(zombieBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-09-10 01:49:21 +03:00
|
|
|
|
|
|
|
graphMeta, err := tx.CreateBucket(graphMetaBucket)
|
|
|
|
if err != nil {
|
2016-12-27 06:50:23 +03:00
|
|
|
return err
|
|
|
|
}
|
2018-09-10 01:49:21 +03:00
|
|
|
_, err = graphMeta.CreateBucket(pruneLogBucket)
|
|
|
|
if err != nil {
|
2016-12-20 03:58:27 +03:00
|
|
|
return err
|
|
|
|
}
|
2016-12-08 09:47:01 +03:00
|
|
|
|
2016-11-22 23:50:27 +03:00
|
|
|
if _, err := tx.CreateBucket(metaBucket); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-11-28 05:48:57 +03:00
|
|
|
meta := &Meta{
|
|
|
|
DbVersionNumber: getLatestDBVersion(dbVersions),
|
|
|
|
}
|
|
|
|
return putMeta(meta, tx)
|
2016-03-24 08:11:57 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
2016-03-25 00:31:46 +03:00
|
|
|
return fmt.Errorf("unable to create new channeldb")
|
2016-03-24 08:11:57 +03:00
|
|
|
}
|
|
|
|
|
2016-03-25 00:31:46 +03:00
|
|
|
return bdb.Close()
|
2015-12-26 21:35:15 +03:00
|
|
|
}
|
2016-03-23 04:46:30 +03:00
|
|
|
|
2016-06-23 02:17:19 +03:00
|
|
|
// fileExists returns true if the file exists, and false otherwise.
|
2016-03-24 08:11:57 +03:00
|
|
|
func fileExists(path string) bool {
|
|
|
|
if _, err := os.Stat(path); err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
2016-03-23 04:46:30 +03:00
|
|
|
}
|
|
|
|
|
2018-06-15 05:47:00 +03:00
|
|
|
// FetchOpenChannels starts a new database transaction and returns all stored
|
|
|
|
// currently active/open channels associated with the target nodeID. In the case
|
|
|
|
// that no active channels are known to have been created with this node, then a
|
|
|
|
// zero-length slice is returned.
|
2016-10-26 02:11:23 +03:00
|
|
|
func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
|
2016-06-21 07:41:00 +03:00
|
|
|
var channels []*OpenChannel
|
2018-11-30 07:04:21 +03:00
|
|
|
err := d.View(func(tx *bbolt.Tx) error {
|
2018-06-15 05:47:00 +03:00
|
|
|
var err error
|
|
|
|
channels, err = d.fetchOpenChannels(tx, nodeID)
|
|
|
|
return err
|
|
|
|
})
|
|
|
|
|
|
|
|
return channels, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// fetchOpenChannels uses and existing database transaction and returns all
|
|
|
|
// stored currently active/open channels associated with the target nodeID. In
|
|
|
|
// the case that no active channels are known to have been created with this
|
|
|
|
// node, then a zero-length slice is returned.
|
2018-11-30 07:04:21 +03:00
|
|
|
func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
|
2018-06-15 05:47:00 +03:00
|
|
|
nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
|
|
|
|
|
|
|
|
// Get the bucket dedicated to storing the metadata for open channels.
|
|
|
|
openChanBucket := tx.Bucket(openChannelBucket)
|
|
|
|
if openChanBucket == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Within this top level bucket, fetch the bucket dedicated to storing
|
|
|
|
// open channel data specific to the remote node.
|
|
|
|
pub := nodeID.SerializeCompressed()
|
|
|
|
nodeChanBucket := openChanBucket.Bucket(pub)
|
|
|
|
if nodeChanBucket == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll need to go down an additional layer in order to retrieve
|
|
|
|
// the channels for each chain the node knows of.
|
|
|
|
var channels []*OpenChannel
|
|
|
|
err := nodeChanBucket.ForEach(func(chainHash, v []byte) error {
|
|
|
|
// If there's a value, it's not a bucket so ignore it.
|
|
|
|
if v != nil {
|
2016-07-10 02:19:18 +03:00
|
|
|
return nil
|
2016-03-24 08:39:52 +03:00
|
|
|
}
|
|
|
|
|
2018-06-15 05:47:00 +03:00
|
|
|
// If we've found a valid chainhash bucket, then we'll retrieve
|
|
|
|
// that so we can extract all the channels.
|
|
|
|
chainBucket := nodeChanBucket.Bucket(chainHash)
|
|
|
|
if chainBucket == nil {
|
|
|
|
return fmt.Errorf("unable to read bucket for chain=%x",
|
|
|
|
chainHash[:])
|
2016-06-21 07:41:00 +03:00
|
|
|
}
|
|
|
|
|
2018-06-15 05:47:00 +03:00
|
|
|
// Finally, we both of the necessary buckets retrieved, fetch
|
|
|
|
// all the active channels related to this node.
|
|
|
|
nodeChannels, err := d.fetchNodeChannels(chainBucket)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to read channel for "+
|
|
|
|
"chain_hash=%x, node_key=%x: %v",
|
|
|
|
chainHash[:], pub, err)
|
|
|
|
}
|
|
|
|
|
2018-07-31 09:37:25 +03:00
|
|
|
channels = append(channels, nodeChannels...)
|
2018-06-15 05:47:00 +03:00
|
|
|
return nil
|
2016-10-27 00:53:10 +03:00
|
|
|
})
|
|
|
|
|
|
|
|
return channels, err
|
|
|
|
}
|
|
|
|
|
2017-11-10 07:57:09 +03:00
|
|
|
// fetchNodeChannels retrieves all active channels from the target chainBucket
|
|
|
|
// which is under a node's dedicated channel bucket. This function is typically
|
|
|
|
// used to fetch all the active channels related to a particular node.
|
2018-11-30 07:04:21 +03:00
|
|
|
func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error) {
|
2016-10-27 00:53:10 +03:00
|
|
|
|
|
|
|
var channels []*OpenChannel
|
|
|
|
|
2017-11-10 07:57:09 +03:00
|
|
|
// A node may have channels on several chains, so for each known chain,
|
|
|
|
// we'll extract all the channels.
|
|
|
|
err := chainBucket.ForEach(func(chanPoint, v []byte) error {
|
|
|
|
// If there's a value, it's not a bucket so ignore it.
|
|
|
|
if v != nil {
|
2016-09-07 04:48:40 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-11-10 07:57:09 +03:00
|
|
|
// Once we've found a valid channel bucket, we'll extract it
|
|
|
|
// from the node's chain bucket.
|
|
|
|
chanBucket := chainBucket.Bucket(chanPoint)
|
|
|
|
|
|
|
|
var outPoint wire.OutPoint
|
|
|
|
err := readOutpoint(bytes.NewReader(chanPoint), &outPoint)
|
|
|
|
if err != nil {
|
2016-10-27 00:53:10 +03:00
|
|
|
return err
|
|
|
|
}
|
2017-11-10 07:57:09 +03:00
|
|
|
oChannel, err := fetchOpenChannel(chanBucket, &outPoint)
|
2016-10-27 00:53:10 +03:00
|
|
|
if err != nil {
|
2017-02-08 03:41:14 +03:00
|
|
|
return fmt.Errorf("unable to read channel data for "+
|
2017-11-10 07:57:09 +03:00
|
|
|
"chan_point=%v: %v", outPoint, err)
|
2016-10-27 00:53:10 +03:00
|
|
|
}
|
|
|
|
oChannel.Db = d
|
|
|
|
|
|
|
|
channels = append(channels, oChannel)
|
2017-11-10 07:57:09 +03:00
|
|
|
|
2016-10-27 00:53:10 +03:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return channels, nil
|
|
|
|
}
|
|
|
|
|
2018-12-10 06:26:02 +03:00
|
|
|
// FetchChannel attempts to locate a channel specified by the passed channel
// point. If the channel cannot be found, then an error will be returned.
func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
	var (
		targetChan      *OpenChannel
		targetChanPoint bytes.Buffer
	)

	// Serialize the target outpoint once up front; it is the leaf bucket
	// key we'll look for at the bottom of the traversal.
	if err := writeOutpoint(&targetChanPoint, &chanPoint); err != nil {
		return nil, err
	}

	// chanScan will traverse the following bucket structure:
	//  * nodePub => chainHash => chanPoint
	//
	// At each level we go one further, ensuring that we're traversing the
	// proper key (that's actually a bucket). By only reading the bucket
	// structure and skipping fully decoding each channel, we save a good
	// bit of CPU as we don't need to do things like decompress public
	// keys.
	chanScan := func(tx *bbolt.Tx) error {
		// Get the bucket dedicated to storing the metadata for open
		// channels.
		openChanBucket := tx.Bucket(openChannelBucket)
		if openChanBucket == nil {
			return ErrNoActiveChannels
		}

		// Within the node channel bucket, are the set of node pubkeys
		// we have channels with, we don't know the entire set, so
		// we'll check them all.
		return openChanBucket.ForEach(func(nodePub, v []byte) error {
			// Ensure that this is a key the same size as a pubkey,
			// and also that it leads directly to a bucket.
			if len(nodePub) != 33 || v != nil {
				return nil
			}

			nodeChanBucket := openChanBucket.Bucket(nodePub)
			if nodeChanBucket == nil {
				return nil
			}

			// The next layer down is all the chains that this node
			// has channels on with us.
			return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
				// If there's a value, it's not a bucket so
				// ignore it.
				if v != nil {
					return nil
				}

				chainBucket := nodeChanBucket.Bucket(chainHash)
				if chainBucket == nil {
					return fmt.Errorf("unable to read "+
						"bucket for chain=%x", chainHash[:])
				}

				// Finally we reach the leaf bucket that stores
				// all the chanPoints for this node.
				chanBucket := chainBucket.Bucket(
					targetChanPoint.Bytes(),
				)
				if chanBucket == nil {
					// Not found under this node/chain;
					// keep scanning the rest.
					return nil
				}

				channel, err := fetchOpenChannel(
					chanBucket, &chanPoint,
				)
				if err != nil {
					return err
				}

				targetChan = channel
				targetChan.Db = d

				return nil
			})
		})
	}

	err := d.View(chanScan)
	if err != nil {
		return nil, err
	}

	if targetChan != nil {
		return targetChan, nil
	}

	// If we can't find the channel, then we return with an error, as we
	// have nothing to backup.
	return nil, ErrChannelNotFound
}
|
|
|
|
|
2016-10-27 00:53:10 +03:00
|
|
|
// FetchAllChannels attempts to retrieve all open channels currently stored
|
2018-04-12 13:49:19 +03:00
|
|
|
// within the database, including pending open, fully open and channels waiting
|
|
|
|
// for a closing transaction to confirm.
|
2016-10-27 00:53:10 +03:00
|
|
|
func (d *DB) FetchAllChannels() ([]*OpenChannel, error) {
|
2018-04-12 13:49:19 +03:00
|
|
|
var channels []*OpenChannel
|
|
|
|
|
|
|
|
// TODO(halseth): fetch all in one db tx.
|
|
|
|
openChannels, err := d.FetchAllOpenChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
channels = append(channels, openChannels...)
|
|
|
|
|
|
|
|
pendingChannels, err := d.FetchPendingChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
channels = append(channels, pendingChannels...)
|
|
|
|
|
|
|
|
waitingClose, err := d.FetchWaitingCloseChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
channels = append(channels, waitingClose...)
|
|
|
|
|
|
|
|
return channels, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// FetchAllOpenChannels will return all channels that have the funding
// transaction confirmed, and is not waiting for a closing transaction to be
// confirmed.
func (d *DB) FetchAllOpenChannels() ([]*OpenChannel, error) {
	// pending=false, waitingClose=false: confirmed and fully open.
	return fetchChannels(d, false, false)
}
|
|
|
|
|
2017-11-10 07:57:09 +03:00
|
|
|
// FetchPendingChannels will return channels that have completed the process of
// generating and broadcasting funding transactions, but whose funding
// transactions have yet to be confirmed on the blockchain.
func (d *DB) FetchPendingChannels() ([]*OpenChannel, error) {
	// pending=true, waitingClose=false: funding broadcast but unconfirmed.
	return fetchChannels(d, true, false)
}
|
|
|
|
|
|
|
|
// FetchWaitingCloseChannels will return all channels that have been opened,
|
2019-01-04 04:33:35 +03:00
|
|
|
// but are now waiting for a closing transaction to be confirmed.
|
|
|
|
//
|
|
|
|
// NOTE: This includes channels that are also pending to be opened.
|
2018-04-12 13:49:19 +03:00
|
|
|
func (d *DB) FetchWaitingCloseChannels() ([]*OpenChannel, error) {
|
2019-01-04 04:33:35 +03:00
|
|
|
waitingClose, err := fetchChannels(d, false, true)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
pendingWaitingClose, err := fetchChannels(d, true, true)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return append(waitingClose, pendingWaitingClose...), nil
|
2017-01-23 10:31:01 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// fetchChannels attempts to retrieve channels currently stored in the
// database. The pending parameter determines whether only pending channels
// will be returned, or only open channels will be returned. The waitingClose
// parameter determines whether only channels waiting for a closing transaction
// to be confirmed should be returned. If no active channels exist within the
// network, then ErrNoActiveChannels is returned.
func fetchChannels(d *DB, pending, waitingClose bool) ([]*OpenChannel, error) {
	var channels []*OpenChannel

	err := d.View(func(tx *bbolt.Tx) error {
		// Get the bucket dedicated to storing the metadata for open
		// channels.
		openChanBucket := tx.Bucket(openChannelBucket)
		if openChanBucket == nil {
			return ErrNoActiveChannels
		}

		// Next, fetch the bucket dedicated to storing metadata related
		// to all nodes. All keys within this bucket are the serialized
		// public keys of all our direct counterparties.
		nodeMetaBucket := tx.Bucket(nodeInfoBucket)
		if nodeMetaBucket == nil {
			return fmt.Errorf("node bucket not created")
		}

		// Finally for each node public key in the bucket, fetch all
		// the channels related to this particular node.
		return nodeMetaBucket.ForEach(func(k, v []byte) error {
			nodeChanBucket := openChanBucket.Bucket(k)
			if nodeChanBucket == nil {
				return nil
			}

			return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
				// If there's a value, it's not a bucket so
				// ignore it.
				if v != nil {
					return nil
				}

				// If we've found a valid chainhash bucket,
				// then we'll retrieve that so we can extract
				// all the channels.
				chainBucket := nodeChanBucket.Bucket(chainHash)
				if chainBucket == nil {
					return fmt.Errorf("unable to read "+
						"bucket for chain=%x", chainHash[:])
				}

				nodeChans, err := d.fetchNodeChannels(chainBucket)
				if err != nil {
					return fmt.Errorf("unable to read "+
						"channel for chain_hash=%x, "+
						"node_key=%x: %v", chainHash[:], k, err)
				}
				for _, channel := range nodeChans {
					// Skip channels whose pending status
					// doesn't match the request.
					if channel.IsPending != pending {
						continue
					}

					// If the channel is in any other state
					// than Default, then it means it is
					// waiting to be closed.
					channelWaitingClose :=
						channel.ChanStatus() != ChanStatusDefault

					// Only include it if we requested
					// channels with the same waitingClose
					// status.
					if channelWaitingClose != waitingClose {
						continue
					}

					channels = append(channels, channel)
				}
				return nil
			})
		})
	})
	if err != nil {
		return nil, err
	}

	return channels, nil
}
|
2016-11-22 23:50:27 +03:00
|
|
|
|
2017-05-05 01:20:16 +03:00
|
|
|
// FetchClosedChannels attempts to fetch all closed channels from the database.
// The pendingOnly bool toggles if channels that aren't yet fully closed should
// be returned in the response or not. When a channel was cooperatively closed,
// it becomes fully closed after a single confirmation. When a channel was
// forcibly closed, it will become fully closed after _all_ the pending funds
// (if any) have been swept.
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
	var chanSummaries []*ChannelCloseSummary

	if err := d.View(func(tx *bbolt.Tx) error {
		closeBucket := tx.Bucket(closedChannelBucket)
		if closeBucket == nil {
			return ErrNoClosedChannels
		}

		// Each entry maps a serialized channel ID to its serialized
		// close summary.
		return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) error {
			summaryReader := bytes.NewReader(summaryBytes)
			chanSummary, err := deserializeCloseChannelSummary(summaryReader)
			if err != nil {
				return err
			}

			// If the query specified to only include pending
			// channels, then we'll skip any channels which aren't
			// currently pending.
			if !chanSummary.IsPending && pendingOnly {
				return nil
			}

			chanSummaries = append(chanSummaries, chanSummary)
			return nil
		})
	}); err != nil {
		return nil, err
	}

	return chanSummaries, nil
}
|
2017-05-05 01:21:35 +03:00
|
|
|
|
2018-01-23 07:38:17 +03:00
|
|
|
// ErrClosedChannelNotFound signals that a closed channel could not be found in
// the channeldb.
var ErrClosedChannelNotFound = errors.New("unable to find closed channel summary")
|
|
|
|
|
|
|
|
// FetchClosedChannel queries for a channel close summary using the channel
// point of the channel in question.
func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, error) {
	var chanSummary *ChannelCloseSummary
	if err := d.View(func(tx *bbolt.Tx) error {
		closeBucket := tx.Bucket(closedChannelBucket)
		if closeBucket == nil {
			return ErrClosedChannelNotFound
		}

		// Serialize the outpoint to form the bucket key.
		var b bytes.Buffer
		var err error
		if err = writeOutpoint(&b, chanID); err != nil {
			return err
		}

		summaryBytes := closeBucket.Get(b.Bytes())
		if summaryBytes == nil {
			return ErrClosedChannelNotFound
		}

		summaryReader := bytes.NewReader(summaryBytes)
		chanSummary, err = deserializeCloseChannelSummary(summaryReader)

		return err
	}); err != nil {
		return nil, err
	}

	return chanSummary, nil
}
|
|
|
|
|
2018-11-20 17:09:46 +03:00
|
|
|
// FetchClosedChannelForID queries for a channel close summary using the
|
|
|
|
// channel ID of the channel in question.
|
|
|
|
func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
|
|
|
|
*ChannelCloseSummary, error) {
|
|
|
|
|
|
|
|
var chanSummary *ChannelCloseSummary
|
2018-11-30 07:04:21 +03:00
|
|
|
if err := d.View(func(tx *bbolt.Tx) error {
|
2018-11-20 17:09:46 +03:00
|
|
|
closeBucket := tx.Bucket(closedChannelBucket)
|
|
|
|
if closeBucket == nil {
|
|
|
|
return ErrClosedChannelNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
// The first 30 bytes of the channel ID and outpoint will be
|
|
|
|
// equal.
|
|
|
|
cursor := closeBucket.Cursor()
|
|
|
|
op, c := cursor.Seek(cid[:30])
|
|
|
|
|
|
|
|
// We scan over all possible candidates for this channel ID.
|
|
|
|
for ; op != nil && bytes.Compare(cid[:30], op[:30]) <= 0; op, c = cursor.Next() {
|
|
|
|
var outPoint wire.OutPoint
|
|
|
|
err := readOutpoint(bytes.NewReader(op), &outPoint)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the found outpoint does not correspond to this
|
|
|
|
// channel ID, we continue.
|
|
|
|
if !cid.IsChanPoint(&outPoint) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deserialize the close summary and return.
|
|
|
|
r := bytes.NewReader(c)
|
|
|
|
chanSummary, err = deserializeCloseChannelSummary(r)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return ErrClosedChannelNotFound
|
|
|
|
}); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return chanSummary, nil
|
|
|
|
}
|
|
|
|
|
2017-05-05 01:21:35 +03:00
|
|
|
// MarkChanFullyClosed marks a channel as fully closed within the database. A
|
|
|
|
// channel should be marked as fully closed if the channel was initially
|
2018-04-17 05:06:21 +03:00
|
|
|
// cooperatively closed and it's reached a single confirmation, or after all
|
|
|
|
// the pending funds in a channel that has been forcibly closed have been
|
|
|
|
// swept.
|
2017-05-05 01:21:35 +03:00
|
|
|
func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
|
2018-11-30 07:04:21 +03:00
|
|
|
return d.Update(func(tx *bbolt.Tx) error {
|
2017-05-05 01:21:35 +03:00
|
|
|
var b bytes.Buffer
|
2017-07-26 06:39:59 +03:00
|
|
|
if err := writeOutpoint(&b, chanPoint); err != nil {
|
2017-05-05 01:21:35 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
chanID := b.Bytes()
|
|
|
|
|
2017-11-11 06:36:35 +03:00
|
|
|
closedChanBucket, err := tx.CreateBucketIfNotExists(
|
|
|
|
closedChannelBucket,
|
|
|
|
)
|
2017-05-05 01:21:35 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-11-10 07:58:24 +03:00
|
|
|
chanSummaryBytes := closedChanBucket.Get(chanID)
|
|
|
|
if chanSummaryBytes == nil {
|
2018-04-13 03:13:28 +03:00
|
|
|
return fmt.Errorf("no closed channel for "+
|
|
|
|
"chan_point=%v found", chanPoint)
|
2017-05-05 01:21:35 +03:00
|
|
|
}
|
|
|
|
|
2017-11-10 07:58:24 +03:00
|
|
|
chanSummaryReader := bytes.NewReader(chanSummaryBytes)
|
|
|
|
chanSummary, err := deserializeCloseChannelSummary(
|
|
|
|
chanSummaryReader,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
chanSummary.IsPending = false
|
|
|
|
|
|
|
|
var newSummary bytes.Buffer
|
|
|
|
err = serializeChannelCloseSummary(&newSummary, chanSummary)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-05-05 01:21:35 +03:00
|
|
|
|
2018-06-15 22:32:53 +03:00
|
|
|
err = closedChanBucket.Put(chanID, newSummary.Bytes())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that the channel is closed, we'll check if we have any
|
|
|
|
// other open channels with this peer. If we don't we'll
|
|
|
|
// garbage collect it to ensure we don't establish persistent
|
|
|
|
// connections to peers without open channels.
|
|
|
|
return d.pruneLinkNode(tx, chanSummary.RemotePub)
|
2017-05-05 01:21:35 +03:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-06-15 22:32:53 +03:00
|
|
|
// pruneLinkNode determines whether we should garbage collect a link node from
|
|
|
|
// the database due to no longer having any open channels with it. If there are
|
|
|
|
// any left, then this acts as a no-op.
|
2018-11-30 07:04:21 +03:00
|
|
|
func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error {
|
2018-07-31 11:29:12 +03:00
|
|
|
openChannels, err := d.fetchOpenChannels(tx, remotePub)
|
2018-06-15 22:32:53 +03:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to fetch open channels for peer %x: "+
|
|
|
|
"%v", remotePub.SerializeCompressed(), err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(openChannels) > 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Infof("Pruning link node %x with zero open channels from database",
|
|
|
|
remotePub.SerializeCompressed())
|
|
|
|
|
2018-07-31 11:29:12 +03:00
|
|
|
return d.deleteLinkNode(tx, remotePub)
|
2018-06-15 22:32:53 +03:00
|
|
|
}
|
|
|
|
|
2018-07-18 02:40:14 +03:00
|
|
|
// PruneLinkNodes attempts to prune all link nodes found within the databse with
|
|
|
|
// whom we no longer have any open channels with.
|
2018-07-31 11:29:12 +03:00
|
|
|
func (d *DB) PruneLinkNodes() error {
|
2018-11-30 07:04:21 +03:00
|
|
|
return d.Update(func(tx *bbolt.Tx) error {
|
2018-07-31 11:29:12 +03:00
|
|
|
linkNodes, err := d.fetchAllLinkNodes(tx)
|
2018-07-18 02:40:14 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, linkNode := range linkNodes {
|
2018-07-31 11:29:12 +03:00
|
|
|
err := d.pruneLinkNode(tx, linkNode.IdentityPub)
|
2018-07-18 02:40:14 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-12-10 06:37:47 +03:00
|
|
|
// ChannelShell is a shell of a channel that is meant to be used for channel
// recovery purposes. It contains a minimal OpenChannel instance along with
// addresses for that target node.
type ChannelShell struct {
	// NodeAddrs is the set of addresses that this node has known to be
	// reachable at in the past.
	NodeAddrs []net.Addr

	// Chan is a shell of an OpenChannel, it contains only the items
	// required to restore the channel on disk.
	Chan *OpenChannel
}
|
|
|
|
|
|
|
|
// RestoreChannelShells is a method that allows the caller to reconstruct the
// state of an OpenChannel from the ChannelShell. We'll attempt to write the
// new channel to disk, create a LinkNode instance with the passed node
// addresses, and finally create an edge within the graph for the channel as
// well. This method is idempotent, so repeated calls with the same set of
// channel shells won't modify the database after the initial call.
func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
	chanGraph := d.ChannelGraph()

	// We hold the graph's cache mutex for the duration of the restore so
	// the caches can be invalidated consistently once the transaction
	// commits.
	//
	// TODO(conner): find way to do this w/o accessing internal members?
	chanGraph.cacheMu.Lock()
	defer chanGraph.cacheMu.Unlock()

	// Channel IDs restored in this call; used below to evict stale cache
	// entries after the transaction commits.
	var chansRestored []uint64
	err := d.Update(func(tx *bbolt.Tx) error {
		for _, channelShell := range channelShells {
			channel := channelShell.Chan

			// When we make a channel, we mark that the channel has
			// been restored, this will signal to other sub-systems
			// to not attempt to use the channel as if it was a
			// regular one.
			channel.chanStatus |= ChanStatusRestored

			// First, we'll attempt to create a new open channel
			// and link node for this channel. If the channel
			// already exists, then in order to ensure this method
			// is idempotent, we'll continue to the next step.
			channel.Db = d
			err := syncNewChannel(
				tx, channel, channelShell.NodeAddrs,
			)
			if err != nil {
				return err
			}

			// Next, we'll create an active edge in the graph
			// database for this channel in order to restore our
			// partial view of the network.
			//
			// TODO(roasbeef): if we restore *after* the channel
			// has been closed on chain, then need to inform the
			// router that it should try and prune these values as
			// we can detect them
			edgeInfo := ChannelEdgeInfo{
				ChannelID:    channel.ShortChannelID.ToUint64(),
				ChainHash:    channel.ChainHash,
				ChannelPoint: channel.FundingOutpoint,
				Capacity:     channel.Capacity,
			}

			nodes := tx.Bucket(nodeBucket)
			if nodes == nil {
				return ErrGraphNotFound
			}
			selfNode, err := chanGraph.sourceNode(nodes)
			if err != nil {
				return err
			}

			// Depending on which pub key is smaller, we'll assign
			// our roles as "node1" and "node2".
			chanPeer := channel.IdentityPub.SerializeCompressed()
			selfIsSmaller := bytes.Compare(
				selfNode.PubKeyBytes[:], chanPeer,
			) == -1
			if selfIsSmaller {
				copy(edgeInfo.NodeKey1Bytes[:], selfNode.PubKeyBytes[:])
				copy(edgeInfo.NodeKey2Bytes[:], chanPeer)
			} else {
				copy(edgeInfo.NodeKey1Bytes[:], chanPeer)
				copy(edgeInfo.NodeKey2Bytes[:], selfNode.PubKeyBytes[:])
			}

			// With the edge info shell constructed, we'll now add
			// it to the graph. An already-existing edge is fine:
			// it keeps this method idempotent.
			err = chanGraph.addChannelEdge(tx, &edgeInfo)
			if err != nil && err != ErrEdgeAlreadyExist {
				return err
			}

			// Similarly, we'll construct a channel edge shell and
			// add that itself to the graph.
			chanEdge := ChannelEdgePolicy{
				ChannelID:  edgeInfo.ChannelID,
				LastUpdate: time.Now(),
			}

			// If their pubkey is larger, then we'll flip the
			// direction bit to indicate that us, the "second" node
			// is updating their policy.
			if !selfIsSmaller {
				chanEdge.ChannelFlags |= lnwire.ChanUpdateDirection
			}

			_, err = updateEdgePolicy(tx, &chanEdge)
			if err != nil {
				return err
			}

			chansRestored = append(chansRestored, edgeInfo.ChannelID)
		}

		return nil
	})
	if err != nil {
		return err
	}

	// Evict any cached entries for the restored channels so subsequent
	// reads observe the freshly written state.
	for _, chanid := range chansRestored {
		chanGraph.rejectCache.remove(chanid)
		chanGraph.chanCache.remove(chanid)
	}

	return nil
}
|
|
|
|
|
2018-12-10 06:15:09 +03:00
|
|
|
// AddrsForNode consults the graph and channel database for all addresses known
|
|
|
|
// to the passed node public key.
|
|
|
|
func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
|
|
|
|
var (
|
|
|
|
linkNode *LinkNode
|
|
|
|
graphNode LightningNode
|
|
|
|
)
|
|
|
|
|
|
|
|
dbErr := d.View(func(tx *bbolt.Tx) error {
|
|
|
|
var err error
|
|
|
|
|
|
|
|
linkNode, err = fetchLinkNode(tx, nodePub)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll also query the graph for this peer to see if they have
|
|
|
|
// any addresses that we don't currently have stored within the
|
|
|
|
// link node database.
|
|
|
|
nodes := tx.Bucket(nodeBucket)
|
|
|
|
if nodes == nil {
|
|
|
|
return ErrGraphNotFound
|
|
|
|
}
|
|
|
|
compressedPubKey := nodePub.SerializeCompressed()
|
|
|
|
graphNode, err = fetchLightningNode(nodes, compressedPubKey)
|
2019-03-11 02:26:27 +03:00
|
|
|
if err != nil && err != ErrGraphNodeNotFound {
|
|
|
|
// If the node isn't found, then that's OK, as we still
|
|
|
|
// have the link node data.
|
2018-12-10 06:15:09 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if dbErr != nil {
|
|
|
|
return nil, dbErr
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that we have both sources of addrs for this node, we'll use a
|
|
|
|
// map to de-duplicate any addresses between the two sources, and
|
|
|
|
// produce a final list of the combined addrs.
|
|
|
|
addrs := make(map[string]net.Addr)
|
|
|
|
for _, addr := range linkNode.Addresses {
|
|
|
|
addrs[addr.String()] = addr
|
|
|
|
}
|
|
|
|
for _, addr := range graphNode.Addresses {
|
|
|
|
addrs[addr.String()] = addr
|
|
|
|
}
|
|
|
|
dedupedAddrs := make([]net.Addr, 0, len(addrs))
|
|
|
|
for _, addr := range addrs {
|
|
|
|
dedupedAddrs = append(dedupedAddrs, addr)
|
|
|
|
}
|
|
|
|
|
|
|
|
return dedupedAddrs, nil
|
|
|
|
}
|
|
|
|
|
2017-11-11 06:36:35 +03:00
|
|
|
// syncVersions function is used for safe db version synchronization. It
// applies migration functions to the current database and recovers the
// previous state of db if at least one error/panic appeared during migration.
func (d *DB) syncVersions(versions []version) error {
	meta, err := d.FetchMeta(nil)
	if err != nil {
		// A missing meta entry indicates a fresh database, which we
		// treat as version 0 (the zero-value Meta).
		if err == ErrMetaNotFound {
			meta = &Meta{}
		} else {
			return err
		}
	}

	latestVersion := getLatestDBVersion(versions)
	log.Infof("Checking for schema update: latest_version=%v, "+
		"db_version=%v", latestVersion, meta.DbVersionNumber)

	switch {

	// If the database reports a higher version that we are aware of, the
	// user is probably trying to revert to a prior version of lnd. We fail
	// here to prevent reversions and unintended corruption.
	case meta.DbVersionNumber > latestVersion:
		log.Errorf("Refusing to revert from db_version=%d to "+
			"lower version=%d", meta.DbVersionNumber,
			latestVersion)
		return ErrDBReversion

	// If the current database version matches the latest version number,
	// then we don't need to perform any migrations.
	case meta.DbVersionNumber == latestVersion:
		return nil
	}

	log.Infof("Performing database schema migration")

	// Otherwise, we fetch the migrations which need to be applied, and
	// execute them serially within a single database transaction to ensure
	// the migration is atomic.
	migrations, migrationVersions := getMigrationsToApply(
		versions, meta.DbVersionNumber,
	)
	return d.Update(func(tx *bbolt.Tx) error {
		for i, migration := range migrations {
			// The base version carries a nil migration; skip it.
			if migration == nil {
				continue
			}

			log.Infof("Applying migration #%v", migrationVersions[i])

			if err := migration(tx); err != nil {
				log.Infof("Unable to apply migration #%v",
					migrationVersions[i])
				return err
			}
		}

		// Record the new version only after every migration has
		// succeeded, all within the same atomic transaction.
		meta.DbVersionNumber = latestVersion
		return putMeta(meta, tx)
	})
}
|
|
|
|
|
2016-12-08 09:47:01 +03:00
|
|
|
// ChannelGraph returns the database's directed channel graph instance. Note
// that the same cached instance is returned on every call.
func (d *DB) ChannelGraph() *ChannelGraph {
	return d.graph
}
|
|
|
|
|
2016-11-22 23:50:27 +03:00
|
|
|
func getLatestDBVersion(versions []version) uint32 {
|
|
|
|
return versions[len(versions)-1].number
|
|
|
|
}
|
|
|
|
|
|
|
|
// getMigrationsToApply retrieves the migration function that should be
|
|
|
|
// applied to the database.
|
2017-02-08 23:56:37 +03:00
|
|
|
func getMigrationsToApply(versions []version, version uint32) ([]migration, []uint32) {
|
2016-11-22 23:50:27 +03:00
|
|
|
migrations := make([]migration, 0, len(versions))
|
2017-02-08 23:56:37 +03:00
|
|
|
migrationVersions := make([]uint32, 0, len(versions))
|
2016-11-22 23:50:27 +03:00
|
|
|
|
|
|
|
for _, v := range versions {
|
|
|
|
if v.number > version {
|
|
|
|
migrations = append(migrations, v.migration)
|
2017-02-08 23:56:37 +03:00
|
|
|
migrationVersions = append(migrationVersions, v.number)
|
2016-11-22 23:50:27 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-08 23:56:37 +03:00
|
|
|
return migrations, migrationVersions
|
2016-11-22 23:50:27 +03:00
|
|
|
}
|