package channeldb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/boltdb/bolt"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/roasbeef/btcd/btcec"
	"github.com/roasbeef/btcd/wire"
)

const (
	// dbName is the file name of the channel database.
	dbName = "channel.db"

	// dbFilePermission is the permission the database file is created
	// with.
	dbFilePermission = 0600
)

// migration is a function which takes a prior outdated version of the
// database instance and mutates the key/bucket structure to arrive at a
// more up-to-date version of the database.
type migration func(tx *bolt.Tx) error

// version ties a database schema version number to the migration that
// brings the database up to that version.
type version struct {
	// number is the schema version this entry represents.
	number uint32

	// migration is the function to run when upgrading to this version.
	// It may be nil if no structural changes are required.
	migration migration
}

var (
	// dbVersions stores all versions of the database. If the current
	// version of the database doesn't match the latest version, this
	// list is used to retrieve all the migration functions that need to
	// be applied to the current db.
	dbVersions = []version{
		{
			// The base DB version requires no migration.
			number:    0,
			migration: nil,
		},
	}

	// Big endian is the preferred byte order, due to cursor scans over
	// integer keys iterating in order.
	byteOrder = binary.BigEndian
)

// bufPool is a pool of reusable byte buffers, used to amortize allocations
// across serialization operations.
var bufPool = &sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}
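
// As an illustrative sketch (an assumed usage pattern, not code from this
// file), a caller would typically borrow a buffer from bufPool, reset it
// before use, and return it once finished:
//
//	buf := bufPool.Get().(*bytes.Buffer)
//	defer bufPool.Put(buf)
//	buf.Reset()
//	// ... write serialized state into buf ...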

// DB is the primary datastore for the lnd daemon. The database stores
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
	*bolt.DB
	dbPath string
}

// Open opens an existing channeldb. Any necessary schema migrations due to
// updates will take place as necessary.
func Open(dbPath string) (*DB, error) {
	path := filepath.Join(dbPath, dbName)

	if !fileExists(path) {
		if err := createChannelDB(dbPath); err != nil {
			return nil, err
		}
	}

	bdb, err := bolt.Open(path, dbFilePermission, nil)
	if err != nil {
		return nil, err
	}

	chanDB := &DB{
		DB:     bdb,
		dbPath: dbPath,
	}

	// Synchronize the version of the database and apply migrations if
	// needed.
	if err := chanDB.syncVersions(dbVersions); err != nil {
		bdb.Close()
		return nil, err
	}

	return chanDB, nil
}
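
// For illustration only (a hypothetical caller, not part of this file),
// opening the database and ensuring it's closed on exit might look like:
//
//	db, err := channeldb.Open(dataDirPath)
//	if err != nil {
//		// handle the error; the path may be unwritable or the db corrupt
//	}
//	defer db.Close()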

// Wipe completely deletes all saved state within all used buckets within the
// database. The deletion is done in a single transaction, therefore this
// operation is fully atomic.
func (d *DB) Wipe() error {
	return d.Update(func(tx *bolt.Tx) error {
		err := tx.DeleteBucket(openChannelBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}

		err = tx.DeleteBucket(closedChannelBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}

		err = tx.DeleteBucket(invoiceBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}

		err = tx.DeleteBucket(nodeInfoBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}

		err = tx.DeleteBucket(nodeBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}
		err = tx.DeleteBucket(edgeBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}
		err = tx.DeleteBucket(edgeIndexBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}
		err = tx.DeleteBucket(graphMetaBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}

		return nil
	})
}

// createChannelDB creates and initializes a fresh version of channeldb. In
// the case that the target path has not yet been created or doesn't yet exist,
// then the path is created. Additionally, all required top-level buckets used
// within the database are created.
func createChannelDB(dbPath string) error {
	if !fileExists(dbPath) {
		if err := os.MkdirAll(dbPath, 0700); err != nil {
			return err
		}
	}

	path := filepath.Join(dbPath, dbName)
	bdb, err := bolt.Open(path, dbFilePermission, nil)
	if err != nil {
		return err
	}

	err = bdb.Update(func(tx *bolt.Tx) error {
		if _, err := tx.CreateBucket(openChannelBucket); err != nil {
			return err
		}

		if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
			return err
		}

		if _, err := tx.CreateBucket(invoiceBucket); err != nil {
			return err
		}

		if _, err := tx.CreateBucket(nodeInfoBucket); err != nil {
			return err
		}

		if _, err := tx.CreateBucket(nodeBucket); err != nil {
			return err
		}
		if _, err := tx.CreateBucket(edgeBucket); err != nil {
			return err
		}
		if _, err := tx.CreateBucket(edgeIndexBucket); err != nil {
			return err
		}
		if _, err := tx.CreateBucket(graphMetaBucket); err != nil {
			return err
		}

		if _, err := tx.CreateBucket(metaBucket); err != nil {
			return err
		}

		// Stamp the freshly created database with the latest known
		// schema version so future opens can detect needed migrations.
		meta := &Meta{
			DbVersionNumber: getLatestDBVersion(dbVersions),
		}
		return putMeta(meta, tx)
	})
	if err != nil {
		return fmt.Errorf("unable to create new channeldb: %v", err)
	}

	return bdb.Close()
}

// fileExists returns true if the file exists, and false otherwise.
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}

	return true
}

// FetchOpenChannels returns all stored currently active/open channels
// associated with the target nodeID. In the case that no active channels are
// known to have been created with this node, then a zero-length slice is
// returned.
func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
	var channels []*OpenChannel
	err := d.View(func(tx *bolt.Tx) error {
		// Get the bucket dedicated to storing the metadata for open
		// channels.
		openChanBucket := tx.Bucket(openChannelBucket)
		if openChanBucket == nil {
			return nil
		}

		// Within this top level bucket, fetch the bucket dedicated to
		// storing open channel data specific to the remote node.
		pub := nodeID.SerializeCompressed()
		nodeChanBucket := openChanBucket.Bucket(pub)
		if nodeChanBucket == nil {
			return nil
		}

		// Finally, with both of the necessary buckets retrieved,
		// fetch all the active channels related to this node.
		nodeChannels, err := d.fetchNodeChannels(openChanBucket,
			nodeChanBucket)
		if err != nil {
			return fmt.Errorf("unable to read channel for "+
				"node_key=%x: %v", pub, err)
		}

		channels = nodeChannels
		return nil
	})

	return channels, err
}
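
// A minimal usage sketch (hypothetical caller code; peerPubKey is an
// assumed *btcec.PublicKey identifying one of our direct counterparties):
//
//	channels, err := db.FetchOpenChannels(peerPubKey)
//	if err != nil {
//		// handle the lookup failure
//	}
//	log.Infof("%d open channels with peer", len(channels))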

// fetchNodeChannels retrieves all active channels from the target
// nodeChanBucket. This function is typically used to fetch all the active
// channels related to a particular node.
func (d *DB) fetchNodeChannels(openChanBucket,
	nodeChanBucket *bolt.Bucket) ([]*OpenChannel, error) {

	var channels []*OpenChannel

	// Once we have the node's channel bucket, iterate through each
	// item in the inner chan ID bucket. This bucket acts as an
	// index for all channels we currently have open with this node.
	nodeChanIDBucket := nodeChanBucket.Bucket(chanIDBucket[:])
	if nodeChanIDBucket == nil {
		return nil, nil
	}
	err := nodeChanIDBucket.ForEach(func(k, v []byte) error {
		if k == nil {
			return nil
		}

		outBytes := bytes.NewReader(k)
		chanID := &wire.OutPoint{}
		if err := readOutpoint(outBytes, chanID); err != nil {
			return err
		}

		oChannel, err := fetchOpenChannel(openChanBucket,
			nodeChanBucket, chanID)
		if err != nil {
			return fmt.Errorf("unable to read channel data for "+
				"chan_point=%v: %v", chanID, err)
		}
		oChannel.Db = d

		channels = append(channels, oChannel)
		return nil
	})
	if err != nil {
		return nil, err
	}

	return channels, nil
}

// FetchAllChannels attempts to retrieve all open channels currently stored
// within the database.
func (d *DB) FetchAllChannels() ([]*OpenChannel, error) {
	return fetchChannels(d, false)
}

// FetchPendingChannels will return channels that have completed the process
// of generating and broadcasting funding transactions, but whose funding
// transactions have yet to be confirmed on the blockchain.
func (d *DB) FetchPendingChannels() ([]*OpenChannel, error) {
	return fetchChannels(d, true)
}
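
// As a sketch of the intended usage (hypothetical caller code), the two
// wrappers above differ only in the pendingOnly flag they pass through to
// fetchChannels:
//
//	all, _ := db.FetchAllChannels()         // confirmed and pending
//	pending, _ := db.FetchPendingChannels() // unconfirmed funding only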

// fetchChannels attempts to retrieve channels currently stored in the
// database. The pendingOnly parameter determines whether only pending
// channels will be returned. If no active channels exist within the network,
// then ErrNoActiveChannels is returned.
func fetchChannels(d *DB, pendingOnly bool) ([]*OpenChannel, error) {
	var channels []*OpenChannel

	err := d.View(func(tx *bolt.Tx) error {
		// Get the bucket dedicated to storing the metadata for open
		// channels.
		openChanBucket := tx.Bucket(openChannelBucket)
		if openChanBucket == nil {
			return ErrNoActiveChannels
		}

		// Next, fetch the bucket dedicated to storing metadata
		// related to all nodes. All keys within this bucket are the
		// serialized public keys of all our direct counterparties.
		nodeMetaBucket := tx.Bucket(nodeInfoBucket)
		if nodeMetaBucket == nil {
			return fmt.Errorf("node bucket not created")
		}

		// Finally, for each node public key in the bucket, fetch all
		// the channels related to this particular node.
		return nodeMetaBucket.ForEach(func(k, v []byte) error {
			nodeChanBucket := openChanBucket.Bucket(k)
			if nodeChanBucket == nil {
				return nil
			}

			nodeChannels, err := d.fetchNodeChannels(openChanBucket,
				nodeChanBucket)
			if err != nil {
				return fmt.Errorf("unable to read channel for "+
					"node_key=%x: %v", k, err)
			}
			// TODO(roasbeef): simplify
			if pendingOnly {
				for _, channel := range nodeChannels {
					if channel.IsPending {
						channels = append(channels, channel)
					}
				}
			} else {
				channels = append(channels, nodeChannels...)
			}
			return nil
		})
	})

	return channels, err
}

// MarkChannelAsOpen records the finalization of the funding process and marks
// a channel as available for use. Additionally, the height at which this
// channel was opened will also be recorded within the database.
func (d *DB) MarkChannelAsOpen(outpoint *wire.OutPoint,
	openLoc lnwire.ShortChannelID) error {

	return d.Update(func(tx *bolt.Tx) error {
		openChanBucket := tx.Bucket(openChannelBucket)
		if openChanBucket == nil {
			return ErrNoActiveChannels
		}

		// Generate the database key, which will consist of the
		// IsPending prefix followed by the channel's outpoint.
		var b bytes.Buffer
		if err := writeOutpoint(&b, outpoint); err != nil {
			return err
		}
		keyPrefix := make([]byte, 3+b.Len())
		copy(keyPrefix[3:], b.Bytes())
		copy(keyPrefix[:3], isPendingPrefix)

		// For the database value, store a zero, since the channel is
		// no longer pending.
		scratch := make([]byte, 4)
		byteOrder.PutUint16(scratch[:2], uint16(0))
		if err := openChanBucket.Put(keyPrefix, scratch[:2]); err != nil {
			return err
		}

		// Finally, we'll also store the opening height for this
		// channel as well.
		confInfoKey := make([]byte, len(confInfoPrefix)+len(b.Bytes()))
		copy(confInfoKey[:len(confInfoPrefix)], confInfoPrefix)
		copy(confInfoKey[len(confInfoPrefix):], b.Bytes())

		confInfoBytes := openChanBucket.Get(confInfoKey)
		infoCopy := make([]byte, len(confInfoBytes))
		copy(infoCopy[:], confInfoBytes)

		byteOrder.PutUint64(infoCopy[4:], openLoc.ToUint64())

		return openChanBucket.Put(confInfoKey, infoCopy)
	})
}
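
// For reference, the records written above have the following informal
// layout (a sketch inferred from the code itself; the outpoint encoding is
// whatever writeOutpoint produces):
//
//	isPendingPrefix (3 bytes) || outpoint => uint16(0), i.e. not pending
//	confInfoPrefix || outpoint           => confirmation info, with the
//	                                        ShortChannelID packed as a
//	                                        uint64 at byte offset 4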

// FetchClosedChannels attempts to fetch all closed channels from the database.
// The pendingOnly bool toggles if channels that aren't yet fully closed should
// be returned in the response or not. When a channel was cooperatively closed,
// it becomes fully closed after a single confirmation. When a channel was
// forcibly closed, it will become fully closed after _all_ the pending funds
// (if any) have been swept.
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
	var chanSummaries []*ChannelCloseSummary

	if err := d.View(func(tx *bolt.Tx) error {
		closeBucket := tx.Bucket(closedChannelBucket)
		if closeBucket == nil {
			return ErrNoClosedChannels
		}

		return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) error {
			// The first byte of the summary is a bool which
			// indicates if this channel is pending closure, or has
			// been fully closed.
			isPending := summaryBytes[0]

			// If the query specified to only include pending
			// channels, then we'll skip any channels which aren't
			// currently pending.
			if pendingOnly && isPending != 0x01 {
				return nil
			}

			summaryReader := bytes.NewReader(summaryBytes)
			chanSummary, err := deserializeCloseChannelSummary(summaryReader)
			if err != nil {
				return err
			}

			chanSummaries = append(chanSummaries, chanSummary)
			return nil
		})
	}); err != nil {
		return nil, err
	}

	return chanSummaries, nil
}
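
// An illustrative sketch of querying close summaries (hypothetical caller
// code): pass true to see only channels whose closes are still pending, or
// false for every recorded close:
//
//	pendingCloses, err := db.FetchClosedChannels(true)
//	if err != nil {
//		// handle the error (e.g. ErrNoClosedChannels)
//	}
//	for _, summary := range pendingCloses {
//		// inspect each *ChannelCloseSummary
//	}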

// MarkChanFullyClosed marks a channel as fully closed within the database. A
// channel should be marked as fully closed if the channel was initially
// cooperatively closed and it has reached a single confirmation, or after all
// the pending funds in a channel that has been forcibly closed have been
// swept.
func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
	return d.Update(func(tx *bolt.Tx) error {
		var b bytes.Buffer
		if err := writeOutpoint(&b, chanPoint); err != nil {
			return err
		}

		chanID := b.Bytes()

		closedChanBucket, err := tx.CreateBucketIfNotExists(closedChannelBucket)
		if err != nil {
			return err
		}

		chanSummary := closedChanBucket.Get(chanID)
		if chanSummary == nil {
			return fmt.Errorf("no closed channel by that chanID found")
		}

		// Copy the stored summary, then flip its leading "pending"
		// byte to zero to mark the channel as fully closed.
		newSummary := make([]byte, len(chanSummary))
		copy(newSummary[:], chanSummary[:])
		newSummary[0] = 0x00

		return closedChanBucket.Put(chanID, newSummary)
	})
}
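
// Sketch of the expected call site (hypothetical caller logic): once the
// final sweep (or the cooperative close transaction) confirms, flip the
// summary's pending bit:
//
//	if err := db.MarkChanFullyClosed(&chanPoint); err != nil {
//		// the close summary for chanPoint may not exist yet
//	}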

// syncVersions is used for safe db version synchronization. It applies
// migration functions to the current database, and since they run within a
// single transaction, the previous state of the db is recovered if any
// error (or panic) occurs during migration.
func (d *DB) syncVersions(versions []version) error {
	meta, err := d.FetchMeta(nil)
	if err != nil {
		if err == ErrMetaNotFound {
			meta = &Meta{}
		} else {
			return err
		}
	}

	// If the current database version matches the latest version number,
	// then we don't need to perform any migrations.
	latestVersion := getLatestDBVersion(versions)
	log.Infof("Checking for schema update: latest_version=%v, "+
		"db_version=%v", latestVersion, meta.DbVersionNumber)
	if meta.DbVersionNumber == latestVersion {
		return nil
	}

	log.Infof("Performing database schema migration")

	// Otherwise, we fetch the migrations which need to be applied, and
	// execute them serially within a single database transaction to ensure
	// the migration is atomic.
	migrations, migrationVersions := getMigrationsToApply(versions,
		meta.DbVersionNumber)
	return d.Update(func(tx *bolt.Tx) error {
		for i, migration := range migrations {
			if migration == nil {
				continue
			}

			log.Infof("Applying migration #%v", migrationVersions[i])

			if err := migration(tx); err != nil {
				log.Infof("Unable to apply migration #%v",
					migrationVersions[i])
				return err
			}
		}

		meta.DbVersionNumber = latestVersion
		return putMeta(meta, tx)
	})
}
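
// To illustrate how syncVersions is driven (a hypothetical future schema
// change, not part of this file), a new version would be appended to
// dbVersions along with its migration function:
//
//	dbVersions = []version{
//		{number: 0, migration: nil},
//		{
//			number: 1,
//			migration: func(tx *bolt.Tx) error {
//				// Create a top-level bucket introduced in v1.
//				_, err := tx.CreateBucketIfNotExists(
//					[]byte("hypothetical-bucket"),
//				)
//				return err
//			},
//		},
//	}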

// ChannelGraph returns a new instance of the directed channel graph.
func (d *DB) ChannelGraph() *ChannelGraph {
	return &ChannelGraph{d}
}

// getLatestDBVersion returns the highest known database version number,
// i.e. the number of the last entry in the versions slice.
func getLatestDBVersion(versions []version) uint32 {
	return versions[len(versions)-1].number
}

// getMigrationsToApply retrieves the migration functions that should be
// applied to the database, along with the version number each migration
// brings the database to.
func getMigrationsToApply(versions []version, version uint32) ([]migration, []uint32) {
	migrations := make([]migration, 0, len(versions))
	migrationVersions := make([]uint32, 0, len(versions))

	for _, v := range versions {
		if v.number > version {
			migrations = append(migrations, v.migration)
			migrationVersions = append(migrationVersions, v.number)
		}
	}

	return migrations, migrationVersions
}
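
// As a worked example: if the database is currently at version 0 and the
// versions slice contains entries for versions 0, 1, and 2, then
//
//	migrations, versions := getMigrationsToApply(dbVersions, 0)
//
// returns the migration functions for versions 1 and 2 (in that order)
// alongside versions == []uint32{1, 2}.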