package channeldb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/boltdb/bolt"
	"github.com/roasbeef/btcd/chaincfg"
	"github.com/roasbeef/btcd/wire"
)

const (
	dbName = "channel.db"
)

var (
	// Big endian is the preferred byte order, due to cursor scans over
	// integer keys iterating in order.
	byteOrder = binary.BigEndian
)

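// A brief sketch of why big endian matters here (illustrative only; the key
// and counter names below are placeholders, not taken from this file):
// encoding integers with byteOrder makes their byte-wise, lexicographic order
// match their numeric order, so a bolt cursor visits them from smallest to
// largest.
//
//	var key [8]byte
//	byteOrder.PutUint64(key[:], updateNum)
//	// bucket.Put(key[:], value) -- cursor scans now return keys in
//	// ascending numeric order.
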
// bufPool is a pool of reusable byte buffers, used to cut down on allocations
// when serializing and deserializing database entries.
var bufPool = &sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

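// A hedged usage sketch for bufPool (not part of the original code): the
// usual sync.Pool pattern is to Get a buffer, Reset it before use, and Put it
// back once the serialized bytes have been copied out or written.
//
//	b := bufPool.Get().(*bytes.Buffer)
//	b.Reset()
//	// ... write serialized state into b ...
//	bufPool.Put(b)
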
// DB is the primary datastore for the LND daemon. The database stores
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
	store *bolt.DB

	netParams *chaincfg.Params
}

// Open opens an existing channeldb located at the passed path, configured for
// the passed chain parameters. If no database exists at the target path yet,
// a fresh one is created first.
// TODO(roasbeef): versioning?
func Open(dbPath string, netParams *chaincfg.Params) (*DB, error) {
	path := filepath.Join(dbPath, dbName)

	if !fileExists(path) {
		if err := createChannelDB(dbPath); err != nil {
			return nil, err
		}
	}

	bdb, err := bolt.Open(path, 0600, nil)
	if err != nil {
		return nil, err
	}

	return &DB{store: bdb, netParams: netParams}, nil
}

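// A minimal usage sketch, assuming a writable data directory and the testnet
// chain parameters (both placeholder choices, not taken from this file):
//
//	db, err := Open("/path/to/data", &chaincfg.TestNet3Params)
//	if err != nil {
//		// handle error
//	}
//	defer db.Close()
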
// Wipe completely deletes all saved state within all used buckets within the
// database. The deletion is done in a single transaction, therefore this
// operation is fully atomic.
func (d *DB) Wipe() error {
	return d.store.Update(func(tx *bolt.Tx) error {
		err := tx.DeleteBucket(openChannelBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}

		err = tx.DeleteBucket(closedChannelBucket)
		if err != nil && err != bolt.ErrBucketNotFound {
			return err
		}

		return nil
	})
}

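// A short hedged sketch of how Wipe might be used, e.g. to reset state
// between tests (the surrounding harness is assumed, not part of this file):
//
//	if err := db.Wipe(); err != nil {
//		// handle error
//	}
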
// Close terminates the underlying database handle manually.
func (d *DB) Close() error {
	return d.store.Close()
}

// createChannelDB creates and initializes a fresh version of channeldb. If
// the target path does not yet exist, it is created. Additionally, all
// required top-level buckets used within the database are created.
func createChannelDB(dbPath string) error {
	if !fileExists(dbPath) {
		if err := os.MkdirAll(dbPath, 0700); err != nil {
			return err
		}
	}

	path := filepath.Join(dbPath, dbName)
	bdb, err := bolt.Open(path, 0600, nil)
	if err != nil {
		return err
	}

	err = bdb.Update(func(tx *bolt.Tx) error {
		if _, err := tx.CreateBucket(openChannelBucket); err != nil {
			return err
		}

		if _, err := tx.CreateBucket(closedChannelBucket); err != nil {
			return err
		}

		if _, err := tx.CreateBucket(channelLogBucket); err != nil {
			return err
		}

		return nil
	})
	if err != nil {
		return fmt.Errorf("unable to create new channeldb: %v", err)
	}

	return bdb.Close()
}

// fileExists returns true if the file exists, and false otherwise.
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}

	return true
}

// FetchOpenChannels returns all currently active/open channels stored for the
// target nodeID. If no active channels with this node are known, a
// zero-length slice is returned.
func (d *DB) FetchOpenChannels(nodeID *wire.ShaHash) ([]*OpenChannel, error) {
	var channels []*OpenChannel
	err := d.store.View(func(tx *bolt.Tx) error {
		// Get the bucket dedicated to storing the meta-data for open
		// channels.
		openChanBucket := tx.Bucket(openChannelBucket)
		if openChanBucket == nil {
			return nil
		}

		// Within this top level bucket, fetch the bucket dedicated to
		// storing open channel data specific to the remote node.
		nodeChanBucket := openChanBucket.Bucket(nodeID[:])
		if nodeChanBucket == nil {
			return nil
		}

		// Once we have the node's channel bucket, iterate through each
		// item in the inner chan ID bucket. This bucket acts as an
		// index for all channels we currently have open with this
		// node.
		nodeChanIDBucket := nodeChanBucket.Bucket(chanIDBucket[:])
		if nodeChanIDBucket == nil {
			return nil
		}

		err := nodeChanIDBucket.ForEach(func(k, v []byte) error {
			outBytes := bytes.NewReader(k)
			chanID := &wire.OutPoint{}
			if err := readOutpoint(outBytes, chanID); err != nil {
				return err
			}

			oChannel, err := fetchOpenChannel(openChanBucket,
				nodeChanBucket, chanID)
			if err != nil {
				return err
			}
			oChannel.Db = d

			channels = append(channels, oChannel)
			return nil
		})
		if err != nil {
			return err
		}

		return nil
	})

	return channels, err
}

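// A minimal usage sketch, assuming nodeID has already been derived from the
// remote peer's identity (that derivation is outside this file):
//
//	channels, err := db.FetchOpenChannels(nodeID)
//	if err != nil {
//		// handle error
//	}
//	for _, channel := range channels {
//		// inspect or act on each open channel
//	}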