package channeldb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"net"
	"os"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcwallet/walletdb"
	"github.com/go-errors/errors"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	mig "github.com/lightningnetwork/lnd/channeldb/migration"
	"github.com/lightningnetwork/lnd/channeldb/migration12"
	"github.com/lightningnetwork/lnd/channeldb/migration13"
	"github.com/lightningnetwork/lnd/channeldb/migration16"
	"github.com/lightningnetwork/lnd/channeldb/migration20"
	"github.com/lightningnetwork/lnd/channeldb/migration21"
	"github.com/lightningnetwork/lnd/channeldb/migration_01_to_11"
	"github.com/lightningnetwork/lnd/clock"
	"github.com/lightningnetwork/lnd/lnwire"
)

const (
	// dbName is the file name of the channel database.
	dbName = "channel.db"

	// dbFilePermission is the default permission the channel database
	// file is created with.
	dbFilePermission = 0600
)

var (
	// ErrDryRunMigrationOK signals that a migration executed successfully,
	// but we intentionally did not commit the result.
	ErrDryRunMigrationOK = errors.New("dry run migration successful")
)

// migration is a function which takes a prior outdated version of the
// database instance and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database.
type migration func(tx kvdb.RwTx) error

type version struct {
	number    uint32
	migration migration
}

var (
	// dbVersions stores all versions of the database. If the current
	// version of the database doesn't match the latest version, this list
	// is used to retrieve all the migration functions that need to be
	// applied to the current db.
	dbVersions = []version{
		{
			// The base DB version requires no migration.
			number:    0,
			migration: nil,
		},
		{
			// The version of the database where two new indexes
			// for the update time of node and channel updates were
			// added.
			number:    1,
			migration: migration_01_to_11.MigrateNodeAndEdgeUpdateIndex,
		},
		{
			// The DB version that added the invoice event time
			// series.
			number:    2,
			migration: migration_01_to_11.MigrateInvoiceTimeSeries,
		},
		{
			// The DB version that updated the embedded invoice in
			// outgoing payments to match the new format.
			number:    3,
			migration: migration_01_to_11.MigrateInvoiceTimeSeriesOutgoingPayments,
		},
		{
			// The version of the database where every channel
			// always has two entries in the edges bucket. If
			// a policy is unknown, this will be represented
			// by a special byte sequence.
			number:    4,
			migration: migration_01_to_11.MigrateEdgePolicies,
		},
		{
			// The DB version where we persist each attempt to send
			// an HTLC to a payment hash, and track whether the
			// payment is in-flight, succeeded, or failed.
			number:    5,
			migration: migration_01_to_11.PaymentStatusesMigration,
		},
		{
			// The DB version that properly prunes stale entries
			// from the edge update index.
			number:    6,
			migration: migration_01_to_11.MigratePruneEdgeUpdateIndex,
		},
		{
			// The DB version that migrates the ChannelCloseSummary
			// to a format where optional fields are indicated with
			// boolean flags.
			number:    7,
			migration: migration_01_to_11.MigrateOptionalChannelCloseSummaryFields,
		},
		{
			// The DB version that changes the gossiper's message
			// store keys to account for the message's type and
			// ShortChannelID.
			number:    8,
			migration: migration_01_to_11.MigrateGossipMessageStoreKeys,
		},
		{
			// The DB version where the payments and payment
			// statuses are moved to being stored in a combined
			// bucket.
			number:    9,
			migration: migration_01_to_11.MigrateOutgoingPayments,
		},
		{
			// The DB version where we started to store legacy
			// payload information for all routes, as well as the
			// optional TLV records.
			number:    10,
			migration: migration_01_to_11.MigrateRouteSerialization,
		},
		{
			// Add invoice htlc and cltv delta fields.
			number:    11,
			migration: migration_01_to_11.MigrateInvoices,
		},
		{
			// Migrate to TLV invoice bodies, add payment address
			// and features, remove receipt.
			number:    12,
			migration: migration12.MigrateInvoiceTLV,
		},
		{
			// Migrate to multi-path payments.
			number:    13,
			migration: migration13.MigrateMPP,
		},
		{
			// Initialize payment address index and begin using it
			// as the default index, falling back to payment hash
			// index.
			number:    14,
			migration: mig.CreateTLB(payAddrIndexBucket),
		},
		{
			// Initialize payment index bucket which will be used
			// to index payments by sequence number. This index
			// will be used to allow more efficient ListPayments
			// queries.
			number:    15,
			migration: mig.CreateTLB(paymentsIndexBucket),
		},
		{
			// Add our existing payments to the index bucket
			// created in migration 15.
			number:    16,
			migration: migration16.MigrateSequenceIndex,
		},
		{
			// Create a top level bucket which will store extra
			// information about channel closes.
			number:    17,
			migration: mig.CreateTLB(closeSummaryBucket),
		},
		{
			// Create a top level bucket which holds information
			// about our peers.
			number:    18,
			migration: mig.CreateTLB(peersBucket),
		},
		{
			// Create a top level bucket which holds outpoint
			// information.
			number:    19,
			migration: mig.CreateTLB(outpointBucket),
		},
		{
			// Migrate some data to the outpoint index.
			number:    20,
			migration: migration20.MigrateOutpointIndex,
		},
		{
			// Migrate to length prefixed wire messages everywhere
			// in the database.
			number:    21,
			migration: migration21.MigrateDatabaseWireMessages,
		},
		{
			// Initialize set id index so that invoices can be
			// queried by individual htlc sets.
			number:    22,
			migration: mig.CreateTLB(setIDIndexBucket),
		},
	}

	// Big endian is the preferred byte order, due to cursor scans over
	// integer keys iterating in order.
	byteOrder = binary.BigEndian
)

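// migrateExampleIndex is a purely illustrative sketch and is not part of
// lnd's migration list; the bucket names here are hypothetical. It shows
// the shape of a hand-written migration: it receives a read/write
// transaction, mutates the bucket structure, and returns an error to abort
// the whole migration transaction. A real migration would be registered by
// appending a version entry referencing it to dbVersions above.
func migrateExampleIndex(tx kvdb.RwTx) error {
	// Create (or fetch) the top level bucket the index lives in. Creation
	// is a no-op when the bucket already exists, which keeps the
	// migration idempotent.
	bucket, err := tx.CreateTopLevelBucket([]byte("example-index"))
	if err != nil {
		return err
	}

	// Nested buckets are created the same way.
	_, err = bucket.CreateBucketIfNotExists([]byte("by-height"))
	return err
}
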
// DB is the primary datastore for the lnd daemon. The database stores
// information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data.
type DB struct {
	kvdb.Backend

	dbPath string
	graph  *ChannelGraph
	clock  clock.Clock
	dryRun bool
}

// Update is a wrapper around walletdb.Update which calls into the extended
// backend when available. This call is needed to be able to cast DB to
// ExtendedBackend. The passed reset function is called before the start of
// the transaction and can be used to reset intermediate state. As callers
// may expect retries of the f closure (depending on the database backend
// used), the reset function will be called before each retry.
func (db *DB) Update(f func(tx walletdb.ReadWriteTx) error, reset func()) error {
	if v, ok := db.Backend.(kvdb.ExtendedBackend); ok {
		return v.Update(f, reset)
	}

	reset()
	return walletdb.Update(db, f)
}

// View is a wrapper around walletdb.View which calls into the extended
// backend when available. This call is needed to be able to cast DB to
// ExtendedBackend. The passed reset function is called before the start of
// the transaction and can be used to reset intermediate state. As callers
// may expect retries of the f closure (depending on the database backend
// used), the reset function will be called before each retry.
func (db *DB) View(f func(tx walletdb.ReadTx) error, reset func()) error {
	if v, ok := db.Backend.(kvdb.ExtendedBackend); ok {
		return v.View(f, reset)
	}

	reset()
	return walletdb.View(db, f)
}

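// viewExample is an illustrative sketch (not part of the package API) of
// why the reset closure matters: any state accumulated inside the f
// closure must be cleared on retry, otherwise a backend that re-executes
// the transaction would observe duplicated results.
func viewExample(db *DB) ([]string, error) {
	var keys []string
	err := db.View(func(tx walletdb.ReadTx) error {
		bucket := tx.ReadBucket(openChannelBucket)
		if bucket == nil {
			return nil
		}
		return bucket.ForEach(func(k, _ []byte) error {
			keys = append(keys, string(k))
			return nil
		})
	}, func() {
		// Reset the accumulator so a retried transaction starts from
		// a clean slate.
		keys = nil
	})
	return keys, err
}
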
// PrintStats calls into the extended backend if available. This call is
// needed to be able to cast DB to ExtendedBackend.
func (db *DB) PrintStats() string {
	if v, ok := db.Backend.(kvdb.ExtendedBackend); ok {
		return v.PrintStats()
	}

	return "unimplemented"
}

// Open opens or creates channeldb. Any necessary schema migrations due
// to updates will take place as necessary.
// TODO(bhandras): deprecate this function.
func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
	opts := DefaultOptions()
	for _, modifier := range modifiers {
		modifier(&opts)
	}

	backend, err := kvdb.GetBoltBackend(&kvdb.BoltBackendConfig{
		DBPath:            dbPath,
		DBFileName:        dbName,
		NoFreelistSync:    opts.NoFreelistSync,
		AutoCompact:       opts.AutoCompact,
		AutoCompactMinAge: opts.AutoCompactMinAge,
		DBTimeout:         opts.DBTimeout,
	})
	if err != nil {
		return nil, err
	}

	db, err := CreateWithBackend(backend, modifiers...)
	if err == nil {
		db.dbPath = dbPath
	}
	return db, err
}

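// openExample is a minimal usage sketch; the path is made up. Opening the
// database applies any pending schema migrations before returning, so the
// returned *DB is always at the latest version.
func openExample() (*DB, error) {
	// Open with default options; OptionModifier values from this
	// package's options could be passed here as well.
	return Open("/tmp/lnd-data/graph/mainnet")
}
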
// CreateWithBackend creates a channeldb instance using the passed
// kvdb.Backend. Any necessary schema migrations due to updates will take
// place as necessary.
func CreateWithBackend(backend kvdb.Backend, modifiers ...OptionModifier) (*DB, error) {
	if err := initChannelDB(backend); err != nil {
		return nil, err
	}

	opts := DefaultOptions()
	for _, modifier := range modifiers {
		modifier(&opts)
	}

	chanDB := &DB{
		Backend: backend,
		clock:   opts.clock,
		dryRun:  opts.dryRun,
	}
	chanDB.graph = newChannelGraph(
		chanDB, opts.RejectCacheSize, opts.ChannelCacheSize,
		opts.BatchCommitInterval,
	)

	// Synchronize the version of database and apply migrations if needed.
	if err := chanDB.syncVersions(dbVersions); err != nil {
		backend.Close()
		return nil, err
	}

	return chanDB, nil
}

// Path returns the file path to the channel database.
func (d *DB) Path() string {
	return d.dbPath
}

var topLevelBuckets = [][]byte{
	openChannelBucket,
	closedChannelBucket,
	forwardingLogBucket,
	fwdPackagesKey,
	invoiceBucket,
	payAddrIndexBucket,
	setIDIndexBucket,
	paymentsIndexBucket,
	peersBucket,
	nodeInfoBucket,
	nodeBucket,
	edgeBucket,
	edgeIndexBucket,
	graphMetaBucket,
	metaBucket,
	closeSummaryBucket,
	outpointBucket,
}

// Wipe completely deletes all saved state within all used buckets within the
// database. The deletion is done in a single transaction, therefore this
// operation is fully atomic.
func (d *DB) Wipe() error {
	err := kvdb.Update(d, func(tx kvdb.RwTx) error {
		for _, tlb := range topLevelBuckets {
			err := tx.DeleteTopLevelBucket(tlb)
			if err != nil && err != kvdb.ErrBucketNotFound {
				return err
			}
		}
		return nil
	}, func() {})
	if err != nil {
		return err
	}

	return initChannelDB(d.Backend)
}

// initChannelDB creates and initializes a fresh version of channeldb. In
// the case that the target path has not yet been created or doesn't yet
// exist, then the path is created. Additionally, all required top-level
// buckets used within the database are created.
func initChannelDB(db kvdb.Backend) error {
	err := kvdb.Update(db, func(tx kvdb.RwTx) error {
		meta := &Meta{}
		// Check if DB is already initialized.
		err := fetchMeta(meta, tx)
		if err == nil {
			return nil
		}

		for _, tlb := range topLevelBuckets {
			if _, err := tx.CreateTopLevelBucket(tlb); err != nil {
				return err
			}
		}

		nodes := tx.ReadWriteBucket(nodeBucket)
		_, err = nodes.CreateBucket(aliasIndexBucket)
		if err != nil {
			return err
		}
		_, err = nodes.CreateBucket(nodeUpdateIndexBucket)
		if err != nil {
			return err
		}

		edges := tx.ReadWriteBucket(edgeBucket)
		if _, err := edges.CreateBucket(edgeIndexBucket); err != nil {
			return err
		}
		if _, err := edges.CreateBucket(edgeUpdateIndexBucket); err != nil {
			return err
		}
		if _, err := edges.CreateBucket(channelPointBucket); err != nil {
			return err
		}
		if _, err := edges.CreateBucket(zombieBucket); err != nil {
			return err
		}

		graphMeta := tx.ReadWriteBucket(graphMetaBucket)
		_, err = graphMeta.CreateBucket(pruneLogBucket)
		if err != nil {
			return err
		}

		meta.DbVersionNumber = getLatestDBVersion(dbVersions)
		return putMeta(meta, tx)
	}, func() {})
	if err != nil {
		return fmt.Errorf("unable to create new channeldb: %v", err)
	}

	return nil
}

// fileExists returns true if the file exists, and false otherwise.
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}

	return true
}

// FetchOpenChannels starts a new database transaction and returns all stored
// currently active/open channels associated with the target nodeID. In the
// case that no active channels are known to have been created with this
// node, then a zero-length slice is returned.
func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
	var channels []*OpenChannel
	err := kvdb.View(d, func(tx kvdb.RTx) error {
		var err error
		channels, err = d.fetchOpenChannels(tx, nodeID)
		return err
	}, func() {
		channels = nil
	})

	return channels, err
}

// fetchOpenChannels uses an existing database transaction and returns all
// stored currently active/open channels associated with the target nodeID.
// In the case that no active channels are known to have been created with
// this node, then a zero-length slice is returned.
func (db *DB) fetchOpenChannels(tx kvdb.RTx,
	nodeID *btcec.PublicKey) ([]*OpenChannel, error) {

	// Get the bucket dedicated to storing the metadata for open channels.
	openChanBucket := tx.ReadBucket(openChannelBucket)
	if openChanBucket == nil {
		return nil, nil
	}

	// Within this top level bucket, fetch the bucket dedicated to storing
	// open channel data specific to the remote node.
	pub := nodeID.SerializeCompressed()
	nodeChanBucket := openChanBucket.NestedReadBucket(pub)
	if nodeChanBucket == nil {
		return nil, nil
	}

	// Next, we'll need to go down an additional layer in order to retrieve
	// the channels for each chain the node knows of.
	var channels []*OpenChannel
	err := nodeChanBucket.ForEach(func(chainHash, v []byte) error {
		// If there's a value, it's not a bucket so ignore it.
		if v != nil {
			return nil
		}

		// If we've found a valid chainhash bucket, then we'll retrieve
		// that so we can extract all the channels.
		chainBucket := nodeChanBucket.NestedReadBucket(chainHash)
		if chainBucket == nil {
			return fmt.Errorf("unable to read bucket for chain=%x",
				chainHash[:])
		}

		// Finally, with both of the necessary buckets retrieved, fetch
		// all the active channels related to this node.
		nodeChannels, err := db.fetchNodeChannels(chainBucket)
		if err != nil {
			return fmt.Errorf("unable to read channel for "+
				"chain_hash=%x, node_key=%x: %v",
				chainHash[:], pub, err)
		}

		channels = append(channels, nodeChannels...)
		return nil
	})

	return channels, err
}

// fetchNodeChannels retrieves all active channels from the target
// chainBucket which is under a node's dedicated channel bucket. This
// function is typically used to fetch all the active channels related to a
// particular node.
func (db *DB) fetchNodeChannels(chainBucket kvdb.RBucket) ([]*OpenChannel, error) {
	var channels []*OpenChannel

	// A node may have channels on several chains, so for each known chain,
	// we'll extract all the channels.
	err := chainBucket.ForEach(func(chanPoint, v []byte) error {
		// If there's a value, it's not a bucket so ignore it.
		if v != nil {
			return nil
		}

		// Once we've found a valid channel bucket, we'll extract it
		// from the node's chain bucket.
		chanBucket := chainBucket.NestedReadBucket(chanPoint)

		var outPoint wire.OutPoint
		err := readOutpoint(bytes.NewReader(chanPoint), &outPoint)
		if err != nil {
			return err
		}
		oChannel, err := fetchOpenChannel(chanBucket, &outPoint)
		if err != nil {
			return fmt.Errorf("unable to read channel data for "+
				"chan_point=%v: %v", outPoint, err)
		}
		oChannel.Db = db

		channels = append(channels, oChannel)

		return nil
	})
	if err != nil {
		return nil, err
	}

	return channels, nil
}

// FetchChannel attempts to locate a channel specified by the passed channel
// point. If the channel cannot be found, then an error will be returned.
func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
	var (
		targetChan      *OpenChannel
		targetChanPoint bytes.Buffer
	)

	if err := writeOutpoint(&targetChanPoint, &chanPoint); err != nil {
		return nil, err
	}

	// chanScan will traverse the following bucket structure:
	//  * nodePub => chainHash => chanPoint
	//
	// At each level we go one further, ensuring that we're traversing the
	// proper key (that's actually a bucket). By only reading the bucket
	// structure and skipping fully decoding each channel, we save a good
	// bit of CPU as we don't need to do things like decompress public
	// keys.
	chanScan := func(tx kvdb.RTx) error {
		// Get the bucket dedicated to storing the metadata for open
		// channels.
		openChanBucket := tx.ReadBucket(openChannelBucket)
		if openChanBucket == nil {
			return ErrNoActiveChannels
		}

		// Within the node channel bucket are the set of node pubkeys
		// we have channels with. We don't know the entire set, so
		// we'll check them all.
		return openChanBucket.ForEach(func(nodePub, v []byte) error {
			// Ensure that this is a key the same size as a pubkey,
			// and also that it leads directly to a bucket.
			if len(nodePub) != 33 || v != nil {
				return nil
			}

			nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
			if nodeChanBucket == nil {
				return nil
			}

			// The next layer down is all the chains that this node
			// has channels on with us.
			return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
				// If there's a value, it's not a bucket so
				// ignore it.
				if v != nil {
					return nil
				}

				chainBucket := nodeChanBucket.NestedReadBucket(
					chainHash,
				)
				if chainBucket == nil {
					return fmt.Errorf("unable to read "+
						"bucket for chain=%x", chainHash[:])
				}

				// Finally we reach the leaf bucket that stores
				// all the chanPoints for this node.
				chanBucket := chainBucket.NestedReadBucket(
					targetChanPoint.Bytes(),
				)
				if chanBucket == nil {
					return nil
				}

				channel, err := fetchOpenChannel(
					chanBucket, &chanPoint,
				)
				if err != nil {
					return err
				}

				targetChan = channel
				targetChan.Db = d

				return nil
			})
		})
	}

	err := kvdb.View(d, chanScan, func() {})
	if err != nil {
		return nil, err
	}

	if targetChan != nil {
		return targetChan, nil
	}

	// If we can't find the channel, then we return with an error, as we
	// have nothing to backup.
	return nil, ErrChannelNotFound
}

// FetchAllChannels attempts to retrieve all open channels currently stored
// within the database, including pending open, fully open and channels
// waiting for a closing transaction to confirm.
func (d *DB) FetchAllChannels() ([]*OpenChannel, error) {
	return fetchChannels(d)
}

// FetchAllOpenChannels will return all channels that have the funding
// transaction confirmed, and are not waiting for a closing transaction to
// be confirmed.
func (d *DB) FetchAllOpenChannels() ([]*OpenChannel, error) {
	return fetchChannels(
		d,
		pendingChannelFilter(false),
		waitingCloseFilter(false),
	)
}

// FetchPendingChannels will return channels that have completed the process
// of generating and broadcasting funding transactions, but whose funding
// transactions have yet to be confirmed on the blockchain.
func (d *DB) FetchPendingChannels() ([]*OpenChannel, error) {
	return fetchChannels(d,
		pendingChannelFilter(true),
		waitingCloseFilter(false),
	)
}

// FetchWaitingCloseChannels will return all channels that have been opened,
// but are now waiting for a closing transaction to be confirmed.
//
// NOTE: This includes channels that are also pending to be opened.
func (d *DB) FetchWaitingCloseChannels() ([]*OpenChannel, error) {
	return fetchChannels(
		d, waitingCloseFilter(true),
	)
}

// fetchChannelsFilter applies a filter to channels retrieved in
// fetchChannels. A set of filters can be combined to filter across multiple
// dimensions.
type fetchChannelsFilter func(channel *OpenChannel) bool

// pendingChannelFilter returns a filter based on whether channels are
// pending (i.e., their funding transaction still needs to confirm). If
// pending is false, channels with confirmed funding transactions are
// returned.
func pendingChannelFilter(pending bool) fetchChannelsFilter {
	return func(channel *OpenChannel) bool {
		return channel.IsPending == pending
	}
}

// waitingCloseFilter returns a filter which filters channels based on
// whether they are awaiting the confirmation of their closing transaction.
// If waiting close is true, channels that have had their closing tx
// broadcast are included. If it is false, channels that are not awaiting
// confirmation of their close transaction are returned.
func waitingCloseFilter(waitingClose bool) fetchChannelsFilter {
	return func(channel *OpenChannel) bool {
		// If the channel is in any other state than Default,
		// then it means it is waiting to be closed.
		channelWaitingClose :=
			channel.ChanStatus() != ChanStatusDefault

		// Include the channel if it matches the value for
		// waiting close that we are filtering on.
		return channelWaitingClose == waitingClose
	}
}

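// initiatorFilter is an illustrative sketch of a custom filter and is not
// used by the package itself. Filters compose with the predefined ones,
// since fetchChannels only keeps channels for which every filter returns
// true. For example, fetchChannels(d, pendingChannelFilter(false),
// initiatorFilter(true)) would return confirmed channels that we funded.
func initiatorFilter(initiator bool) fetchChannelsFilter {
	return func(channel *OpenChannel) bool {
		return channel.IsInitiator == initiator
	}
}
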
// fetchChannels attempts to retrieve channels currently stored in the
// database. It takes a set of filters which are applied to each channel to
// obtain a set of channels with the desired set of properties. Only
// channels which have a true value returned for *all* of the filters will
// be returned. If no filters are provided, every channel in the open
// channels bucket will be returned.
func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error) {
	var channels []*OpenChannel

	err := kvdb.View(d, func(tx kvdb.RTx) error {
		// Get the bucket dedicated to storing the metadata for open
		// channels.
		openChanBucket := tx.ReadBucket(openChannelBucket)
		if openChanBucket == nil {
			return ErrNoActiveChannels
		}

		// Next, fetch the bucket dedicated to storing metadata related
		// to all nodes. All keys within this bucket are the serialized
		// public keys of all our direct counterparties.
		nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
		if nodeMetaBucket == nil {
			return fmt.Errorf("node bucket not created")
		}

		// Finally, for each node public key in the bucket, fetch all
		// the channels related to this particular node.
		return nodeMetaBucket.ForEach(func(k, v []byte) error {
			nodeChanBucket := openChanBucket.NestedReadBucket(k)
			if nodeChanBucket == nil {
				return nil
			}

			return nodeChanBucket.ForEach(func(chainHash, v []byte) error {
				// If there's a value, it's not a bucket so
				// ignore it.
				if v != nil {
					return nil
				}

				// If we've found a valid chainhash bucket,
				// then we'll retrieve that so we can extract
				// all the channels.
				chainBucket := nodeChanBucket.NestedReadBucket(
					chainHash,
				)
				if chainBucket == nil {
					return fmt.Errorf("unable to read "+
						"bucket for chain=%x", chainHash[:])
				}

				nodeChans, err := d.fetchNodeChannels(chainBucket)
				if err != nil {
					return fmt.Errorf("unable to read "+
						"channel for chain_hash=%x, "+
						"node_key=%x: %v", chainHash[:], k, err)
				}
				for _, channel := range nodeChans {
					// includeChannel indicates whether the
					// channel meets the criteria specified
					// by our filters.
					includeChannel := true

					// Run through each filter and check
					// whether the channel should be
					// included.
					for _, f := range filters {
						// If the channel fails the
						// filter, set includeChannel
						// to false and don't bother
						// checking the remaining
						// filters.
						if !f(channel) {
							includeChannel = false
							break
						}
					}

					// If the channel passed every filter,
					// include it in our set of channels.
					if includeChannel {
						channels = append(channels, channel)
					}
				}
				return nil
			})
		})
	}, func() {
		channels = nil
	})
	if err != nil {
		return nil, err
	}

	return channels, nil
}

// FetchClosedChannels attempts to fetch all closed channels from the
// database. The pendingOnly bool toggles if channels that aren't yet fully
// closed should be returned in the response or not. When a channel was
// cooperatively closed, it becomes fully closed after a single
// confirmation. When a channel was forcibly closed, it will become fully
// closed after _all_ the pending funds (if any) have been swept.
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
	var chanSummaries []*ChannelCloseSummary

	if err := kvdb.View(d, func(tx kvdb.RTx) error {
		closeBucket := tx.ReadBucket(closedChannelBucket)
		if closeBucket == nil {
			return ErrNoClosedChannels
		}

		return closeBucket.ForEach(func(chanID []byte, summaryBytes []byte) error {
			summaryReader := bytes.NewReader(summaryBytes)
			chanSummary, err := deserializeCloseChannelSummary(summaryReader)
			if err != nil {
				return err
			}

			// If the query specified to only include pending
			// channels, then we'll skip any channels which aren't
			// currently pending.
			if !chanSummary.IsPending && pendingOnly {
				return nil
			}

			chanSummaries = append(chanSummaries, chanSummary)
			return nil
		})
	}, func() {
		chanSummaries = nil
	}); err != nil {
		return nil, err
	}

	return chanSummaries, nil
}

// ErrClosedChannelNotFound signals that a closed channel could not be found
// in the channeldb.
var ErrClosedChannelNotFound = errors.New("unable to find closed channel summary")

// FetchClosedChannel queries for a channel close summary using the channel
// point of the channel in question.
func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, error) {
	var chanSummary *ChannelCloseSummary
	if err := kvdb.View(d, func(tx kvdb.RTx) error {
		closeBucket := tx.ReadBucket(closedChannelBucket)
		if closeBucket == nil {
			return ErrClosedChannelNotFound
		}

		var b bytes.Buffer
		var err error
		if err = writeOutpoint(&b, chanID); err != nil {
			return err
		}

		summaryBytes := closeBucket.Get(b.Bytes())
		if summaryBytes == nil {
			return ErrClosedChannelNotFound
		}

		summaryReader := bytes.NewReader(summaryBytes)
		chanSummary, err = deserializeCloseChannelSummary(summaryReader)

		return err
	}, func() {
		chanSummary = nil
	}); err != nil {
		return nil, err
	}

	return chanSummary, nil
}

// FetchClosedChannelForID queries for a channel close summary using the
// channel ID of the channel in question.
func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
	*ChannelCloseSummary, error) {

	var chanSummary *ChannelCloseSummary
	if err := kvdb.View(d, func(tx kvdb.RTx) error {
		closeBucket := tx.ReadBucket(closedChannelBucket)
		if closeBucket == nil {
			return ErrClosedChannelNotFound
		}

		// The first 30 bytes of the channel ID and outpoint will be
		// equal.
		cursor := closeBucket.ReadCursor()
		op, c := cursor.Seek(cid[:30])

		// We scan over all possible candidates for this channel ID.
		for ; op != nil && bytes.Compare(cid[:30], op[:30]) <= 0; op, c = cursor.Next() {
			var outPoint wire.OutPoint
			err := readOutpoint(bytes.NewReader(op), &outPoint)
			if err != nil {
				return err
			}

			// If the found outpoint does not correspond to this
			// channel ID, we continue.
			if !cid.IsChanPoint(&outPoint) {
				continue
			}

			// Deserialize the close summary and return.
			r := bytes.NewReader(c)
			chanSummary, err = deserializeCloseChannelSummary(r)
			if err != nil {
				return err
			}

			return nil
		}
		return ErrClosedChannelNotFound
	}, func() {
		chanSummary = nil
	}); err != nil {
		return nil, err
	}

	return chanSummary, nil
}

// MarkChanFullyClosed marks a channel as fully closed within the database. A
// channel should be marked as fully closed if the channel was initially
// cooperatively closed and it's reached a single confirmation, or after all
// the pending funds in a channel that has been forcibly closed have been
// swept.
func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
	return kvdb.Update(d, func(tx kvdb.RwTx) error {
		var b bytes.Buffer
		if err := writeOutpoint(&b, chanPoint); err != nil {
			return err
		}

		chanID := b.Bytes()

		closedChanBucket, err := tx.CreateTopLevelBucket(
			closedChannelBucket,
		)
		if err != nil {
			return err
		}

		chanSummaryBytes := closedChanBucket.Get(chanID)
		if chanSummaryBytes == nil {
			return fmt.Errorf("no closed channel for "+
				"chan_point=%v found", chanPoint)
		}

		chanSummaryReader := bytes.NewReader(chanSummaryBytes)
		chanSummary, err := deserializeCloseChannelSummary(
			chanSummaryReader,
		)
		if err != nil {
			return err
		}

		chanSummary.IsPending = false

		var newSummary bytes.Buffer
		err = serializeChannelCloseSummary(&newSummary, chanSummary)
		if err != nil {
			return err
		}

		err = closedChanBucket.Put(chanID, newSummary.Bytes())
		if err != nil {
			return err
		}

		// Now that the channel is closed, we'll check if we have any
		// other open channels with this peer. If we don't we'll
		// garbage collect it to ensure we don't establish persistent
		// connections to peers without open channels.
		return d.pruneLinkNode(tx, chanSummary.RemotePub)
	}, func() {})
}

// pruneLinkNode determines whether we should garbage collect a link node
// from the database due to no longer having any open channels with it. If
// there are any left, then this acts as a no-op.
func (db *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error {
	openChannels, err := db.fetchOpenChannels(tx, remotePub)
	if err != nil {
		return fmt.Errorf("unable to fetch open channels for peer %x: "+
			"%v", remotePub.SerializeCompressed(), err)
	}

	if len(openChannels) > 0 {
		return nil
	}

	log.Infof("Pruning link node %x with zero open channels from database",
		remotePub.SerializeCompressed())

	return db.deleteLinkNode(tx, remotePub)
}

// PruneLinkNodes attempts to prune all link nodes found within the database
// with whom we no longer have any open channels.
func (d *DB) PruneLinkNodes() error {
	return kvdb.Update(d, func(tx kvdb.RwTx) error {
		linkNodes, err := d.fetchAllLinkNodes(tx)
		if err != nil {
			return err
		}

		for _, linkNode := range linkNodes {
			err := d.pruneLinkNode(tx, linkNode.IdentityPub)
			if err != nil {
				return err
			}
		}

		return nil
	}, func() {})
}

// ChannelShell is a shell of a channel that is meant to be used for channel
// recovery purposes. It contains a minimal OpenChannel instance along with
// addresses for that target node.
type ChannelShell struct {
	// NodeAddrs is the set of addresses that this node is known to have
	// been reachable at in the past.
	NodeAddrs []net.Addr

	// Chan is a shell of an OpenChannel, it contains only the items
	// required to restore the channel on disk.
	Chan *OpenChannel
}

// RestoreChannelShells is a method that allows the caller to reconstruct
// the state of an OpenChannel from the ChannelShell. We'll attempt to write
// the new channel to disk, create a LinkNode instance with the passed node
// addresses, and finally create an edge within the graph for the channel as
// well. This method is idempotent, so repeated calls with the same set of
// channel shells won't modify the database after the initial call.
func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
	err := kvdb.Update(d, func(tx kvdb.RwTx) error {
		for _, channelShell := range channelShells {
			channel := channelShell.Chan

			// When we make a channel, we mark that the channel has
			// been restored, this will signal to other sub-systems
			// to not attempt to use the channel as if it was a
			// regular one.
			channel.chanStatus |= ChanStatusRestored

			// First, we'll attempt to create a new open channel
			// and link node for this channel. If the channel
			// already exists, then in order to ensure this method
			// is idempotent, we'll continue to the next step.
			channel.Db = d
			err := syncNewChannel(
				tx, channel, channelShell.NodeAddrs,
			)
			if err != nil {
				return err
			}
		}

		return nil
	}, func() {})
	if err != nil {
		return err
	}

	return nil
}

// AddrsForNode consults the graph and channel database for all addresses
// known to the passed node public key.
func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
	var (
		linkNode  *LinkNode
		graphNode LightningNode
	)

	dbErr := kvdb.View(d, func(tx kvdb.RTx) error {
		var err error

		linkNode, err = fetchLinkNode(tx, nodePub)
		if err != nil {
			return err
		}

		// We'll also query the graph for this peer to see if they have
		// any addresses that we don't currently have stored within the
		// link node database.
		nodes := tx.ReadBucket(nodeBucket)
		if nodes == nil {
			return ErrGraphNotFound
		}
		compressedPubKey := nodePub.SerializeCompressed()
		graphNode, err = fetchLightningNode(nodes, compressedPubKey)
		if err != nil && err != ErrGraphNodeNotFound {
			// If the node isn't found, then that's OK, as we still
			// have the link node data.
			return err
		}

		return nil
	}, func() {
		linkNode = nil
	})
	if dbErr != nil {
		return nil, dbErr
	}

	// Now that we have both sources of addrs for this node, we'll use a
	// map to de-duplicate any addresses between the two sources, and
	// produce a final list of the combined addrs.
	addrs := make(map[string]net.Addr)
	for _, addr := range linkNode.Addresses {
		addrs[addr.String()] = addr
	}
	for _, addr := range graphNode.Addresses {
		addrs[addr.String()] = addr
	}
	dedupedAddrs := make([]net.Addr, 0, len(addrs))
	for _, addr := range addrs {
		dedupedAddrs = append(dedupedAddrs, addr)
	}

	return dedupedAddrs, nil
}

// AbandonChannel attempts to remove the target channel from the open
// channel database. If the channel was already removed (has a closed
// channel entry), then we'll return a nil error. Otherwise, we'll insert a
// new close summary into the database.
func (db *DB) AbandonChannel(chanPoint *wire.OutPoint, bestHeight uint32) error {
	// With the chanPoint constructed, we'll attempt to find the target
	// channel in the database. If we can't find the channel, then we'll
	// return the error back to the caller.
	dbChan, err := db.FetchChannel(*chanPoint)
	switch {
	// If the channel wasn't found, then it's possible that it was already
	// abandoned from the database.
	case err == ErrChannelNotFound:
		_, closedErr := db.FetchClosedChannel(chanPoint)
		if closedErr != nil {
			return closedErr
		}

		// If the channel was already closed, then we don't return an
		// error as we'd like for this step to be repeatable.
		return nil
	case err != nil:
		return err
	}

	// Now that we've found the channel, we'll populate a close summary
	// for the channel, so we can store as much information for this
	// abandoned channel as possible. We also ensure that we set Pending
	// to false, to indicate that this channel has been "fully" closed.
	summary := &ChannelCloseSummary{
		CloseType:               Abandoned,
		ChanPoint:               *chanPoint,
		ChainHash:               dbChan.ChainHash,
		CloseHeight:             bestHeight,
		RemotePub:               dbChan.IdentityPub,
		Capacity:                dbChan.Capacity,
		SettledBalance:          dbChan.LocalCommitment.LocalBalance.ToSatoshis(),
		ShortChanID:             dbChan.ShortChanID(),
		RemoteCurrentRevocation: dbChan.RemoteCurrentRevocation,
		RemoteNextRevocation:    dbChan.RemoteNextRevocation,
		LocalChanConfig:         dbChan.LocalChanCfg,
	}

	// Finally, we'll close the channel in the DB, and return back to the
	// caller. We set ourselves as the close initiator because we
	// abandoned the channel.
	return dbChan.CloseChannel(summary, ChanStatusLocalCloseInitiator)
}

// syncVersions is used for safe db version synchronization. It applies
// migration functions to the current database and recovers the previous
// state of the db if at least one error/panic appeared during migration.
func (d *DB) syncVersions(versions []version) error {
	meta, err := d.FetchMeta(nil)
	if err != nil {
		if err == ErrMetaNotFound {
			meta = &Meta{}
		} else {
			return err
		}
	}

	latestVersion := getLatestDBVersion(versions)
	log.Infof("Checking for schema update: latest_version=%v, "+
		"db_version=%v", latestVersion, meta.DbVersionNumber)

	switch {

	// If the database reports a higher version than we are aware of, the
	// user is probably trying to revert to a prior version of lnd. We
	// fail here to prevent reversions and unintended corruption.
	case meta.DbVersionNumber > latestVersion:
		log.Errorf("Refusing to revert from db_version=%d to "+
			"lower version=%d", meta.DbVersionNumber,
			latestVersion)
		return ErrDBReversion

	// If the current database version matches the latest version number,
	// then we don't need to perform any migrations.
	case meta.DbVersionNumber == latestVersion:
		return nil
	}

	log.Infof("Performing database schema migration")

	// Otherwise, we fetch the migrations which need to be applied, and
	// execute them serially within a single database transaction to
	// ensure the migration is atomic.
	migrations, migrationVersions := getMigrationsToApply(
		versions, meta.DbVersionNumber,
	)
	return kvdb.Update(d, func(tx kvdb.RwTx) error {
		for i, migration := range migrations {
			if migration == nil {
				continue
			}

			log.Infof("Applying migration #%v", migrationVersions[i])

			if err := migration(tx); err != nil {
				log.Infof("Unable to apply migration #%v",
					migrationVersions[i])
				return err
			}
		}

		meta.DbVersionNumber = latestVersion
		err := putMeta(meta, tx)
		if err != nil {
			return err
		}

		// In dry-run mode, return an error to prevent the transaction
		// from committing.
		if d.dryRun {
			return ErrDryRunMigrationOK
		}

		return nil
	}, func() {})
}

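// dryRunExample is an illustrative sketch of the dry-run flow; the
// OptionDryRunMigration modifier is assumed to be provided by this
// package's options. With dry-run enabled, syncVersions executes all
// pending migrations but aborts the transaction with ErrDryRunMigrationOK,
// so the on-disk database is left untouched.
func dryRunExample(dbPath string) error {
	_, err := Open(dbPath, OptionDryRunMigration(true))
	if err == ErrDryRunMigrationOK {
		// The migrations ran cleanly but were rolled back on purpose.
		return nil
	}
	return err
}
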
// ChannelGraph returns a new instance of the directed channel graph.
func (d *DB) ChannelGraph() *ChannelGraph {
	return d.graph
}

func getLatestDBVersion(versions []version) uint32 {
	return versions[len(versions)-1].number
}

// getMigrationsToApply retrieves the migration functions that should be
// applied to the database.
func getMigrationsToApply(versions []version, version uint32) ([]migration, []uint32) {
	migrations := make([]migration, 0, len(versions))
	migrationVersions := make([]uint32, 0, len(versions))

	for _, v := range versions {
		if v.number > version {
			migrations = append(migrations, v.migration)
			migrationVersions = append(migrationVersions, v.number)
		}
	}

	return migrations, migrationVersions
}

// fetchHistoricalChanBucket returns the channel bucket for a given outpoint
// from the historical channel bucket. If the bucket does not exist,
// ErrNoHistoricalBucket is returned.
func fetchHistoricalChanBucket(tx kvdb.RTx,
	outPoint *wire.OutPoint) (kvdb.RBucket, error) {

	// First fetch the top level bucket which stores all data related to
	// historically stored channels.
	historicalChanBucket := tx.ReadBucket(historicalChannelBucket)
	if historicalChanBucket == nil {
		return nil, ErrNoHistoricalBucket
	}

	// With the bucket for the node and chain fetched, we can now go down
	// another level, for the channel itself.
	var chanPointBuf bytes.Buffer
	if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
		return nil, err
	}
	chanBucket := historicalChanBucket.NestedReadBucket(chanPointBuf.Bytes())
	if chanBucket == nil {
		return nil, ErrChannelNotFound
	}

	return chanBucket, nil
}

// FetchHistoricalChannel fetches open channel data from the historical
// channel bucket.
func (db *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) {
	var channel *OpenChannel
	err := kvdb.View(db, func(tx kvdb.RTx) error {
		chanBucket, err := fetchHistoricalChanBucket(tx, outPoint)
		if err != nil {
			return err
		}

		channel, err = fetchOpenChannel(chanBucket, outPoint)
		return err
	}, func() {
		channel = nil
	})
	if err != nil {
		return nil, err
	}

	return channel, nil
}

// MakeTestDB creates a new instance of the ChannelDB for testing purposes.
// A callback which cleans up the created temporary directories is also
// returned and intended to be executed after the test completes.
func MakeTestDB(modifiers ...OptionModifier) (*DB, func(), error) {
	// First, create a temporary directory to be used for the duration of
	// this test.
	tempDirName, err := ioutil.TempDir("", "channeldb")
	if err != nil {
		return nil, nil, err
	}

	// Next, create channeldb for the first time.
	backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb")
	if err != nil {
		backendCleanup()
		return nil, nil, err
	}

	cdb, err := CreateWithBackend(backend, modifiers...)
	if err != nil {
		backendCleanup()
		os.RemoveAll(tempDirName)
		return nil, nil, err
	}

	cleanUp := func() {
		cdb.Close()
		backendCleanup()
		os.RemoveAll(tempDirName)
	}

	return cdb, cleanUp, nil
}
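
// exampleTestUsage is an illustrative sketch of how a test would use
// MakeTestDB; the test itself is hypothetical. The returned cleanup closure
// should be deferred so the temporary backend and directory are removed
// even if the test fails.
func exampleTestUsage() error {
	cdb, cleanUp, err := MakeTestDB()
	if err != nil {
		return err
	}
	defer cleanUp()

	// Exercise the fresh database, e.g. confirm it starts with no open
	// channels.
	_, err = cdb.FetchAllChannels()
	return err
}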