package channeldb

import (
	"bytes"
	"io/ioutil"
	"math/rand"
	"net"
	"os"
	"reflect"
	"runtime"
	"testing"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	_ "github.com/btcsuite/btcwallet/walletdb/bdb"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/channeldb/kvdb"
	"github.com/lightningnetwork/lnd/clock"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/shachain"
)

var (
	key = [chainhash.HashSize]byte{
		0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
		0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
		0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
		0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
	}
	rev = [chainhash.HashSize]byte{
		0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
		0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
		0x2d, 0xe7, 0x93, 0xe4,
	}
	testTx = &wire.MsgTx{
		Version: 1,
		TxIn: []*wire.TxIn{
			{
				PreviousOutPoint: wire.OutPoint{
					Hash:  chainhash.Hash{},
					Index: 0xffffffff,
				},
				SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
				Sequence:        0xffffffff,
			},
		},
		TxOut: []*wire.TxOut{
			{
				Value: 5000000000,
				PkScript: []byte{
					0x41, // OP_DATA_65
					0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
					0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
					0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
					0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
					0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
					0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
					0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
					0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
					0xa6, // 65-byte signature
					0xac, // OP_CHECKSIG
				},
			},
		},
		LockTime: 5,
	}
	privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:])

	wireSig, _ = lnwire.NewSigFromSignature(testSig)

	testClock = clock.NewTestClock(testNow)

	// defaultPendingHeight is the default height at which we set
	// channels to pending.
	defaultPendingHeight = 100

	// defaultAddr is the default address that we mark test channels pending
	// with.
	defaultAddr = &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18555,
	}
)

// makeTestDB creates a new instance of the ChannelDB for testing purposes. A
// callback which cleans up the created temporary directories is also returned
// and intended to be executed after the test completes.
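//
// Typical usage in the tests below (shown for illustration only):
//
//	cdb, cleanUp, err := makeTestDB()
//	if err != nil {
//		t.Fatalf("unable to make test database: %v", err)
//	}
//	defer cleanUp()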
func makeTestDB() (*DB, func(), error) {
	// First, create a temporary directory to be used for the duration of
	// this test.
	tempDirName, err := ioutil.TempDir("", "channeldb")
	if err != nil {
		return nil, nil, err
	}

	// Next, create channeldb for the first time.
	backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb")
	if err != nil {
		backendCleanup()
		return nil, nil, err
	}

	cdb, err := CreateWithBackend(backend, OptionClock(testClock))
	if err != nil {
		backendCleanup()
		os.RemoveAll(tempDirName)
		return nil, nil, err
	}

	cleanUp := func() {
		cdb.Close()
		backendCleanup()
		os.RemoveAll(tempDirName)
	}

	return cdb, cleanUp, nil
}

// testChannelParams is a struct which details the specifics of how a channel
// should be created.
type testChannelParams struct {
	// channel is the channel that will be written to disk.
	channel *OpenChannel

	// addr is the address that the channel will be synced pending with.
	addr *net.TCPAddr

	// pendingHeight is the height that the channel should be recorded as
	// pending.
	pendingHeight uint32

	// openChannel is set to true if the channel should be fully marked as
	// open. If this is false, the channel will be left in the pending
	// state.
	openChannel bool
}

// testChannelOption is a functional option which can be used to alter the
// default channel that is created for testing.
type testChannelOption func(params *testChannelParams)

// channelCommitmentOption is an option which allows overwriting of the default
// commitment height and balances. The local boolean can be used to set these
// balances on the local or remote commit.
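//
// For example (illustrative only), channelCommitmentOption(2, 1000, 2000, true)
// sets the local commitment to height 2 with a local balance of 1000 msat and
// a remote balance of 2000 msat.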
func channelCommitmentOption(height uint64, localBalance,
	remoteBalance lnwire.MilliSatoshi, local bool) testChannelOption {

	return func(params *testChannelParams) {
		if local {
			params.channel.LocalCommitment.CommitHeight = height
			params.channel.LocalCommitment.LocalBalance = localBalance
			params.channel.LocalCommitment.RemoteBalance = remoteBalance
		} else {
			params.channel.RemoteCommitment.CommitHeight = height
			params.channel.RemoteCommitment.LocalBalance = localBalance
			params.channel.RemoteCommitment.RemoteBalance = remoteBalance
		}
	}
}

// pendingHeightOption is an option which can be used to set the height at
// which the channel is marked as pending.
func pendingHeightOption(height uint32) testChannelOption {
	return func(params *testChannelParams) {
		params.pendingHeight = height
	}
}

// openChannelOption is an option which can be used to create a test channel
// that is open.
func openChannelOption() testChannelOption {
	return func(params *testChannelParams) {
		params.openChannel = true
	}
}

// localHtlcsOption is an option which allows setting of htlcs on the local
// commitment.
func localHtlcsOption(htlcs []HTLC) testChannelOption {
	return func(params *testChannelParams) {
		params.channel.LocalCommitment.Htlcs = htlcs
	}
}

// remoteHtlcsOption is an option which allows setting of htlcs on the remote
// commitment.
func remoteHtlcsOption(htlcs []HTLC) testChannelOption {
	return func(params *testChannelParams) {
		params.channel.RemoteCommitment.Htlcs = htlcs
	}
}

// localShutdownOption is an option which sets the local upfront shutdown
// script for the channel.
func localShutdownOption(addr lnwire.DeliveryAddress) testChannelOption {
	return func(params *testChannelParams) {
		params.channel.LocalShutdownScript = addr
	}
}

// remoteShutdownOption is an option which sets the remote upfront shutdown
// script for the channel.
func remoteShutdownOption(addr lnwire.DeliveryAddress) testChannelOption {
	return func(params *testChannelParams) {
		params.channel.RemoteShutdownScript = addr
	}
}

// fundingPointOption is an option which sets the funding outpoint of the
// channel.
func fundingPointOption(chanPoint wire.OutPoint) testChannelOption {
	return func(params *testChannelParams) {
		params.channel.FundingOutpoint = chanPoint
	}
}

// channelIDOption is an option which sets the short channel ID of the channel.
var channelIDOption = func(chanID lnwire.ShortChannelID) testChannelOption {
	return func(params *testChannelParams) {
		params.channel.ShortChannelID = chanID
	}
}

// createTestChannel writes a test channel to the database. It takes a set of
// functional options which can be used to overwrite the default of creating
// a pending channel that was broadcast at height 100.
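//
// For example (illustrative only), a fully open channel with a specific
// funding outpoint can be created with:
//
//	channel := createTestChannel(
//		t, cdb, openChannelOption(),
//		fundingPointOption(wire.OutPoint{Index: 1}),
//	)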
func createTestChannel(t *testing.T, cdb *DB,
	opts ...testChannelOption) *OpenChannel {

	// Create a default set of parameters.
	params := &testChannelParams{
		channel:       createTestChannelState(t, cdb),
		addr:          defaultAddr,
		openChannel:   false,
		pendingHeight: uint32(defaultPendingHeight),
	}

	// Apply all functional options to the test channel params.
	for _, o := range opts {
		o(params)
	}

	// Mark the channel as pending.
	err := params.channel.SyncPending(params.addr, params.pendingHeight)
	if err != nil {
		t.Fatalf("unable to save and serialize channel "+
			"state: %v", err)
	}

	// If the parameters do not specify that we should open the channel
	// fully, we return the pending channel.
	if !params.openChannel {
		return params.channel
	}

	// Mark the channel as open with the short channel id provided.
	err = params.channel.MarkAsOpen(params.channel.ShortChannelID)
	if err != nil {
		t.Fatalf("unable to mark channel open: %v", err)
	}

	return params.channel
}
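
// createTestChannelState creates an OpenChannel populated with static test
// data and a freshly seeded revocation producer and store. The channel is not
// written to disk; callers such as createTestChannel handle persistence.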
func createTestChannelState(t *testing.T, cdb *DB) *OpenChannel {
	// Create a revocation producer from our static test key, and seed the
	// revocation store with a single pre-image.
	producer, err := shachain.NewRevocationProducerFromBytes(key[:])
	if err != nil {
		t.Fatalf("could not get producer: %v", err)
	}
	store := shachain.NewRevocationStore()
	for i := 0; i < 1; i++ {
		preImage, err := producer.AtIndex(uint64(i))
		if err != nil {
			t.Fatalf("could not get "+
				"preimage: %v", err)
		}

		if err := store.AddNextEntry(preImage); err != nil {
			t.Fatalf("could not add entry: %v", err)
		}
	}

	localCfg := ChannelConfig{
		ChannelConstraints: ChannelConstraints{
			DustLimit:        btcutil.Amount(rand.Int63()),
			MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
			ChanReserve:      btcutil.Amount(rand.Int63()),
			MinHTLC:          lnwire.MilliSatoshi(rand.Int63()),
			MaxAcceptedHtlcs: uint16(rand.Int31()),
			CsvDelay:         uint16(rand.Int31()),
		},
		MultiSigKey: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
		},
		RevocationBasePoint: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
		},
		PaymentBasePoint: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
		},
		DelayBasePoint: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
		},
		HtlcBasePoint: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
		},
	}
	remoteCfg := ChannelConfig{
		ChannelConstraints: ChannelConstraints{
			DustLimit:        btcutil.Amount(rand.Int63()),
			MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
			ChanReserve:      btcutil.Amount(rand.Int63()),
			MinHTLC:          lnwire.MilliSatoshi(rand.Int63()),
			MaxAcceptedHtlcs: uint16(rand.Int31()),
			CsvDelay:         uint16(rand.Int31()),
		},
		MultiSigKey: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
			KeyLocator: keychain.KeyLocator{
				Family: keychain.KeyFamilyMultiSig,
				Index:  9,
			},
		},
		RevocationBasePoint: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
			KeyLocator: keychain.KeyLocator{
				Family: keychain.KeyFamilyRevocationBase,
				Index:  8,
			},
		},
		PaymentBasePoint: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
			KeyLocator: keychain.KeyLocator{
				Family: keychain.KeyFamilyPaymentBase,
				Index:  7,
			},
		},
		DelayBasePoint: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
			KeyLocator: keychain.KeyLocator{
				Family: keychain.KeyFamilyDelayBase,
				Index:  6,
			},
		},
		HtlcBasePoint: keychain.KeyDescriptor{
			PubKey: privKey.PubKey(),
			KeyLocator: keychain.KeyLocator{
				Family: keychain.KeyFamilyHtlcBase,
				Index:  5,
			},
		},
	}

	chanID := lnwire.NewShortChanIDFromInt(uint64(rand.Int63()))

	return &OpenChannel{
		ChanType:          SingleFunderBit | FrozenBit,
		ChainHash:         key,
		FundingOutpoint:   wire.OutPoint{Hash: key, Index: rand.Uint32()},
		ShortChannelID:    chanID,
		IsInitiator:       true,
		IsPending:         true,
		IdentityPub:       pubKey,
		Capacity:          btcutil.Amount(10000),
		LocalChanCfg:      localCfg,
		RemoteChanCfg:     remoteCfg,
		TotalMSatSent:     8,
		TotalMSatReceived: 2,
		LocalCommitment: ChannelCommitment{
			CommitHeight:  0,
			LocalBalance:  lnwire.MilliSatoshi(9000),
			RemoteBalance: lnwire.MilliSatoshi(3000),
			CommitFee:     btcutil.Amount(rand.Int63()),
			FeePerKw:      btcutil.Amount(5000),
			CommitTx:      testTx,
			CommitSig:     bytes.Repeat([]byte{1}, 71),
		},
		RemoteCommitment: ChannelCommitment{
			CommitHeight:  0,
			LocalBalance:  lnwire.MilliSatoshi(3000),
			RemoteBalance: lnwire.MilliSatoshi(9000),
			CommitFee:     btcutil.Amount(rand.Int63()),
			FeePerKw:      btcutil.Amount(5000),
			CommitTx:      testTx,
			CommitSig:     bytes.Repeat([]byte{1}, 71),
		},
		NumConfsRequired:        4,
		RemoteCurrentRevocation: privKey.PubKey(),
		RemoteNextRevocation:    privKey.PubKey(),
		RevocationProducer:      producer,
		RevocationStore:         store,
		Db:                      cdb,
		Packager:                NewChannelPackager(chanID),
		FundingTxn:              testTx,
		ThawHeight:              uint32(defaultPendingHeight),
	}
}
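
// TestOpenChannelPutGetDelete asserts that a channel (including its HTLCs) can
// be written to the database, read back intact, have its next revocation key
// swapped in, and finally be removed by closing it.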
func TestOpenChannelPutGetDelete(t *testing.T) {
	t.Parallel()

	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// Create the test channel state, with additional htlcs on the local
	// and remote commitment.
	localHtlcs := []HTLC{
		{
			Signature:     testSig.Serialize(),
			Incoming:      true,
			Amt:           10,
			RHash:         key,
			RefundTimeout: 1,
			OnionBlob:     []byte("onionblob"),
		},
	}

	remoteHtlcs := []HTLC{
		{
			Signature:     testSig.Serialize(),
			Incoming:      false,
			Amt:           10,
			RHash:         key,
			RefundTimeout: 1,
			OnionBlob:     []byte("onionblob"),
		},
	}

	state := createTestChannel(
		t, cdb,
		remoteHtlcsOption(remoteHtlcs),
		localHtlcsOption(localHtlcs),
	)

	openChannels, err := cdb.FetchOpenChannels(state.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch open channel: %v", err)
	}

	newState := openChannels[0]

	// The decoded channel state should be identical to what we stored
	// above.
	if !reflect.DeepEqual(state, newState) {
		t.Fatalf("channel state doesn't match: %v vs %v",
			spew.Sdump(state), spew.Sdump(newState))
	}

	// We'll also test that the channel is properly able to hot swap the
	// next revocation for the state machine. This tests the initial
	// post-funding revocation exchange.
	nextRevKey, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		t.Fatalf("unable to create new private key: %v", err)
	}
	if err := state.InsertNextRevocation(nextRevKey.PubKey()); err != nil {
		t.Fatalf("unable to update revocation: %v", err)
	}

	openChannels, err = cdb.FetchOpenChannels(state.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch open channel: %v", err)
	}
	updatedChan := openChannels[0]

	// Ensure that the revocation was set properly.
	if !nextRevKey.PubKey().IsEqual(updatedChan.RemoteNextRevocation) {
		t.Fatalf("next revocation wasn't updated")
	}

	// Finally to wrap up the test, delete the state of the channel within
	// the database. This involves "closing" the channel which removes all
	// written state, and creates a small "summary" elsewhere within the
	// database.
	closeSummary := &ChannelCloseSummary{
		ChanPoint:         state.FundingOutpoint,
		RemotePub:         state.IdentityPub,
		SettledBalance:    btcutil.Amount(500),
		TimeLockedBalance: btcutil.Amount(10000),
		IsPending:         false,
		CloseType:         CooperativeClose,
	}
	if err := state.CloseChannel(closeSummary); err != nil {
		t.Fatalf("unable to close channel: %v", err)
	}

	// As the channel is now closed, attempting to fetch all open channels
	// for our fake node ID should return an empty slice.
	openChans, err := cdb.FetchOpenChannels(state.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch open channels: %v", err)
	}
	if len(openChans) != 0 {
		t.Fatalf("all channels not deleted, found %v", len(openChans))
	}

	// Additionally, attempting to fetch all the open channels globally
	// should yield no results.
	openChans, err = cdb.FetchAllChannels()
	if err != nil {
		t.Fatal("unable to fetch all open chans")
	}
	if len(openChans) != 0 {
		t.Fatalf("all channels not deleted, found %v", len(openChans))
	}
}

// TestOptionalShutdown tests the reading and writing of channels with and
// without optional shutdown script fields.
func TestOptionalShutdown(t *testing.T) {
	local := lnwire.DeliveryAddress([]byte("local shutdown script"))
	remote := lnwire.DeliveryAddress([]byte("remote shutdown script"))

	if _, err := rand.Read(remote); err != nil {
		t.Fatalf("Could not create random script: %v", err)
	}

	tests := []struct {
		name           string
		localShutdown  lnwire.DeliveryAddress
		remoteShutdown lnwire.DeliveryAddress
	}{
		{
			name:           "no shutdown scripts",
			localShutdown:  nil,
			remoteShutdown: nil,
		},
		{
			name:           "local shutdown script",
			localShutdown:  local,
			remoteShutdown: nil,
		},
		{
			name:           "remote shutdown script",
			localShutdown:  nil,
			remoteShutdown: remote,
		},
		{
			name:           "both scripts set",
			localShutdown:  local,
			remoteShutdown: remote,
		},
	}

	for _, test := range tests {
		test := test

		t.Run(test.name, func(t *testing.T) {
			cdb, cleanUp, err := makeTestDB()
			if err != nil {
				t.Fatalf("unable to make test database: %v", err)
			}
			defer cleanUp()

			// Create a channel with upfront scripts set as
			// specified in the test.
			state := createTestChannel(
				t, cdb,
				localShutdownOption(test.localShutdown),
				remoteShutdownOption(test.remoteShutdown),
			)

			openChannels, err := cdb.FetchOpenChannels(
				state.IdentityPub,
			)
			if err != nil {
				t.Fatalf("unable to fetch open"+
					" channel: %v", err)
			}

			if len(openChannels) != 1 {
				t.Fatalf("Expected one channel open,"+
					" got: %v", len(openChannels))
			}

			if !bytes.Equal(openChannels[0].LocalShutdownScript,
				test.localShutdown) {

				t.Fatalf("Expected local: %x, got: %x",
					test.localShutdown,
					openChannels[0].LocalShutdownScript)
			}

			if !bytes.Equal(openChannels[0].RemoteShutdownScript,
				test.remoteShutdown) {

				t.Fatalf("Expected remote: %x, got: %x",
					test.remoteShutdown,
					openChannels[0].RemoteShutdownScript)
			}
		})
	}
}
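
// assertCommitmentEqual asserts that the two passed channel commitments are
// identical, reporting the caller's line number on failure.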
func assertCommitmentEqual(t *testing.T, a, b *ChannelCommitment) {
	if !reflect.DeepEqual(a, b) {
		_, _, line, _ := runtime.Caller(1)
		t.Fatalf("line %v: commitments don't match: %v vs %v",
			line, spew.Sdump(a), spew.Sdump(b))
	}
}
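
// TestChannelStateTransition tests advancing the channel's commitment state:
// updating the local commitment, appending and resolving a remote commit diff,
// advancing the commit chain tail, and reading the revocation log back from
// disk.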
func TestChannelStateTransition(t *testing.T) {
	t.Parallel()

	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// First create a minimal channel, then perform a full sync in order to
	// persist the data.
	channel := createTestChannel(t, cdb)

	// Add some HTLCs which were added during this new state transition.
	// Half of the HTLCs are incoming, while the other half are outgoing.
	var (
		htlcs   []HTLC
		htlcAmt lnwire.MilliSatoshi
	)
	for i := uint32(0); i < 10; i++ {
		var incoming bool
		if i > 5 {
			incoming = true
		}
		htlc := HTLC{
			Signature:     testSig.Serialize(),
			Incoming:      incoming,
			Amt:           10,
			RHash:         key,
			RefundTimeout: i,
			OutputIndex:   int32(i * 3),
			LogIndex:      uint64(i * 2),
			HtlcIndex:     uint64(i),
		}
		htlc.OnionBlob = make([]byte, 10)
		copy(htlc.OnionBlob[:], bytes.Repeat([]byte{2}, 10))
		htlcs = append(htlcs, htlc)
		htlcAmt += htlc.Amt
	}

	// Create a new channel delta which includes the above HTLCs, some
	// balance updates, and an increment of the current commitment height.
	// Additionally, modify the signature and commitment transaction.
	newSequence := uint32(129498)
	newSig := bytes.Repeat([]byte{3}, 71)
	newTx := channel.LocalCommitment.CommitTx.Copy()
	newTx.TxIn[0].Sequence = newSequence
	commitment := ChannelCommitment{
		CommitHeight:    1,
		LocalLogIndex:   2,
		LocalHtlcIndex:  1,
		RemoteLogIndex:  2,
		RemoteHtlcIndex: 1,
		LocalBalance:    lnwire.MilliSatoshi(1e8),
		RemoteBalance:   lnwire.MilliSatoshi(1e8),
		CommitFee:       55,
		FeePerKw:        99,
		CommitTx:        newTx,
		CommitSig:       newSig,
		Htlcs:           htlcs,
	}

	// First update the local node's broadcastable state, and also add a
	// set of unsigned acked remote updates in order to simulate a proper
	// state transition.
	unsignedAckedUpdates := []LogUpdate{
		{
			LogIndex: 2,
			UpdateMsg: &lnwire.UpdateAddHTLC{
				ChanID: lnwire.ChannelID{1, 2, 3},
			},
		},
	}

	err = channel.UpdateCommitment(&commitment, unsignedAckedUpdates)
	if err != nil {
		t.Fatalf("unable to update commitment: %v", err)
	}

	// Assert that the update is correctly written to the database.
	dbUnsignedAckedUpdates, err := channel.UnsignedAckedUpdates()
	if err != nil {
		t.Fatalf("unable to fetch dangling remote updates: %v", err)
	}
	if len(dbUnsignedAckedUpdates) != 1 {
		t.Fatalf("unexpected number of dangling remote updates")
	}
	if !reflect.DeepEqual(
		dbUnsignedAckedUpdates[0], unsignedAckedUpdates[0],
	) {
		t.Fatalf("unexpected update")
	}

	// The balances, new update, the HTLCs and the changes to the fake
	// commitment transaction along with the modified signature should all
	// have been updated.
	updatedChannel, err := cdb.FetchOpenChannels(channel.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch updated channel: %v", err)
	}
	assertCommitmentEqual(t, &commitment, &updatedChannel[0].LocalCommitment)
	numDiskUpdates, err := updatedChannel[0].CommitmentHeight()
	if err != nil {
		t.Fatalf("unable to read commitment height from disk: %v", err)
	}
	if numDiskUpdates != uint64(commitment.CommitHeight) {
		t.Fatalf("num disk updates doesn't match: %v vs %v",
			numDiskUpdates, commitment.CommitHeight)
	}

	// Attempting to query for a commitment diff should return
	// ErrNoPendingCommit as we haven't yet created a new state for them.
	_, err = channel.RemoteCommitChainTip()
	if err != ErrNoPendingCommit {
		t.Fatalf("expected ErrNoPendingCommit, instead got %v", err)
	}

	// To simulate us extending a new state to the remote party, we'll also
	// create a new commit diff for them.
	remoteCommit := commitment
	remoteCommit.LocalBalance = lnwire.MilliSatoshi(2e8)
	remoteCommit.RemoteBalance = lnwire.MilliSatoshi(3e8)
	remoteCommit.CommitHeight = 1
	commitDiff := &CommitDiff{
		Commitment: remoteCommit,
		CommitSig: &lnwire.CommitSig{
			ChanID:    lnwire.ChannelID(key),
			CommitSig: wireSig,
			HtlcSigs: []lnwire.Sig{
				wireSig,
				wireSig,
			},
		},
		LogUpdates: []LogUpdate{
			{
				LogIndex: 1,
				UpdateMsg: &lnwire.UpdateAddHTLC{
					ID:     1,
					Amount: lnwire.NewMSatFromSatoshis(100),
					Expiry: 25,
				},
			},
			{
				LogIndex: 2,
				UpdateMsg: &lnwire.UpdateAddHTLC{
					ID:     2,
					Amount: lnwire.NewMSatFromSatoshis(200),
					Expiry: 50,
				},
			},
		},
		OpenedCircuitKeys: []CircuitKey{},
		ClosedCircuitKeys: []CircuitKey{},
	}
	copy(commitDiff.LogUpdates[0].UpdateMsg.(*lnwire.UpdateAddHTLC).PaymentHash[:],
		bytes.Repeat([]byte{1}, 32))
	copy(commitDiff.LogUpdates[1].UpdateMsg.(*lnwire.UpdateAddHTLC).PaymentHash[:],
		bytes.Repeat([]byte{2}, 32))
	if err := channel.AppendRemoteCommitChain(commitDiff); err != nil {
		t.Fatalf("unable to add to commit chain: %v", err)
	}

	// The commitment tip should now match the commitment that we just
	// inserted.
	diskCommitDiff, err := channel.RemoteCommitChainTip()
	if err != nil {
		t.Fatalf("unable to fetch commit diff: %v", err)
	}
	if !reflect.DeepEqual(commitDiff, diskCommitDiff) {
		t.Fatalf("commit diffs don't match: %v vs %v", spew.Sdump(remoteCommit),
			spew.Sdump(diskCommitDiff))
	}

	// We'll save the old remote commitment as this will be added to the
	// revocation log shortly.
	oldRemoteCommit := channel.RemoteCommitment

	// Next, write to the log which tracks the necessary revocation state
	// needed to rectify any fishy behavior by the remote party. Modify the
	// current uncollapsed revocation state to simulate a state transition
	// by the remote party.
	channel.RemoteCurrentRevocation = channel.RemoteNextRevocation
	newPriv, err := btcec.NewPrivateKey(btcec.S256())
	if err != nil {
		t.Fatalf("unable to generate key: %v", err)
	}
	channel.RemoteNextRevocation = newPriv.PubKey()

	fwdPkg := NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight,
		diskCommitDiff.LogUpdates, nil)

	err = channel.AdvanceCommitChainTail(fwdPkg, nil)
	if err != nil {
		t.Fatalf("unable to append to revocation log: %v", err)
	}

	// At this point, the remote commit chain should be empty, and the
	// posted remote commitment should match the one we added as a diff
	// above.
	if _, err := channel.RemoteCommitChainTip(); err != ErrNoPendingCommit {
		t.Fatalf("expected ErrNoPendingCommit, instead got %v", err)
	}

	// We should be able to fetch the channel delta created above by its
	// update number with all the state properly reconstructed.
	diskPrevCommit, err := channel.FindPreviousState(
		oldRemoteCommit.CommitHeight,
	)
	if err != nil {
		t.Fatalf("unable to fetch past delta: %v", err)
	}

	// The two deltas (the original vs the on-disk version) should be
	// identical, and all HTLC data should properly be retained.
	assertCommitmentEqual(t, &oldRemoteCommit, diskPrevCommit)

	// The state number recovered from the tail of the revocation log
	// should be identical to this current state.
	logTail, err := channel.RevocationLogTail()
	if err != nil {
		t.Fatalf("unable to retrieve log: %v", err)
	}
	if logTail.CommitHeight != oldRemoteCommit.CommitHeight {
		t.Fatal("update number doesn't match")
	}

	oldRemoteCommit = channel.RemoteCommitment

	// Next modify the posted diff commitment slightly, then create a new
	// commitment diff and advance the tail.
	commitDiff.Commitment.CommitHeight = 2
	commitDiff.Commitment.LocalBalance -= htlcAmt
	commitDiff.Commitment.RemoteBalance += htlcAmt
	commitDiff.LogUpdates = []LogUpdate{}
	if err := channel.AppendRemoteCommitChain(commitDiff); err != nil {
		t.Fatalf("unable to add to commit chain: %v", err)
	}

	fwdPkg = NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight, nil, nil)

	err = channel.AdvanceCommitChainTail(fwdPkg, nil)
	if err != nil {
		t.Fatalf("unable to append to revocation log: %v", err)
	}

	// Once again, fetch the state and ensure it has been properly updated.
	prevCommit, err := channel.FindPreviousState(oldRemoteCommit.CommitHeight)
	if err != nil {
		t.Fatalf("unable to fetch past delta: %v", err)
	}
	assertCommitmentEqual(t, &oldRemoteCommit, prevCommit)

	// Once again, the state number recovered from the tail of the
	// revocation log should be identical to this current state.
	logTail, err = channel.RevocationLogTail()
	if err != nil {
		t.Fatalf("unable to retrieve log: %v", err)
	}
	if logTail.CommitHeight != oldRemoteCommit.CommitHeight {
		t.Fatal("update number doesn't match")
	}

	// The revocation state stored on-disk should now also be identical.
	updatedChannel, err = cdb.FetchOpenChannels(channel.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch updated channel: %v", err)
	}
	if !channel.RemoteCurrentRevocation.IsEqual(updatedChannel[0].RemoteCurrentRevocation) {
		t.Fatalf("revocation state was not synced")
	}
	if !channel.RemoteNextRevocation.IsEqual(updatedChannel[0].RemoteNextRevocation) {
		t.Fatalf("revocation state was not synced")
	}

	// Now attempt to delete the channel from the database.
	closeSummary := &ChannelCloseSummary{
		ChanPoint:         channel.FundingOutpoint,
		RemotePub:         channel.IdentityPub,
		SettledBalance:    btcutil.Amount(500),
		TimeLockedBalance: btcutil.Amount(10000),
		IsPending:         false,
		CloseType:         RemoteForceClose,
	}
	if err := updatedChannel[0].CloseChannel(closeSummary); err != nil {
		t.Fatalf("unable to delete updated channel: %v", err)
	}

	// If we attempt to fetch the target channel again, it shouldn't be
	// found.
	channels, err := cdb.FetchOpenChannels(channel.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch updated channels: %v", err)
	}
	if len(channels) != 0 {
		t.Fatalf("%v channels found, but none should be",
			len(channels))
	}

	// Attempting to find previous states on the channel should fail as the
	// revocation log has been deleted.
	_, err = updatedChannel[0].FindPreviousState(oldRemoteCommit.CommitHeight)
	if err == nil {
		t.Fatal("revocation log search should have failed")
	}
}
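
// TestFetchPendingChannels tests that pending channels are reported by
// FetchPendingChannels with the correct broadcast height, and that they are no
// longer reported once marked as open.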
func TestFetchPendingChannels(t *testing.T) {
	t.Parallel()

	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// Create a pending channel that was broadcast at height 99.
	const broadcastHeight = 99
	createTestChannel(t, cdb, pendingHeightOption(broadcastHeight))

	pendingChannels, err := cdb.FetchPendingChannels()
	if err != nil {
		t.Fatalf("unable to list pending channels: %v", err)
	}

	if len(pendingChannels) != 1 {
		t.Fatalf("incorrect number of pending channels: expecting %v, "+
			"got %v", 1, len(pendingChannels))
	}

	// The broadcast height of the pending channel should have been set
	// properly.
	if pendingChannels[0].FundingBroadcastHeight != broadcastHeight {
		t.Fatalf("broadcast height mismatch: expected %v, got %v",
			pendingChannels[0].FundingBroadcastHeight,
			broadcastHeight)
	}

	chanOpenLoc := lnwire.ShortChannelID{
		BlockHeight: 5,
		TxIndex:     10,
		TxPosition:  15,
	}
	err = pendingChannels[0].MarkAsOpen(chanOpenLoc)
	if err != nil {
		t.Fatalf("unable to mark channel as open: %v", err)
	}

	if pendingChannels[0].IsPending {
		t.Fatalf("channel marked open should no longer be pending")
	}

	if pendingChannels[0].ShortChanID() != chanOpenLoc {
		t.Fatalf("channel opening height not updated: expected %v, "+
			"got %v", spew.Sdump(pendingChannels[0].ShortChanID()),
			chanOpenLoc)
	}

	// Next, we'll re-fetch the channel to ensure that the open height was
	// properly set.
	openChans, err := cdb.FetchAllChannels()
	if err != nil {
		t.Fatalf("unable to fetch channels: %v", err)
	}
	if openChans[0].ShortChanID() != chanOpenLoc {
		t.Fatalf("channel opening heights don't match: expected %v, "+
			"got %v", spew.Sdump(openChans[0].ShortChanID()),
			chanOpenLoc)
	}
	if openChans[0].FundingBroadcastHeight != broadcastHeight {
		t.Fatalf("broadcast height mismatch: expected %v, got %v",
			openChans[0].FundingBroadcastHeight,
			broadcastHeight)
	}

	pendingChannels, err = cdb.FetchPendingChannels()
	if err != nil {
		t.Fatalf("unable to list pending channels: %v", err)
	}

	if len(pendingChannels) != 0 {
		t.Fatalf("incorrect number of pending channels: expecting %v, "+
			"got %v", 0, len(pendingChannels))
	}
}
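
// TestFetchClosedChannels tests that closed channels are reported by
// FetchClosedChannels both while pending close and after being marked as fully
// closed.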
func TestFetchClosedChannels(t *testing.T) {
	t.Parallel()

	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// Create an open channel in the database.
	state := createTestChannel(t, cdb, openChannelOption())

	// Next, close the channel by including a close channel summary in the
	// database.
	summary := &ChannelCloseSummary{
		ChanPoint:         state.FundingOutpoint,
		ClosingTXID:       rev,
		RemotePub:         state.IdentityPub,
		Capacity:          state.Capacity,
		SettledBalance:    state.LocalCommitment.LocalBalance.ToSatoshis(),
		TimeLockedBalance: state.RemoteCommitment.LocalBalance.ToSatoshis() + 10000,
		CloseType:         RemoteForceClose,
		IsPending:         true,
		LocalChanConfig:   state.LocalChanCfg,
	}
	if err := state.CloseChannel(summary); err != nil {
		t.Fatalf("unable to close channel: %v", err)
	}

	// Query the database to ensure that the channel has now been properly
	// closed. We should get the same result whether querying for pending
	// channels only, or not.
	pendingClosed, err := cdb.FetchClosedChannels(true)
	if err != nil {
		t.Fatalf("failed fetching closed channels: %v", err)
	}
	if len(pendingClosed) != 1 {
		t.Fatalf("incorrect number of pending closed channels: expecting %v, "+
			"got %v", 1, len(pendingClosed))
	}
	if !reflect.DeepEqual(summary, pendingClosed[0]) {
		t.Fatalf("database summaries don't match: expected %v got %v",
			spew.Sdump(summary), spew.Sdump(pendingClosed[0]))
	}
	closed, err := cdb.FetchClosedChannels(false)
	if err != nil {
		t.Fatalf("failed fetching all closed channels: %v", err)
	}
	if len(closed) != 1 {
		t.Fatalf("incorrect number of closed channels: expecting %v, "+
			"got %v", 1, len(closed))
	}
	if !reflect.DeepEqual(summary, closed[0]) {
		t.Fatalf("database summaries don't match: expected %v got %v",
			spew.Sdump(summary), spew.Sdump(closed[0]))
	}

	// Mark the channel as fully closed.
	err = cdb.MarkChanFullyClosed(&state.FundingOutpoint)
	if err != nil {
		t.Fatalf("failed fully closing channel: %v", err)
	}

	// The channel should no longer be considered pending, but should still
	// be retrieved when fetching all the closed channels.
	closed, err = cdb.FetchClosedChannels(false)
	if err != nil {
		t.Fatalf("failed fetching closed channels: %v", err)
	}
	if len(closed) != 1 {
		t.Fatalf("incorrect number of closed channels: expecting %v, "+
			"got %v", 1, len(closed))
	}
	pendingClose, err := cdb.FetchClosedChannels(true)
	if err != nil {
		t.Fatalf("failed fetching channels pending close: %v", err)
	}
	if len(pendingClose) != 0 {
		t.Fatalf("incorrect number of closed channels: expecting %v, "+
			"got %v", 0, len(pendingClose))
	}
}

// TestFetchWaitingCloseChannels ensures that the correct channels that are
// waiting to be closed are returned.
func TestFetchWaitingCloseChannels(t *testing.T) {
	t.Parallel()

	const numChannels = 2
	const broadcastHeight = 99

	// We'll start by creating two channels within our test database. One
	// of them will have its funding transaction confirmed on-chain, while
	// the other one will remain unconfirmed.
	db, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	channels := make([]*OpenChannel, numChannels)
	for i := 0; i < numChannels; i++ {
		// Create a pending channel in the database at the broadcast
		// height.
		channels[i] = createTestChannel(
			t, db, pendingHeightOption(broadcastHeight),
		)
	}

	// We'll only confirm the first one.
	channelConf := lnwire.ShortChannelID{
		BlockHeight: broadcastHeight + 1,
		TxIndex:     10,
		TxPosition:  15,
	}
	if err := channels[0].MarkAsOpen(channelConf); err != nil {
		t.Fatalf("unable to mark channel as open: %v", err)
	}

	// Then, we'll mark the channels as if their commitments were broadcast.
	// This would happen in the event of a force close and should make the
	// channels enter a state of waiting close.
	for _, channel := range channels {
		closeTx := wire.NewMsgTx(2)
		closeTx.AddTxIn(
			&wire.TxIn{
				PreviousOutPoint: channel.FundingOutpoint,
			},
		)

		if err := channel.MarkCommitmentBroadcasted(closeTx, true); err != nil {
			t.Fatalf("unable to mark commitment broadcast: %v", err)
		}

		// Now try marking a coop close with a nil tx. This should
		// succeed, but the closing tx shouldn't exist when queried.
		if err = channel.MarkCoopBroadcasted(nil, true); err != nil {
			t.Fatalf("unable to mark nil coop broadcast: %v", err)
		}
		_, err := channel.BroadcastedCooperative()
		if err != ErrNoCloseTx {
			t.Fatalf("expected no closing tx error, got: %v", err)
		}

		// Finally, modify the close tx deterministically and also mark
		// it as coop closed. Later we will test that distinct
		// transactions are returned for both coop and force closes.
		closeTx.TxIn[0].PreviousOutPoint.Index ^= 1
		if err := channel.MarkCoopBroadcasted(closeTx, true); err != nil {
			t.Fatalf("unable to mark coop broadcast: %v", err)
		}
	}

	// Now, we'll fetch all the channels waiting to be closed from the
	// database. We should expect to see both channels above, even if any of
	// them haven't had their funding transaction confirm on-chain.
	waitingCloseChannels, err := db.FetchWaitingCloseChannels()
	if err != nil {
		t.Fatalf("unable to fetch all waiting close channels: %v", err)
	}
	if len(waitingCloseChannels) != numChannels {
		t.Fatalf("expected %d channels waiting to be closed, got %d", 2,
			len(waitingCloseChannels))
	}
	expectedChannels := make(map[wire.OutPoint]struct{})
	for _, channel := range channels {
		expectedChannels[channel.FundingOutpoint] = struct{}{}
	}
	for _, channel := range waitingCloseChannels {
		if _, ok := expectedChannels[channel.FundingOutpoint]; !ok {
			t.Fatalf("expected channel %v to be waiting close",
				channel.FundingOutpoint)
		}

		chanPoint := channel.FundingOutpoint

		// Assert that the force close transaction is retrievable.
		forceCloseTx, err := channel.BroadcastedCommitment()
		if err != nil {
			t.Fatalf("Unable to retrieve commitment: %v", err)
		}

		if forceCloseTx.TxIn[0].PreviousOutPoint != chanPoint {
			t.Fatalf("expected outpoint %v, got %v",
				chanPoint,
				forceCloseTx.TxIn[0].PreviousOutPoint)
		}

		// Assert that the coop close transaction is retrievable.
		coopCloseTx, err := channel.BroadcastedCooperative()
		if err != nil {
			t.Fatalf("unable to retrieve coop close: %v", err)
		}

		chanPoint.Index ^= 1
		if coopCloseTx.TxIn[0].PreviousOutPoint != chanPoint {
			t.Fatalf("expected outpoint %v, got %v",
				chanPoint,
				coopCloseTx.TxIn[0].PreviousOutPoint)
		}
	}
}

// TestRefreshShortChanID asserts that RefreshShortChanID updates the in-memory
// state of another OpenChannel to reflect a preceding call to MarkAsOpen on a
// different OpenChannel.
func TestRefreshShortChanID(t *testing.T) {
	t.Parallel()

	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// First create a test channel.
	state := createTestChannel(t, cdb)

	// Next, locate the pending channel within the database.
	pendingChannels, err := cdb.FetchPendingChannels()
	if err != nil {
		t.Fatalf("unable to load pending channels: %v", err)
	}

	var pendingChannel *OpenChannel
	for _, channel := range pendingChannels {
		if channel.FundingOutpoint == state.FundingOutpoint {
			pendingChannel = channel
			break
		}
	}
	if pendingChannel == nil {
		t.Fatalf("unable to find pending channel with funding "+
			"outpoint=%v: %v", state.FundingOutpoint, err)
	}

	// Next, simulate the confirmation of the channel by marking it as open
	// within the database.
	chanOpenLoc := lnwire.ShortChannelID{
		BlockHeight: 105,
		TxIndex:     10,
		TxPosition:  15,
	}

	err = state.MarkAsOpen(chanOpenLoc)
	if err != nil {
		t.Fatalf("unable to mark channel open: %v", err)
	}

	// The short_chan_id of the channel that received MarkAsOpen should
	// reflect the open location, but the other pending channel should
	// remain unchanged.
	if state.ShortChanID() == pendingChannel.ShortChanID() {
		t.Fatalf("pending channel short_chan_ID should not have been " +
			"updated before refreshing short_chan_id")
	}

	// Now that the receiver's short channel id has been updated, check to
	// ensure that the channel packager's source has been updated as well.
	// This ensures that the packager will read and write to buckets
	// corresponding to the new short chan id, instead of the prior.
	if state.Packager.(*ChannelPackager).source != chanOpenLoc {
		t.Fatalf("channel packager source was not updated: want %v, "+
			"got %v", chanOpenLoc,
			state.Packager.(*ChannelPackager).source)
	}

	// Now, refresh the short channel ID of the pending channel.
	err = pendingChannel.RefreshShortChanID()
	if err != nil {
		t.Fatalf("unable to refresh short_chan_id: %v", err)
	}

	// This should result in both OpenChannels now having the same
	// ShortChanID.
	if state.ShortChanID() != pendingChannel.ShortChanID() {
		t.Fatalf("expected pending channel short_chan_id to be "+
			"refreshed: want %v, got %v", state.ShortChanID(),
			pendingChannel.ShortChanID())
	}

	// Check to ensure that the _other_ OpenChannel's packager source has
	// also been updated after the refresh. This ensures that the other
	// packagers will read and write to buckets corresponding to the
	// updated short chan id.
	if pendingChannel.Packager.(*ChannelPackager).source != chanOpenLoc {
		t.Fatalf("channel packager source was not updated: want %v, "+
			"got %v", chanOpenLoc,
			pendingChannel.Packager.(*ChannelPackager).source)
	}

	// Check to ensure that this channel is no longer pending and this field
	// is up to date.
	if pendingChannel.IsPending {
		t.Fatalf("channel pending state wasn't updated: want false got true")
	}
}
|
2020-02-21 14:24:23 +03:00
|
|
|
|
|
|
|
// TestCloseInitiator tests the setting of close initiator statuses for
// cooperative closes and local force closes.
func TestCloseInitiator(t *testing.T) {
	tests := []struct {
		name string
		// updateChannel is called to update the channel as broadcast,
		// cooperatively or not, based on the test's requirements.
		updateChannel    func(c *OpenChannel) error
		expectedStatuses []ChannelStatus
	}{
		{
			name: "local coop close",
			// Mark the channel as cooperatively closed, initiated
			// by the local party.
			updateChannel: func(c *OpenChannel) error {
				return c.MarkCoopBroadcasted(
					&wire.MsgTx{}, true,
				)
			},
			expectedStatuses: []ChannelStatus{
				ChanStatusLocalCloseInitiator,
				ChanStatusCoopBroadcasted,
			},
		},
		{
			name: "remote coop close",
			// Mark the channel as cooperatively closed, initiated
			// by the remote party.
			updateChannel: func(c *OpenChannel) error {
				return c.MarkCoopBroadcasted(
					&wire.MsgTx{}, false,
				)
			},
			expectedStatuses: []ChannelStatus{
				ChanStatusRemoteCloseInitiator,
				ChanStatusCoopBroadcasted,
			},
		},
		{
			name: "local force close",
			// Mark the channel's commitment as broadcast with
			// local initiator.
			updateChannel: func(c *OpenChannel) error {
				return c.MarkCommitmentBroadcasted(
					&wire.MsgTx{}, true,
				)
			},
			expectedStatuses: []ChannelStatus{
				ChanStatusLocalCloseInitiator,
				ChanStatusCommitBroadcasted,
			},
		},
	}

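	// Run each test case in a parallel subtest against a fresh test
	// database.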
	for _, test := range tests {
		test := test

		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			cdb, cleanUp, err := makeTestDB()
			if err != nil {
				t.Fatalf("unable to make test database: %v",
					err)
			}
			defer cleanUp()

			// Create an open channel.
			channel := createTestChannel(
				t, cdb, openChannelOption(),
			)

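			// Apply the test's close update to the channel.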
			err = test.updateChannel(channel)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			// Look up open channels in the database.
			dbChans, err := fetchChannels(
				cdb, pendingChannelFilter(false),
			)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if len(dbChans) != 1 {
				t.Fatalf("expected 1 channel, got: %v",
					len(dbChans))
			}

			// Check that the statuses that we expect were written
			// to disk.
			for _, status := range test.expectedStatuses {
				if !dbChans[0].HasChanStatus(status) {
					t.Fatalf("expected channel to have "+
						"status: %v, has status: %v",
						status, dbChans[0].chanStatus)
				}
			}
		})
	}
}

// TestCloseChannelStatus tests that the channel status is set on the
// historical channel when the channel is closed.
func TestCloseChannelStatus(t *testing.T) {
	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// Create an open channel.
	channel := createTestChannel(
		t, cdb, openChannelOption(),
	)

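	// Close the channel, applying the remote close initiator status so
	// that it is recorded alongside the close summary.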
	if err := channel.CloseChannel(
		&ChannelCloseSummary{
			ChanPoint: channel.FundingOutpoint,
			RemotePub: channel.IdentityPub,
		}, ChanStatusRemoteCloseInitiator,
	); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

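	// The closed channel should now be accessible as a historical channel.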
	histChan, err := channel.Db.FetchHistoricalChannel(
		&channel.FundingOutpoint,
	)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

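	// The status we passed to CloseChannel should have been persisted on
	// the historical channel.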
	if !histChan.HasChanStatus(ChanStatusRemoteCloseInitiator) {
		t.Fatalf("expected historical channel to have status: %v",
			ChanStatusRemoteCloseInitiator)
	}
}

// TestBalanceAtHeight tests lookup of our local and remote balance at a given
// height.
func TestBalanceAtHeight(t *testing.T) {
	const (
		// Values that will be set on our current local commit in
		// memory.
		localHeight        = 2
		localLocalBalance  = 1000
		localRemoteBalance = 1500

		// Values that will be set on our current remote commit in
		// memory.
		remoteHeight        = 3
		remoteLocalBalance  = 2000
		remoteRemoteBalance = 2500

		// Values that will be written to disk in the revocation log.
		oldHeight        = 0
		oldLocalBalance  = 200
		oldRemoteBalance = 300

		// Heights to test error cases.
		unknownHeight   = 1
		unreachedHeight = 4
	)

	// putRevokedState is a helper function used to put commitments in
	// the revocation log bucket to test lookup of balances at heights
	// that are not our current height.
	putRevokedState := func(c *OpenChannel, height uint64, local,
		remote lnwire.MilliSatoshi) error {

		err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
			chanBucket, err := fetchChanBucketRw(
				tx, c.IdentityPub, &c.FundingOutpoint,
				c.ChainHash,
			)
			if err != nil {
				return err
			}

			logKey := revocationLogBucket
			logBucket, err := chanBucket.CreateBucketIfNotExists(
				logKey,
			)
			if err != nil {
				return err
			}

			// Make a copy of our current commitment so we do not
			// need to re-fill all the required fields and copy in
			// our new desired values.
			commit := c.LocalCommitment
			commit.CommitHeight = height
			commit.LocalBalance = local
			commit.RemoteBalance = remote

			return appendChannelLogEntry(logBucket, &commit)
		})

		return err
	}

	tests := []struct {
		name                  string
		targetHeight          uint64
		expectedLocalBalance  lnwire.MilliSatoshi
		expectedRemoteBalance lnwire.MilliSatoshi
		expectedError         error
	}{
		{
			name:                  "target is current local height",
			targetHeight:          localHeight,
			expectedLocalBalance:  localLocalBalance,
			expectedRemoteBalance: localRemoteBalance,
			expectedError:         nil,
		},
		{
			name:                  "target is current remote height",
			targetHeight:          remoteHeight,
			expectedLocalBalance:  remoteLocalBalance,
			expectedRemoteBalance: remoteRemoteBalance,
			expectedError:         nil,
		},
		{
			name:                  "need to lookup commit",
			targetHeight:          oldHeight,
			expectedLocalBalance:  oldLocalBalance,
			expectedRemoteBalance: oldRemoteBalance,
			expectedError:         nil,
		},
		{
			name:                  "height not found",
			targetHeight:          unknownHeight,
			expectedLocalBalance:  0,
			expectedRemoteBalance: 0,
			expectedError:         errLogEntryNotFound,
		},
		{
			name:                  "height not reached",
			targetHeight:          unreachedHeight,
			expectedLocalBalance:  0,
			expectedRemoteBalance: 0,
			expectedError:         errHeightNotReached,
		},
	}

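	// Each case runs in its own parallel subtest with a fresh database
	// and channel.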
	for _, test := range tests {
		test := test

		t.Run(test.name, func(t *testing.T) {
			t.Parallel()

			cdb, cleanUp, err := makeTestDB()
			if err != nil {
				t.Fatalf("unable to make test database: %v",
					err)
			}
			defer cleanUp()

			// Create options to set the heights and balances of
			// our local and remote commitments.
			localCommitOpt := channelCommitmentOption(
				localHeight, localLocalBalance,
				localRemoteBalance, true,
			)

			remoteCommitOpt := channelCommitmentOption(
				remoteHeight, remoteLocalBalance,
				remoteRemoteBalance, false,
			)

			// Create an open channel.
			channel := createTestChannel(
				t, cdb, openChannelOption(),
				localCommitOpt, remoteCommitOpt,
			)

			// Write an older commit to disk.
			err = putRevokedState(channel, oldHeight,
				oldLocalBalance, oldRemoteBalance)
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

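			// Query the balances at the test's target height and
			// check them against our expectations.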
			local, remote, err := channel.BalancesAtHeight(
				test.targetHeight,
			)
			if err != test.expectedError {
				t.Fatalf("expected: %v, got: %v",
					test.expectedError, err)
			}

			if local != test.expectedLocalBalance {
				t.Fatalf("expected local: %v, got: %v",
					test.expectedLocalBalance, local)
			}

			if remote != test.expectedRemoteBalance {
				t.Fatalf("expected remote: %v, got: %v",
					test.expectedRemoteBalance, remote)
			}
		})
	}
}

// TestHasChanStatus asserts the behavior of HasChanStatus by checking various
// status flags, in addition to the special case of ChanStatusDefault, which
// is treated like a flag in the code base even though it isn't one.
func TestHasChanStatus(t *testing.T) {
	tests := []struct {
		name   string
		status ChannelStatus
		expHas map[ChannelStatus]bool
	}{
		{
			name:   "default",
			status: ChanStatusDefault,
			expHas: map[ChannelStatus]bool{
				ChanStatusDefault: true,
				ChanStatusBorked:  false,
			},
		},
		{
			name:   "single flag",
			status: ChanStatusBorked,
			expHas: map[ChannelStatus]bool{
				ChanStatusDefault: false,
				ChanStatusBorked:  true,
			},
		},
		{
			name:   "multiple flags",
			status: ChanStatusBorked | ChanStatusLocalDataLoss,
			expHas: map[ChannelStatus]bool{
				ChanStatusDefault:       false,
				ChanStatusBorked:        true,
				ChanStatusLocalDataLoss: true,
			},
		},
	}

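	// Each case builds an OpenChannel directly with the raw status bits
	// set, without touching the database.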
	for _, test := range tests {
		test := test

		t.Run(test.name, func(t *testing.T) {
			c := &OpenChannel{
				chanStatus: test.status,
			}

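			// Assert that HasChanStatus reports each queried
			// status exactly as expected.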
			for status, expHas := range test.expHas {
				has := c.HasChanStatus(status)
				if has == expHas {
					continue
				}

				t.Fatalf("expected chan status to "+
					"have %s? %t, got: %t",
					status, expHas, has)
			}
		})
	}
}