package channeldb

import (
	"bytes"
	"io/ioutil"
	"net"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/shachain"
	"github.com/roasbeef/btcd/btcec"
	"github.com/roasbeef/btcd/chaincfg"
	"github.com/roasbeef/btcd/chaincfg/chainhash"
	"github.com/roasbeef/btcd/txscript"
	"github.com/roasbeef/btcd/wire"
	"github.com/roasbeef/btcutil"

	_ "github.com/roasbeef/btcwallet/walletdb/bdb"
)

var (
	netParams = &chaincfg.TestNet3Params

	key = [chainhash.HashSize]byte{
		0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
		0x68, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
		0xd, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
		0x1e, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
	}
	id = &wire.OutPoint{
		Hash: [chainhash.HashSize]byte{
			0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
			0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
			0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
			0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
		},
		Index: 9,
	}
	rev = [chainhash.HashSize]byte{
		0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
		0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
		0x2d, 0xe7, 0x93, 0xe4,
	}
	testTx = &wire.MsgTx{
		Version: 1,
		TxIn: []*wire.TxIn{
			{
				PreviousOutPoint: wire.OutPoint{
					Hash:  chainhash.Hash{},
					Index: 0xffffffff,
				},
				SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
				Sequence:        0xffffffff,
			},
		},
		TxOut: []*wire.TxOut{
			{
				Value: 5000000000,
				PkScript: []byte{
					0x41, // OP_DATA_65
					0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
					0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
					0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
					0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
					0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
					0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
					0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
					0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
					0xa6, // 65-byte signature
					0xac, // OP_CHECKSIG
				},
			},
		},
		LockTime: 5,
	}
	testOutpoint = &wire.OutPoint{
		Hash:  key,
		Index: 0,
	}
	privKey, pubKey = btcec.PrivKeyFromBytes(btcec.S256(), key[:])
)

// makeTestDB creates a new instance of the ChannelDB for testing purposes. A
// callback which cleans up the created temporary directories is also returned
// and intended to be executed after the test completes.
func makeTestDB() (*DB, func(), error) {
	// First, create a temporary directory to be used for the duration of
	// this test.
	tempDirName, err := ioutil.TempDir("", "channeldb")
	if err != nil {
		return nil, nil, err
	}

	// Next, create channeldb for the first time.
	cdb, err := Open(tempDirName)
	if err != nil {
		return nil, nil, err
	}

	cleanUp := func() {
		cdb.Close()
		os.RemoveAll(tempDirName)
	}

	return cdb, cleanUp, nil
}
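
// createTestChannelState returns an OpenChannel populated with deterministic
// test values for each field that channeldb persists, backed by the passed
// database.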
func createTestChannelState(cdb *DB) (*OpenChannel, error) {
	addr, err := btcutil.NewAddressPubKey(pubKey.SerializeCompressed(), netParams)
	if err != nil {
		return nil, err
	}

	script, err := txscript.MultiSigScript([]*btcutil.AddressPubKey{addr, addr}, 2)
	if err != nil {
		return nil, err
	}

	// Simulate 1000 channel updates by deriving each revocation preimage
	// in order and inserting it into the revocation store.
	producer, err := shachain.NewRevocationProducerFromBytes(key[:])
	if err != nil {
		return nil, err
	}
	store := shachain.NewRevocationStore()
	for i := 0; i < 1000; i++ {
		preImage, err := producer.AtIndex(uint64(i))
		if err != nil {
			return nil, err
		}

		if err := store.AddNextEntry(preImage); err != nil {
			return nil, err
		}
	}

	var obsfucator [6]byte
	copy(obsfucator[:], key[:])

	return &OpenChannel{
		IsInitiator:                true,
		IsPending:                  true,
		ChanType:                   SingleFunder,
		IdentityPub:                pubKey,
		ChanID:                     id,
		FeePerKw:                   btcutil.Amount(5000),
		TheirDustLimit:             btcutil.Amount(200),
		OurDustLimit:               btcutil.Amount(200),
		OurCommitKey:               privKey.PubKey(),
		TheirCommitKey:             pubKey,
		Capacity:                   btcutil.Amount(10000),
		OurBalance:                 btcutil.Amount(3000),
		TheirBalance:               btcutil.Amount(9000),
		OurCommitTx:                testTx,
		OurCommitSig:               bytes.Repeat([]byte{1}, 71),
		RevocationProducer:         producer,
		RevocationStore:            store,
		StateHintObsfucator:        obsfucator,
		FundingOutpoint:            testOutpoint,
		OurMultiSigKey:             privKey.PubKey(),
		TheirMultiSigKey:           privKey.PubKey(),
		FundingWitnessScript:       script,
		NumConfsRequired:           4,
		TheirCurrentRevocation:     privKey.PubKey(),
		TheirCurrentRevocationHash: key,
		OurDeliveryScript:          script,
		TheirDeliveryScript:        script,
		LocalCsvDelay:              5,
		RemoteCsvDelay:             9,
		NumUpdates:                 0,
		TotalSatoshisSent:          8,
		TotalSatoshisReceived:      2,
		CreationTime:               time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
		Db:                         cdb,
	}, nil
}
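
// TestOpenChannelPutGetDelete asserts that the full state of an OpenChannel,
// including an attached HTLC, survives a round trip through the database, and
// that closing the channel removes it from the set of open channels.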
func TestOpenChannelPutGetDelete(t *testing.T) {
	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// Create the test channel state, then add an additional fake HTLC
	// before syncing to disk.
	state, err := createTestChannelState(cdb)
	if err != nil {
		t.Fatalf("unable to create channel state: %v", err)
	}
	state.Htlcs = []*HTLC{
		{
			Incoming:        true,
			Amt:             10,
			RHash:           key,
			RefundTimeout:   1,
			RevocationDelay: 2,
		},
	}
	if err := state.FullSync(); err != nil {
		t.Fatalf("unable to save and serialize channel state: %v", err)
	}

	openChannels, err := cdb.FetchOpenChannels(state.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch open channel: %v", err)
	}

	newState := openChannels[0]

	// The decoded channel state should be identical to what we stored
	// above.
	if !state.IdentityPub.IsEqual(newState.IdentityPub) {
		t.Fatal("their id doesn't match")
	}
	if !reflect.DeepEqual(state.ChanID, newState.ChanID) {
		t.Fatal("chan IDs don't match")
	}
	if state.FeePerKw != newState.FeePerKw {
		t.Fatal("fee/kw doesn't match")
	}
	if state.TheirDustLimit != newState.TheirDustLimit {
		t.Fatal("their dust limit doesn't match")
	}
	if state.OurDustLimit != newState.OurDustLimit {
		t.Fatal("our dust limit doesn't match")
	}
	if state.IsInitiator != newState.IsInitiator {
		t.Fatal("initiator status doesn't match")
	}
	if state.ChanType != newState.ChanType {
		t.Fatal("channel type doesn't match")
	}

	if !bytes.Equal(state.OurCommitKey.SerializeCompressed(),
		newState.OurCommitKey.SerializeCompressed()) {
		t.Fatal("our commit key doesn't match")
	}
	if !bytes.Equal(state.TheirCommitKey.SerializeCompressed(),
		newState.TheirCommitKey.SerializeCompressed()) {
		t.Fatal("their commit key doesn't match")
	}

	if state.Capacity != newState.Capacity {
		t.Fatalf("capacity doesn't match: %v vs %v", state.Capacity,
			newState.Capacity)
	}
	if state.OurBalance != newState.OurBalance {
		t.Fatal("our balance doesn't match")
	}
	if state.TheirBalance != newState.TheirBalance {
		t.Fatal("their balance doesn't match")
	}
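
	// The original and decoded commitment transactions should serialize to
	// identical bytes.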
	var b1, b2 bytes.Buffer
	if err := state.OurCommitTx.Serialize(&b1); err != nil {
		t.Fatal("unable to serialize transaction")
	}
	if err := newState.OurCommitTx.Serialize(&b2); err != nil {
		t.Fatal("unable to serialize transaction")
	}
	if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
		t.Fatal("ourCommitTx doesn't match")
	}
	if !bytes.Equal(newState.OurCommitSig, state.OurCommitSig) {
		t.Fatal("commit sigs don't match")
	}

	// TODO(roasbeef): replace with a single equal?
	if !reflect.DeepEqual(state.FundingOutpoint, newState.FundingOutpoint) {
		t.Fatal("funding outpoint doesn't match")
	}

	if !bytes.Equal(state.OurMultiSigKey.SerializeCompressed(),
		newState.OurMultiSigKey.SerializeCompressed()) {
		t.Fatal("our multisig key doesn't match")
	}
	if !bytes.Equal(state.TheirMultiSigKey.SerializeCompressed(),
		newState.TheirMultiSigKey.SerializeCompressed()) {
		t.Fatal("their multisig key doesn't match")
	}
	if !bytes.Equal(state.FundingWitnessScript, newState.FundingWitnessScript) {
		t.Fatal("funding witness script doesn't match")
	}

	// The local and remote delivery scripts should be identical.
	if !bytes.Equal(state.OurDeliveryScript, newState.OurDeliveryScript) {
		t.Fatal("our delivery address doesn't match")
	}
	if !bytes.Equal(state.TheirDeliveryScript, newState.TheirDeliveryScript) {
		t.Fatal("their delivery address doesn't match")
	}

	if state.NumUpdates != newState.NumUpdates {
		t.Fatalf("num updates doesn't match: %v vs %v",
			state.NumUpdates, newState.NumUpdates)
	}
	if state.RemoteCsvDelay != newState.RemoteCsvDelay {
		t.Fatalf("csv delay doesn't match: %v vs %v",
			state.RemoteCsvDelay, newState.RemoteCsvDelay)
	}
	if state.LocalCsvDelay != newState.LocalCsvDelay {
		t.Fatalf("csv delay doesn't match: %v vs %v",
			state.LocalCsvDelay, newState.LocalCsvDelay)
	}
	if state.TotalSatoshisSent != newState.TotalSatoshisSent {
		t.Fatalf("satoshis sent doesn't match: %v vs %v",
			state.TotalSatoshisSent, newState.TotalSatoshisSent)
	}
	if state.TotalSatoshisReceived != newState.TotalSatoshisReceived {
		t.Fatal("satoshis received doesn't match")
	}
	if state.NumConfsRequired != newState.NumConfsRequired {
		t.Fatalf("num confs required doesn't match: %v vs %v",
			state.NumConfsRequired, newState.NumConfsRequired)
	}

	if state.CreationTime.Unix() != newState.CreationTime.Unix() {
		t.Fatal("creation time doesn't match")
	}

	// The local and remote producers should be identical.
	var oldBuf bytes.Buffer
	err = state.RevocationProducer.Encode(&oldBuf)
	if err != nil {
		t.Fatalf("can't convert old revocation producer to bytes: %v",
			err)
	}

	var newBuf bytes.Buffer
	err = newState.RevocationProducer.Encode(&newBuf)
	if err != nil {
		t.Fatalf("can't convert new revocation producer to bytes: %v",
			err)
	}

	if !bytes.Equal(oldBuf.Bytes(), newBuf.Bytes()) {
		t.Fatal("local producers don't match")
	}

	oldBuf.Reset()
	newBuf.Reset()

	err = state.RevocationStore.Encode(&oldBuf)
	if err != nil {
		t.Fatalf("unable to serialize old remote store: %v", err)
	}
	err = newState.RevocationStore.Encode(&newBuf)
	if err != nil {
		t.Fatalf("unable to serialize new remote store: %v", err)
	}
	if !bytes.Equal(oldBuf.Bytes(), newBuf.Bytes()) {
		t.Fatal("remote stores don't match")
	}
	if !newState.TheirCurrentRevocation.IsEqual(state.TheirCurrentRevocation) {
		t.Fatal("revocation keys don't match")
	}
	if !bytes.Equal(newState.TheirCurrentRevocationHash[:], state.TheirCurrentRevocationHash[:]) {
		t.Fatal("revocation hashes don't match")
	}
	if !reflect.DeepEqual(state.Htlcs[0], newState.Htlcs[0]) {
		t.Fatalf("htlcs don't match: %v vs %v", spew.Sdump(state.Htlcs[0]),
			spew.Sdump(newState.Htlcs[0]))
	}
	if !bytes.Equal(state.StateHintObsfucator[:],
		newState.StateHintObsfucator[:]) {
		t.Fatal("obfuscators don't match")
	}

	// Finally to wrap up the test, delete the state of the channel within
	// the database. This involves "closing" the channel which removes all
	// written state, and creates a small "summary" elsewhere within the
	// database.
	closeSummary := &ChannelCloseSummary{
		ChanPoint:         *state.ChanID,
		RemotePub:         state.IdentityPub,
		SettledBalance:    btcutil.Amount(500),
		TimeLockedBalance: btcutil.Amount(10000),
		IsPending:         false,
		CloseType:         CooperativeClose,
	}
	if err := state.CloseChannel(closeSummary); err != nil {
		t.Fatalf("unable to close channel: %v", err)
	}

	// As the channel is now closed, attempting to fetch all open channels
	// for our fake node ID should return an empty slice.
	openChans, err := cdb.FetchOpenChannels(state.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch open channels: %v", err)
	}
	if len(openChans) != 0 {
		t.Fatalf("all channels not deleted, found %v", len(openChans))
	}

	// Additionally, attempting to fetch all the open channels globally
	// should yield no results.
	openChans, err = cdb.FetchAllChannels()
	if err != nil {
		t.Fatalf("unable to fetch all open chans: %v", err)
	}
	if len(openChans) != 0 {
		t.Fatalf("all channels not deleted, found %v", len(openChans))
	}
}
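
// TestChannelStateTransition simulates a pair of commitment state updates,
// checking that channel deltas, the revocation log, and the broadcastable
// commitment state stored on disk stay consistent with the in-memory channel.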
func TestChannelStateTransition(t *testing.T) {
	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// First create a minimal channel, then perform a full sync in order to
	// persist the data.
	channel, err := createTestChannelState(cdb)
	if err != nil {
		t.Fatalf("unable to create channel state: %v", err)
	}
	if err := channel.FullSync(); err != nil {
		t.Fatalf("unable to save and serialize channel state: %v", err)
	}

	// Add some HTLCs which were added during this new state transition.
	// Roughly half of the HTLCs are incoming, while the rest are outgoing.
	var (
		htlcs   []*HTLC
		htlcAmt btcutil.Amount
	)
	for i := uint32(0); i < 10; i++ {
		var incoming bool
		if i > 5 {
			incoming = true
		}
		htlc := &HTLC{
			Incoming:        incoming,
			Amt:             10,
			RHash:           key,
			RefundTimeout:   i,
			RevocationDelay: i + 2,
			OutputIndex:     uint16(i * 3),
		}
		htlcs = append(htlcs, htlc)
		htlcAmt += htlc.Amt
	}

	// Create a new channel delta which includes the above HTLCs, some
	// balance updates, and an increment of the current commitment height.
	// Additionally, modify the signature and commitment transaction.
	newSequence := uint32(129498)
	newSig := bytes.Repeat([]byte{3}, 71)
	newTx := channel.OurCommitTx.Copy()
	newTx.TxIn[0].Sequence = newSequence
	delta := &ChannelDelta{
		LocalBalance:  btcutil.Amount(1e8),
		RemoteBalance: btcutil.Amount(1e8),
		Htlcs:         htlcs,
		UpdateNum:     1,
	}

	// First update the local node's broadcastable state.
	if err := channel.UpdateCommitment(newTx, newSig, delta); err != nil {
		t.Fatalf("unable to update commitment: %v", err)
	}

	// The balances, new update, the HTLCs and the changes to the fake
	// commitment transaction along with the modified signature should all
	// have been updated.
	updatedChannel, err := cdb.FetchOpenChannels(channel.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch updated channel: %v", err)
	}
	if !bytes.Equal(updatedChannel[0].OurCommitSig, newSig) {
		t.Fatalf("sigs don't match %x vs %x",
			updatedChannel[0].OurCommitSig, newSig)
	}
	if updatedChannel[0].OurCommitTx.TxIn[0].Sequence != newSequence {
		t.Fatalf("sequence numbers don't match: %v vs %v",
			updatedChannel[0].OurCommitTx.TxIn[0].Sequence, newSequence)
	}
	if updatedChannel[0].OurBalance != delta.LocalBalance {
		t.Fatalf("local balances don't match: %v vs %v",
			updatedChannel[0].OurBalance, delta.LocalBalance)
	}
	if updatedChannel[0].TheirBalance != delta.RemoteBalance {
		t.Fatalf("remote balances don't match: %v vs %v",
			updatedChannel[0].TheirBalance, delta.RemoteBalance)
	}
	if updatedChannel[0].NumUpdates != uint64(delta.UpdateNum) {
		t.Fatalf("update # doesn't match: %v vs %v",
			updatedChannel[0].NumUpdates, delta.UpdateNum)
	}
	numDiskUpdates, err := updatedChannel[0].CommitmentHeight()
	if err != nil {
		t.Fatalf("unable to read commitment height from disk: %v", err)
	}
	if numDiskUpdates != uint64(delta.UpdateNum) {
		t.Fatalf("num disk updates doesn't match: %v vs %v",
			numDiskUpdates, delta.UpdateNum)
	}
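
	// Each HTLC read back from disk should match the set we added above.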
	for i := 0; i < len(updatedChannel[0].Htlcs); i++ {
		diskHTLC := updatedChannel[0].Htlcs[i]
		originalHTLC := channel.Htlcs[i]
		if !reflect.DeepEqual(originalHTLC, diskHTLC) {
			t.Fatalf("htlcs don't match: %v vs %v",
				spew.Sdump(originalHTLC),
				spew.Sdump(diskHTLC))
		}
	}

	// Next, write to the log which tracks the necessary revocation state
	// needed to rectify any fishy behavior by the remote party. Modify the
	// current uncollapsed revocation state to simulate a state transition
	// by the remote party.
	newRevocation := bytes.Repeat([]byte{9}, 32)
	copy(channel.TheirCurrentRevocationHash[:], newRevocation)
	if err := channel.AppendToRevocationLog(delta); err != nil {
		t.Fatalf("unable to append to revocation log: %v", err)
	}

	// We should be able to fetch the channel delta created above by its
	// update number with all the state properly reconstructed.
	diskDelta, err := channel.FindPreviousState(uint64(delta.UpdateNum))
	if err != nil {
		t.Fatalf("unable to fetch past delta: %v", err)
	}

	// The two deltas (the original vs the on-disk version) should be
	// identical, and all HTLC data should properly be retained.
	if delta.LocalBalance != diskDelta.LocalBalance {
		t.Fatal("local balances don't match")
	}
	if delta.RemoteBalance != diskDelta.RemoteBalance {
		t.Fatal("remote balances don't match")
	}
	if delta.UpdateNum != diskDelta.UpdateNum {
		t.Fatal("update number doesn't match")
	}
	for i := 0; i < len(delta.Htlcs); i++ {
		originalHTLC := delta.Htlcs[i]
		diskHTLC := diskDelta.Htlcs[i]
		if !reflect.DeepEqual(originalHTLC, diskHTLC) {
			t.Fatalf("htlcs don't match: %v vs %v",
				spew.Sdump(originalHTLC),
				spew.Sdump(diskHTLC))
		}
	}

	// The state number recovered from the tail of the revocation log
	// should be identical to this current state.
	logTail, err := channel.RevocationLogTail()
	if err != nil {
		t.Fatalf("unable to retrieve log: %v", err)
	}
	if logTail.UpdateNum != delta.UpdateNum {
		t.Fatal("update number doesn't match")
	}

	// Next modify the delta slightly, then create a new entry within the
	// revocation log.
	delta.UpdateNum = 2
	delta.LocalBalance -= htlcAmt
	delta.RemoteBalance += htlcAmt
	delta.Htlcs = nil
	if err := channel.AppendToRevocationLog(delta); err != nil {
		t.Fatalf("unable to append to revocation log: %v", err)
	}

	// Once again, fetch the state and ensure it has been properly updated.
	diskDelta, err = channel.FindPreviousState(uint64(delta.UpdateNum))
	if err != nil {
		t.Fatalf("unable to fetch past delta: %v", err)
	}
	if len(diskDelta.Htlcs) != 0 {
		t.Fatalf("expected %v htlcs, got %v", 0, len(diskDelta.Htlcs))
	}
	if delta.LocalBalance != 1e8-htlcAmt {
		t.Fatalf("mismatched balances, expected %v got %v", 1e8-htlcAmt,
			delta.LocalBalance)
	}
	if delta.RemoteBalance != 1e8+htlcAmt {
		t.Fatalf("mismatched balances, expected %v got %v", 1e8+htlcAmt,
			delta.RemoteBalance)
	}

	// Once again, the state number recovered from the tail of the
	// revocation log should be identical to this current state.
	logTail, err = channel.RevocationLogTail()
	if err != nil {
		t.Fatalf("unable to retrieve log: %v", err)
	}
	if logTail.UpdateNum != delta.UpdateNum {
		t.Fatal("update number doesn't match")
	}

	// The revocation state stored on-disk should now also be identical.
	updatedChannel, err = cdb.FetchOpenChannels(channel.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch updated channel: %v", err)
	}
	if !bytes.Equal(updatedChannel[0].TheirCurrentRevocationHash[:],
		newRevocation) {
		t.Fatal("revocation state wasn't synced!")
	}

	// Now attempt to delete the channel from the database.
	closeSummary := &ChannelCloseSummary{
		ChanPoint:         *channel.ChanID,
		RemotePub:         channel.IdentityPub,
		SettledBalance:    btcutil.Amount(500),
		TimeLockedBalance: btcutil.Amount(10000),
		IsPending:         false,
		CloseType:         ForceClose,
	}
	if err := updatedChannel[0].CloseChannel(closeSummary); err != nil {
		t.Fatalf("unable to delete updated channel: %v", err)
	}

	// If we attempt to fetch the target channel again, it shouldn't be
	// found.
	channels, err := cdb.FetchOpenChannels(channel.IdentityPub)
	if err != nil {
		t.Fatalf("unable to fetch updated channels: %v", err)
	}
	if len(channels) != 0 {
		t.Fatalf("%v channels found, but none should be",
			len(channels))
	}

	// Attempting to find previous states on the channel should fail as the
	// revocation log has been deleted.
	_, err = updatedChannel[0].FindPreviousState(uint64(delta.UpdateNum))
	if err == nil {
		t.Fatal("revocation log search should've failed")
	}
}
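
// TestFetchPendingChannels exercises the pending channel lifecycle: a channel
// synced in the pending state should be returned by FetchPendingChannels, and
// should no longer appear there once marked as open.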
func TestFetchPendingChannels(t *testing.T) {
	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// Create the first test channel state.
	state, err := createTestChannelState(cdb)
	if err != nil {
		t.Fatalf("unable to create channel state: %v", err)
	}
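
	// Next, sync the channel to disk in the pending state, recording the
	// remote peer's address along with the height at which the funding
	// transaction was broadcast.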
	addr := &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18555,
	}

	const broadcastHeight = 99
	if err := state.SyncPending(addr, broadcastHeight); err != nil {
		t.Fatalf("unable to save and serialize channel state: %v", err)
	}

	pendingChannels, err := cdb.FetchPendingChannels()
	if err != nil {
		t.Fatalf("unable to list pending channels: %v", err)
	}

	if len(pendingChannels) != 1 {
		t.Fatalf("incorrect number of pending channels: expecting %v, "+
			"got %v", 1, len(pendingChannels))
	}

	// The broadcast height of the pending channel should've been set
	// properly.
	if pendingChannels[0].FundingBroadcastHeight != broadcastHeight {
		t.Fatalf("broadcast height mismatch: expected %v, got %v",
			broadcastHeight,
			pendingChannels[0].FundingBroadcastHeight)
	}

	const openHeight = 100
	err = cdb.MarkChannelAsOpen(pendingChannels[0].ChanID, openHeight)
	if err != nil {
		t.Fatalf("unable to mark channel as open: %v", err)
	}

	// Next, we'll re-fetch the channel to ensure that the open height was
	// properly set.
	openChans, err := cdb.FetchAllChannels()
	if err != nil {
		t.Fatalf("unable to fetch channels: %v", err)
	}
	if openChans[0].OpeningHeight != openHeight {
		t.Fatalf("channel opening heights don't match: expected %v, "+
			"got %v", openHeight, openChans[0].OpeningHeight)
	}
	if openChans[0].FundingBroadcastHeight != broadcastHeight {
		t.Fatalf("broadcast height mismatch: expected %v, got %v",
			broadcastHeight,
			openChans[0].FundingBroadcastHeight)
	}

	pendingChannels, err = cdb.FetchPendingChannels()
	if err != nil {
		t.Fatalf("unable to list pending channels: %v", err)
	}

	if len(pendingChannels) != 0 {
		t.Fatalf("incorrect number of pending channels: expecting %v, "+
			"got %v", 0, len(pendingChannels))
	}
}
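
// TestFetchClosedChannels verifies that a closed channel is returned by
// FetchClosedChannels both while its closing transaction remains unconfirmed
// and after the channel has been marked as fully closed.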
func TestFetchClosedChannels(t *testing.T) {
	cdb, cleanUp, err := makeTestDB()
	if err != nil {
		t.Fatalf("unable to make test database: %v", err)
	}
	defer cleanUp()

	// First create a test channel that we'll be closing within this test.
	state, err := createTestChannelState(cdb)
	if err != nil {
		t.Fatalf("unable to create channel state: %v", err)
	}

	// Next sync the channel to disk, marking it as being in a pending open
	// state.
	addr := &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18555,
	}
	const broadcastHeight = 99
	if err := state.SyncPending(addr, broadcastHeight); err != nil {
		t.Fatalf("unable to save and serialize channel state: %v", err)
	}

	// Next, simulate the confirmation of the channel by marking it as open
	// within the database.
	const openHeight = 100
	if err := cdb.MarkChannelAsOpen(state.ChanID, openHeight); err != nil {
		t.Fatalf("unable to mark channel as open: %v", err)
	}

	// Next, close the channel by including a close channel summary in the
	// database.
	summary := &ChannelCloseSummary{
		ChanPoint:         *state.ChanID,
		ClosingTXID:       rev,
		RemotePub:         state.IdentityPub,
		Capacity:          state.Capacity,
		SettledBalance:    state.OurBalance,
		TimeLockedBalance: state.OurBalance + 10000,
		CloseType:         ForceClose,
		IsPending:         true,
	}
	if err := state.CloseChannel(summary); err != nil {
		t.Fatalf("unable to close channel: %v", err)
	}

	// Query the database to ensure that the channel has now been properly
	// closed. We should get the same result whether querying for pending
	// channels only, or not.
	pendingClosed, err := cdb.FetchClosedChannels(true)
	if err != nil {
		t.Fatalf("failed fetching closed channels: %v", err)
	}
	if len(pendingClosed) != 1 {
		t.Fatalf("incorrect number of pending closed channels: "+
			"expecting %v, got %v", 1, len(pendingClosed))
	}
	if !reflect.DeepEqual(summary, pendingClosed[0]) {
		t.Fatalf("database summaries don't match: expected %v got %v",
			spew.Sdump(summary), spew.Sdump(pendingClosed[0]))
	}
	closed, err := cdb.FetchClosedChannels(false)
	if err != nil {
		t.Fatalf("failed fetching all closed channels: %v", err)
	}
	if len(closed) != 1 {
		t.Fatalf("incorrect number of closed channels: expecting %v, "+
			"got %v", 1, len(closed))
	}
	if !reflect.DeepEqual(summary, closed[0]) {
		t.Fatalf("database summaries don't match: expected %v got %v",
			spew.Sdump(summary), spew.Sdump(closed[0]))
	}

	// Mark the channel as fully closed.
	if err := cdb.MarkChanFullyClosed(state.ChanID); err != nil {
		t.Fatalf("failed fully closing channel: %v", err)
	}

	// The channel should no longer be considered pending, but should still
	// be retrieved when fetching all the closed channels.
	closed, err = cdb.FetchClosedChannels(false)
	if err != nil {
		t.Fatalf("failed fetching closed channels: %v", err)
	}
	if len(closed) != 1 {
		t.Fatalf("incorrect number of closed channels: expecting %v, "+
			"got %v", 1, len(closed))
	}
	pendingClose, err := cdb.FetchClosedChannels(true)
	if err != nil {
		t.Fatalf("failed fetching channels pending close: %v", err)
	}
	if len(pendingClose) != 0 {
		t.Fatalf("incorrect number of channels pending close: "+
			"expecting %v, got %v", 0, len(pendingClose))
	}
}