From e1d8b07735b4a9170b239d7fd3c3dcd7363219a5 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:44 +0100
Subject: [PATCH 01/18] lnd_test: remove unused math/rand dependency

---
 lnd_test.go | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/lnd_test.go b/lnd_test.go
index b0af5066..107933cd 100644
--- a/lnd_test.go
+++ b/lnd_test.go
@@ -20,7 +20,6 @@ import (
 	"crypto/rand"
 	"crypto/sha256"
-	prand "math/rand"

 	"github.com/btcsuite/btcd/chaincfg"
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
@@ -8202,9 +8201,6 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
 	// Send one more payment in order to cause insufficient capacity error.
 	numInvoices++

-	// Initialize seed random in order to generate invoices.
-	prand.Seed(time.Now().UnixNano())
-
 	// With the channel open, we'll create invoices for Bob that Alice
 	// will pay to in order to advance the state of the channel.
 	bobPayReqs := make([]string, numInvoices)
@@ -8385,9 +8381,6 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
 	aliceAmt := info.LocalBalance
 	bobAmt := info.RemoteBalance

-	// Initialize seed random in order to generate invoices.
-	prand.Seed(time.Now().UnixNano())
-
 	// With the channel open, we'll create invoices for Bob that Alice
 	// will pay to in order to advance the state of the channel.
 	bobPayReqs := make([]string, numInvoices)

From eb2f832bba98155ced01faeb836f36eac9f75733 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:44 +0100
Subject: [PATCH 02/18] lnd_test: define createPayReqs helper method

---
 lnd_test.go | 387 +++++++++++++++++++---------------------------------
 1 file changed, 137 insertions(+), 250 deletions(-)

diff --git a/lnd_test.go b/lnd_test.go
index 107933cd..fbc1ff32 100644
--- a/lnd_test.go
+++ b/lnd_test.go
@@ -546,6 +546,40 @@ func makeFakePayHash(t *harnessTest) []byte {
 	return randBuf
 }

+// createPayReqs is a helper method that will create a slice of payment
+// requests for the given node.
+func createPayReqs(ctx context.Context, node *lntest.HarnessNode,
+	paymentAmt btcutil.Amount, numInvoices int) ([]string, [][]byte,
+	[]*lnrpc.Invoice, error) {
+
+	payReqs := make([]string, numInvoices)
+	rHashes := make([][]byte, numInvoices)
+	invoices := make([]*lnrpc.Invoice, numInvoices)
+	for i := 0; i < numInvoices; i++ {
+		preimage := make([]byte, 32)
+		_, err := rand.Read(preimage)
+		if err != nil {
+			return nil, nil, nil, fmt.Errorf("unable to generate "+
+				"preimage: %v", err)
+		}
+		invoice := &lnrpc.Invoice{
+			Memo:      "testing",
+			RPreimage: preimage,
+			Value:     int64(paymentAmt),
+		}
+		resp, err := node.AddInvoice(ctx, invoice)
+		if err != nil {
+			return nil, nil, nil, fmt.Errorf("unable to add "+
+				"invoice: %v", err)
+		}
+
+		payReqs[i] = resp.PaymentRequest
+		rHashes[i] = resp.RHash
+		invoices[i] = invoice
+	}
+	return payReqs, rHashes, invoices, nil
+}
+
 const (
 	AddrTypeWitnessPubkeyHash = lnrpc.NewAddressRequest_WITNESS_PUBKEY_HASH
 	AddrTypeNestedPubkeyHash = lnrpc.NewAddressRequest_NESTED_PUBKEY_HASH
@@ -3599,18 +3633,12 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
 	// satoshis with a different preimage each time.
const numPayments = 5 const paymentAmt = 1000 - payReqs := make([]string, numPayments) - for i := 0; i < numPayments; i++ { - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - resp, err := net.Bob.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - payReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + payReqs, _, _, err := createPayReqs( + ctxt, net.Bob, paymentAmt, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // We'll wait for all parties to recognize the new channels within the @@ -3828,17 +3856,12 @@ func testSingleHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) { // Create 5 invoices for Bob, which expect a payment from Alice for 1k // satoshis with a different preimage each time. const numPayments = 5 - rHashes := make([][]byte, numPayments) - for i := 0; i < numPayments; i++ { - invoice := &lnrpc.Invoice{ - Value: paymentAmt, - } - resp, err := net.Bob.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - rHashes[i] = resp.RHash + ctxt, _ = context.WithTimeout(ctxb, timeout) + _, rHashes, _, err := createPayReqs( + ctxt, net.Bob, paymentAmt, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // We'll wait for all parties to recognize the new channels within the @@ -4016,17 +4039,12 @@ func testMultiHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) { // Create 5 invoices for Carol, which expect a payment from Alice for 1k // satoshis with a different preimage each time. const numPayments = 5 - rHashes := make([][]byte, numPayments) - for i := 0; i < numPayments; i++ { - invoice := &lnrpc.Invoice{ - Value: paymentAmt, - } - resp, err := carol.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - rHashes[i] = resp.RHash + ctxt, _ = context.WithTimeout(ctxb, timeout) + _, rHashes, _, err := createPayReqs( + ctxt, carol, paymentAmt, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // We'll wait for all parties to recognize the new channels within the @@ -4532,25 +4550,12 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) { // by only using one of the channels. const numPayments = 2 const paymentAmt = 70000 - payReqs := make([]string, numPayments) - for i := 0; i < numPayments; i++ { - preimage := make([]byte, 32) - _, err := rand.Read(preimage) - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := net.Bob.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - payReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + payReqs, _, _, err := createPayReqs( + ctxt, net.Bob, paymentAmt, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } time.Sleep(time.Millisecond * 50) @@ -4602,25 +4607,12 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) { // Alice should also be able to route payments using this channel, // so send two payments of 60k back to Carol. 
const paymentAmt60k = 60000 - payReqs = make([]string, numPayments) - for i := 0; i < numPayments; i++ { - preimage := make([]byte, 32) - _, err := rand.Read(preimage) - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt60k, - } - resp, err := carol.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - payReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + payReqs, _, _, err = createPayReqs( + ctxt, carol, paymentAmt60k, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } time.Sleep(time.Millisecond * 50) @@ -5267,22 +5259,12 @@ func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) { // We'll now add 3 more invoices to Bob's invoice registry. const numInvoices = 3 - newInvoices := make([]*lnrpc.Invoice, numInvoices) - payReqs := make([]string, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := bytes.Repeat([]byte{byte(90 + 1 + i)}, 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := net.Bob.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - newInvoices[i] = invoice - payReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + payReqs, _, newInvoices, err := createPayReqs( + ctxt, net.Bob, paymentAmt, numInvoices, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // Now that the set of invoices has been added, we'll re-register for @@ -6104,20 +6086,12 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { // With the channel open, we'll create a few invoices for Bob that // Carol will pay to in order to advance the state of the channel. - bobPayReqs := make([]string, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := bytes.Repeat([]byte{byte(255 - i)}, 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := net.Bob.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - bobPayReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + bobPayReqs, _, _, err := createPayReqs( + ctxt, net.Bob, paymentAmt, numInvoices, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // As we'll be querying the state of bob's channels frequently we'll @@ -6386,20 +6360,12 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness // With the channel open, we'll create a few invoices for Carol that // Dave will pay to in order to advance the state of the channel. 
- carolPayReqs := make([]string, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := bytes.Repeat([]byte{byte(192 - i)}, 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := carol.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - carolPayReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolPayReqs, _, _, err := createPayReqs( + ctxt, carol, paymentAmt, numInvoices, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // As we'll be querying the state of Carols's channels frequently we'll @@ -6655,20 +6621,12 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, // With the channel open, we'll create a few invoices for Carol that // Dave will pay to in order to advance the state of the channel. - carolPayReqs := make([]string, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := bytes.Repeat([]byte{byte(192 - i)}, 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := carol.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - carolPayReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolPayReqs, _, _, err := createPayReqs( + ctxt, carol, paymentAmt, numInvoices, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // As we'll be querying the state of Carol's channels frequently we'll @@ -6739,20 +6697,12 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, // At this point, we'll also send over a set of HTLC's from Carol to // Dave. This ensures that the final revoked transaction has HTLC's in // both directions. - davePayReqs := make([]string, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := bytes.Repeat([]byte{byte(199 - i)}, 32) - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := dave.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - davePayReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + davePayReqs, _, _, err := createPayReqs( + ctxt, dave, paymentAmt, numInvoices, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // Send payments from Carol to Dave using 3 of Dave's payment hashes @@ -8168,7 +8118,7 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { } const ( - timeout = time.Duration(time.Second * 5) + timeout = time.Duration(time.Second * 15) paymentAmt = 100 ) @@ -8203,25 +8153,12 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { // With the channel open, we'll create invoices for Bob that Alice // will pay to in order to advance the state of the channel. 
- bobPayReqs := make([]string, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := make([]byte, 32) - _, err := rand.Read(preimage) - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := net.Bob.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - bobPayReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + bobPayReqs, _, _, err := createPayReqs( + ctxt, net.Bob, paymentAmt, numInvoices, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // Wait for Alice to receive the channel edge from the funding manager. @@ -8383,48 +8320,22 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) // With the channel open, we'll create invoices for Bob that Alice // will pay to in order to advance the state of the channel. - bobPayReqs := make([]string, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := make([]byte, 32) - _, err := rand.Read(preimage) - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := net.Bob.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - bobPayReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + bobPayReqs, _, _, err := createPayReqs( + ctxt, net.Bob, paymentAmt, numInvoices, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // With the channel open, we'll create invoices for Alice that Bob // will pay to in order to advance the state of the channel. - alicePayReqs := make([]string, numInvoices) - for i := 0; i < numInvoices; i++ { - preimage := make([]byte, 32) - _, err := rand.Read(preimage) - if err != nil { - t.Fatalf("unable to generate preimage: %v", err) - } - - invoice := &lnrpc.Invoice{ - Memo: "testing", - RPreimage: preimage, - Value: paymentAmt, - } - resp, err := net.Alice.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - alicePayReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + alicePayReqs, _, _, err := createPayReqs( + ctxt, net.Alice, paymentAmt, numInvoices, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // Wait for Alice to receive the channel edge from the funding manager. @@ -10433,18 +10344,12 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) { // satoshis with a different preimage each time. const numPayments = 5 const paymentAmt = 1000 - payReqs := make([]string, numPayments) - for i := 0; i < numPayments; i++ { - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - resp, err := carol.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - payReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + payReqs, _, _, err := createPayReqs( + ctxt, carol, paymentAmt, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // We'll wait for all parties to recognize the new channels within the @@ -10775,18 +10680,12 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { // satoshis with a different preimage each time. 
const numPayments = 5 const paymentAmt = 1000 - payReqs := make([]string, numPayments) - for i := 0; i < numPayments; i++ { - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - resp, err := carol.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - payReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + payReqs, _, _, err := createPayReqs( + ctxt, carol, paymentAmt, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // We'll wait for all parties to recognize the new channels within the @@ -11125,18 +11024,12 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness // satoshis with a different preimage each time. const numPayments = 5 const paymentAmt = 1000 - payReqs := make([]string, numPayments) - for i := 0; i < numPayments; i++ { - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - resp, err := carol.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - payReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + payReqs, _, _, err := createPayReqs( + ctxt, carol, paymentAmt, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // We'll wait for all parties to recognize the new channels within the @@ -11479,18 +11372,12 @@ func testSwitchOfflineDeliveryOutgoingOffline( // satoshis with a different preimage each time. const numPayments = 5 const paymentAmt = 1000 - payReqs := make([]string, numPayments) - for i := 0; i < numPayments; i++ { - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: paymentAmt, - } - resp, err := carol.AddInvoice(ctxb, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - payReqs[i] = resp.PaymentRequest + ctxt, _ = context.WithTimeout(ctxb, timeout) + payReqs, _, _, err := createPayReqs( + ctxt, carol, paymentAmt, numPayments, + ) + if err != nil { + t.Fatalf("unable to create pay reqs: %v", err) } // We'll wait for all parties to recognize the new channels within the From 0ca7c8c5f824c2f196e81e314bc66b3d7deb70d0 Mon Sep 17 00:00:00 2001 From: "Johan T. Halseth" Date: Tue, 20 Nov 2018 15:09:44 +0100 Subject: [PATCH 03/18] lnd_test: define helper getChanInfo --- lnd_test.go | 168 ++++++++++++++++------------------------------------ 1 file changed, 52 insertions(+), 116 deletions(-) diff --git a/lnd_test.go b/lnd_test.go index fbc1ff32..ac3b6344 100644 --- a/lnd_test.go +++ b/lnd_test.go @@ -580,6 +580,24 @@ func createPayReqs(ctx context.Context, node *lntest.HarnessNode, return payReqs, rHashes, invoices, nil } +// getChanInfo is a helper method for getting channel info for a node's sole +// channel. 
+func getChanInfo(ctx context.Context, node *lntest.HarnessNode) ( + *lnrpc.Channel, error) { + + req := &lnrpc.ListChannelsRequest{} + channelInfo, err := node.ListChannels(ctx, req) + if err != nil { + return nil, err + } + if len(channelInfo.Channels) != 1 { + return nil, fmt.Errorf("node should only have a single "+ + "channel, instead he has %v", len(channelInfo.Channels)) + } + + return channelInfo.Channels[0], nil +} + const ( AddrTypeWitnessPubkeyHash = lnrpc.NewAddressRequest_WITNESS_PUBKEY_HASH AddrTypeNestedPubkeyHash = lnrpc.NewAddressRequest_NESTED_PUBKEY_HASH @@ -2251,23 +2269,6 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("htlc mismatch: %v", predErr) } - // As we'll be querying the state of Alice's channels frequently we'll - // create a closure helper function for the purpose. - getAliceChanInfo := func() (*lnrpc.Channel, error) { - req := &lnrpc.ListChannelsRequest{} - aliceChannelInfo, err := net.Alice.ListChannels(ctxb, req) - if err != nil { - return nil, err - } - if len(aliceChannelInfo.Channels) != 1 { - t.Fatalf("alice should only have a single channel, "+ - "instead he has %v", - len(aliceChannelInfo.Channels)) - } - - return aliceChannelInfo.Channels[0], nil - } - // Fetch starting height of this test so we can compute the block // heights we expect certain events to take place. _, curHeight, err := net.Miner.Node.GetBestBlock() @@ -2284,7 +2285,8 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { htlcCsvMaturityHeight = startHeight + defaultCLTV + 1 + defaultCSV ) - aliceChan, err := getAliceChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + aliceChan, err := getChanInfo(ctxt, net.Alice) if err != nil { t.Fatalf("unable to get alice's channel info: %v", err) } @@ -6094,22 +6096,6 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to create pay reqs: %v", err) } - // As we'll be querying the state of bob's channels frequently we'll - // create a closure helper function for the purpose. - getBobChanInfo := func() (*lnrpc.Channel, error) { - req := &lnrpc.ListChannelsRequest{} - bobChannelInfo, err := net.Bob.ListChannels(ctxb, req) - if err != nil { - return nil, err - } - if len(bobChannelInfo.Channels) != 1 { - t.Fatalf("bob should only have a single channel, instead he has %v", - len(bobChannelInfo.Channels)) - } - - return bobChannelInfo.Channels[0], nil - } - // Wait for Carol to receive the channel edge from the funding manager. 
ctxt, _ = context.WithTimeout(ctxb, timeout) err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) @@ -6132,7 +6118,8 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { var bobChan *lnrpc.Channel var predErr error err = lntest.WaitPredicate(func() bool { - bChan, err := getBobChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + bChan, err := getChanInfo(ctxt, net.Bob) if err != nil { t.Fatalf("unable to get bob's channel info: %v", err) } @@ -6180,7 +6167,8 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to send payments: %v", err) } - bobChan, err = getBobChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + bobChan, err = getChanInfo(ctxt, net.Bob) if err != nil { t.Fatalf("unable to get bob chan info: %v", err) } @@ -6197,7 +6185,8 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { // Now query for Bob's channel state, it should show that he's at a // state number in the past, not the *latest* state. - bobChan, err = getBobChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + bobChan, err = getChanInfo(ctxt, net.Bob) if err != nil { t.Fatalf("unable to get bob chan info: %v", err) } @@ -6368,22 +6357,6 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness t.Fatalf("unable to create pay reqs: %v", err) } - // As we'll be querying the state of Carols's channels frequently we'll - // create a closure helper function for the purpose. - getCarolChanInfo := func() (*lnrpc.Channel, error) { - req := &lnrpc.ListChannelsRequest{} - carolChannelInfo, err := carol.ListChannels(ctxb, req) - if err != nil { - return nil, err - } - if len(carolChannelInfo.Channels) != 1 { - t.Fatalf("carol should only have a single channel, "+ - "instead he has %v", len(carolChannelInfo.Channels)) - } - - return carolChannelInfo.Channels[0], nil - } - // Wait for Dave to receive the channel edge from the funding manager. ctxt, _ = context.WithTimeout(ctxb, timeout) err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) @@ -6394,7 +6367,8 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness // Next query for Carol's channel state, as we sent 0 payments, Carol // should now see her balance as being 0 satoshis. - carolChan, err := getCarolChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolChan, err := getChanInfo(ctxt, carol) if err != nil { t.Fatalf("unable to get carol's channel info: %v", err) } @@ -6431,7 +6405,8 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness t.Fatalf("unable to send payments: %v", err) } - carolChan, err = getCarolChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolChan, err = getChanInfo(ctxt, carol) if err != nil { t.Fatalf("unable to get carol chan info: %v", err) } @@ -6448,7 +6423,8 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness // Now query for Carol's channel state, it should show that he's at a // state number in the past, not the *latest* state. 
- carolChan, err = getCarolChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolChan, err = getChanInfo(ctxt, carol) if err != nil { t.Fatalf("unable to get carol chan info: %v", err) } @@ -6629,26 +6605,11 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, t.Fatalf("unable to create pay reqs: %v", err) } - // As we'll be querying the state of Carol's channels frequently we'll - // create a closure helper function for the purpose. - getCarolChanInfo := func() (*lnrpc.Channel, error) { - req := &lnrpc.ListChannelsRequest{} - carolChannelInfo, err := carol.ListChannels(ctxb, req) - if err != nil { - return nil, err - } - if len(carolChannelInfo.Channels) != 1 { - t.Fatalf("carol should only have a single channel, instead he has %v", - len(carolChannelInfo.Channels)) - } - - return carolChannelInfo.Channels[0], nil - } - // We'll introduce a closure to validate that Carol's current balance // matches the given expected amount. checkCarolBalance := func(expectedAmt int64) { - carolChan, err := getCarolChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolChan, err := getChanInfo(ctxt, carol) if err != nil { t.Fatalf("unable to get carol's channel info: %v", err) } @@ -6663,7 +6624,8 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, // number of updates is at least as large as the provided minimum // number. checkCarolNumUpdatesAtLeast := func(minimum uint64) { - carolChan, err := getCarolChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolChan, err := getChanInfo(ctxt, carol) if err != nil { t.Fatalf("unable to get carol's channel info: %v", err) } @@ -6717,7 +6679,8 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, // Next query for Carol's channel state, as we sent 3 payments of 10k // satoshis each, however Carol should now see her balance as being // equal to the push amount in satoshis since she has not settled. - carolChan, err := getCarolChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolChan, err := getChanInfo(ctxt, carol) if err != nil { t.Fatalf("unable to get carol's channel info: %v", err) } @@ -6786,7 +6749,8 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, // Now query for Carol's channel state, it should show that she's at a // state number in the past, *not* the latest state. - carolChan, err = getCarolChanInfo() + ctxt, _ = context.WithTimeout(ctxb, timeout) + carolChan, err = getChanInfo(ctxt, carol) if err != nil { t.Fatalf("unable to get carol chan info: %v", err) } @@ -8100,23 +8064,6 @@ func testNodeSignVerify(net *lntest.NetworkHarness, t *harnessTest) { func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() - // As we'll be querying the channels state frequently we'll - // create a closure helper function for the purpose. 
-	getChanInfo := func(node *lntest.HarnessNode) (*lnrpc.Channel, error) {
-		req := &lnrpc.ListChannelsRequest{}
-		channelInfo, err := node.ListChannels(ctxb, req)
-		if err != nil {
-			return nil, err
-		}
-		if len(channelInfo.Channels) != 1 {
-			t.Fatalf("node should only have a single channel, "+
-				"instead he has %v",
-				len(channelInfo.Channels))
-		}
-
-		return channelInfo.Channels[0], nil
-	}
-
 	const (
 		timeout    = time.Duration(time.Second * 15)
 		paymentAmt = 100
 	)
@@ -8134,7 +8081,8 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
 		},
 	)

-	info, err := getChanInfo(net.Alice)
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	info, err := getChanInfo(ctxt, net.Alice)
 	if err != nil {
 		t.Fatalf("unable to get alice channel info: %v", err)
 	}
@@ -8219,7 +8167,8 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {

 	// Next query for Bob's and Alice's channel states, in order to confirm
 	// that all payments have been successfully transmitted.
-	aliceChan, err := getChanInfo(net.Alice)
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	aliceChan, err := getChanInfo(ctxt, net.Alice)
 	if len(aliceChan.PendingHtlcs) != 0 {
 		t.Fatalf("alice's pending htlcs is incorrect, got %v, "+
 			"expected %v", len(aliceChan.PendingHtlcs), 0)
@@ -8239,7 +8188,8 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
 	// Wait for Bob to receive revocation from Alice.
 	time.Sleep(2 * time.Second)

-	bobChan, err := getChanInfo(net.Bob)
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	bobChan, err := getChanInfo(ctxt, net.Bob)
 	if err != nil {
 		t.Fatalf("unable to get bob's channel info: %v", err)
 	}
@@ -8271,23 +8221,6 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
 func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()

-	// As we'll be querying the channels state frequently we'll
-	// create a closure helper function for the purpose.
-	getChanInfo := func(node *lntest.HarnessNode) (*lnrpc.Channel, error) {
-		req := &lnrpc.ListChannelsRequest{}
-		channelInfo, err := node.ListChannels(ctxb, req)
-		if err != nil {
-			return nil, err
-		}
-		if len(channelInfo.Channels) != 1 {
-			t.Fatalf("node should only have a single channel, "+
-				"instead he has %v",
-				len(channelInfo.Channels))
-		}
-
-		return channelInfo.Channels[0], nil
-	}
-
 	const (
 		timeout    = time.Duration(time.Second * 5)
 		paymentAmt = 1000
 	)
@@ -8305,7 +8238,8 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
 		},
 	)

-	info, err := getChanInfo(net.Alice)
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	info, err := getChanInfo(ctxt, net.Alice)
 	if err != nil {
 		t.Fatalf("unable to get alice channel info: %v", err)
 	}
@@ -8435,7 +8369,8 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
 	// states, i.e. balance info.
 	time.Sleep(1 * time.Second)

-	aliceInfo, err := getChanInfo(net.Alice)
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	aliceInfo, err := getChanInfo(ctxt, net.Alice)
 	if err != nil {
 		t.Fatalf("unable to get bob's channel info: %v", err)
 	}
@@ -8454,7 +8389,8 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
 	// Next query for Bob's and Alice's channel states, in order to confirm
 	// that all payments have been successfully transmitted.
-	bobInfo, err := getChanInfo(net.Bob)
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	bobInfo, err := getChanInfo(ctxt, net.Bob)
 	if err != nil {
 		t.Fatalf("unable to get bob's channel info: %v", err)
 	}

From 13098a595a582904356ae6eb865eb1a59a46868e Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:44 +0100
Subject: [PATCH 04/18] lnd_test: refactor testDataLossProtection

This extracts part of the test into a new helper method timeTravel,
which can be used to easily reset a node back to a state where channel
state is lost.

---
 lnd_test.go | 323 +++++++++++++++++++++++++++-------------------------
 1 file changed, 165 insertions(+), 158 deletions(-)

diff --git a/lnd_test.go b/lnd_test.go
index ac3b6344..a3a093e5 100644
--- a/lnd_test.go
+++ b/lnd_test.go
@@ -7005,12 +7005,6 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 	}
 	defer shutdownAndAssert(net, t, dave)

-	// We must let Dave communicate with Carol before they are able to open
-	// channel, so we connect them.
-	if err := net.ConnectNodes(ctxb, carol, dave); err != nil {
-		t.Fatalf("unable to connect dave to carol: %v", err)
-	}
-
 	// Before we make a channel, we'll load up Carol with some coins sent
 	// directly from the miner.
 	err = net.SendCoins(ctxb, btcutil.SatoshiPerBitcoin, carol)
@@ -7018,14 +7012,164 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 		t.Fatalf("unable to send coins to carol: %v", err)
 	}

-	// We'll first open up a channel between them with a 0.5 BTC value.
-	ctxt, _ := context.WithTimeout(ctxb, timeout)
-	chanPoint := openChannelAndAssert(
-		ctxt, t, net, carol, dave,
-		lntest.OpenChannelParams{
-			Amt: chanAmt,
-		},
-	)
+	// timeTravel is a method that will make Carol open a channel to the
+	// passed node, settle a series of payments, then reset the node back
+	// to the state before the payments happened. When this method returns
+	// the node will be unaware of the new state updates. The returned
+	// function can be used to restart the node in this state.
+	timeTravel := func(node *lntest.HarnessNode) (func() error,
+		*lnrpc.ChannelPoint, int64, error) {
+
+		// We must let the node communicate with Carol before they are
+		// able to open a channel, so we connect them.
+		if err := net.EnsureConnected(ctxb, carol, node); err != nil {
+			t.Fatalf("unable to connect %v to carol: %v",
+				node.Name(), err)
+		}
+
+		// We'll first open up a channel between them with a 0.5 BTC
+		// value.
+		ctxt, _ := context.WithTimeout(ctxb, timeout)
+		chanPoint := openChannelAndAssert(
+			ctxt, t, net, carol, node,
+			lntest.OpenChannelParams{
+				Amt: chanAmt,
+			},
+		)
+
+		// With the channel open, we'll create a few invoices for the
+		// node that Carol will pay to in order to advance the state of
+		// the channel.
+		// TODO(halseth): have dangling HTLCs on the commitment, able to
+		// retrieve funds?
+		ctxt, _ = context.WithTimeout(ctxb, timeout)
+		payReqs, _, _, err := createPayReqs(
+			ctxt, node, paymentAmt, numInvoices,
+		)
+		if err != nil {
+			t.Fatalf("unable to create pay reqs: %v", err)
+		}
+
+		// Wait for Carol to receive the channel edge from the funding
+		// manager.
+		ctxt, _ = context.WithTimeout(ctxb, timeout)
+		err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
+		if err != nil {
+			t.Fatalf("carol didn't see the carol->%s channel "+
+				"before timeout: %v", node.Name(), err)
+		}
+
+		// Send payments from Carol using 3 of the payment hashes
+		// generated above.
+		ctxt, _ = context.WithTimeout(ctxb, timeout)
+		err = completePaymentRequests(ctxt, carol,
+			payReqs[:numInvoices/2], true)
+		if err != nil {
+			t.Fatalf("unable to send payments: %v", err)
+		}
+
+		// Next query for the node's channel state, as we sent 3
+		// payments of 10k satoshis each, it should now see its balance
+		// as being 30k satoshis.
+		var nodeChan *lnrpc.Channel
+		var predErr error
+		err = lntest.WaitPredicate(func() bool {
+			ctxt, _ = context.WithTimeout(ctxb, timeout)
+			bChan, err := getChanInfo(ctxt, node)
+			if err != nil {
+				t.Fatalf("unable to get channel info: %v", err)
+			}
+			if bChan.LocalBalance != 30000 {
+				predErr = fmt.Errorf("balance is incorrect, "+
+					"got %v, expected %v",
+					bChan.LocalBalance, 30000)
+				return false
+			}
+
+			nodeChan = bChan
+			return true
+		}, time.Second*15)
+		if err != nil {
+			t.Fatalf("%v", predErr)
+		}
+
+		// Grab the current commitment height (update number), we'll
+		// later revert the node to this state after additional updates
+		// to revoke this state.
+		stateNumPreCopy := nodeChan.NumUpdates
+
+		// Create a temporary file to house the database state at this
+		// particular point in history.
+		tempDbPath, err := ioutil.TempDir("", node.Name()+"-past-state")
+		if err != nil {
+			t.Fatalf("unable to create temp db folder: %v", err)
+		}
+		tempDbFile := filepath.Join(tempDbPath, "channel.db")
+		defer os.Remove(tempDbPath)
+
+		// With the temporary file created, copy the current state into
+		// the temporary file we created above. Later after more
+		// updates, we'll restore this state.
+		if err := copyFile(tempDbFile, node.DBPath()); err != nil {
+			t.Fatalf("unable to copy database files: %v", err)
+		}
+
+		// Finally, send more payments from Carol, using the remaining
+		// payment hashes.
+		ctxt, _ = context.WithTimeout(ctxb, timeout)
+		err = completePaymentRequests(ctxt, carol,
+			payReqs[numInvoices/2:], true)
+		if err != nil {
+			t.Fatalf("unable to send payments: %v", err)
+		}
+
+		ctxt, _ = context.WithTimeout(ctxb, timeout)
+		nodeChan, err = getChanInfo(ctxt, node)
+		if err != nil {
+			t.Fatalf("unable to get dave chan info: %v", err)
+		}
+
+		// Now we shut down the node, copying over its temporary
+		// database state, which has the *prior* channel state, over its
+		// current most up to date state. With this, we essentially
+		// force the node to travel back in time within the channel's
+		// history.
+		if err = net.RestartNode(node, func() error {
+			return os.Rename(tempDbFile, node.DBPath())
+		}); err != nil {
+			t.Fatalf("unable to restart node: %v", err)
+		}
+
+		// Now query for the channel state, it should show that it's at
+		// a state number in the past, not the *latest* state.
+		ctxt, _ = context.WithTimeout(ctxb, timeout)
+		nodeChan, err = getChanInfo(ctxt, node)
+		if err != nil {
+			t.Fatalf("unable to get dave chan info: %v", err)
+		}
+		if nodeChan.NumUpdates != stateNumPreCopy {
+			t.Fatalf("db copy failed: %v", nodeChan.NumUpdates)
+		}
+		assertNodeNumChannels(t, ctxb, node, 1)
+
+		balReq := &lnrpc.WalletBalanceRequest{}
+		balResp, err := node.WalletBalance(ctxb, balReq)
+		if err != nil {
+			t.Fatalf("unable to get dave's balance: %v", err)
+		}
+
+		restart, err := net.SuspendNode(node)
+		if err != nil {
+			t.Fatalf("unable to suspend node: %v", err)
+		}
+		return restart, chanPoint, balResp.ConfirmedBalance, nil
+	}
+
+	// Reset Dave to a state where he has an outdated channel state.
	restartDave, _, daveStartingBalance, err := timeTravel(dave)
	if err != nil {
		t.Fatalf("unable to time travel dave: %v", err)
	}

 	// We make a note of the nodes' current on-chain balances, to make
 	// sure they are able to retrieve the channel funds eventually,
 	balReq := &lnrpc.WalletBalanceRequest{}
@@ -7036,152 +7180,14 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 	}
 	carolStartingBalance := carolBalResp.ConfirmedBalance

-	daveBalResp, err := dave.WalletBalance(ctxb, balReq)
-	if err != nil {
-		t.Fatalf("unable to get dave's balance: %v", err)
+	// Restart Dave to trigger a channel resync.
+	if err := restartDave(); err != nil {
+		t.Fatalf("unable to restart dave: %v", err)
 	}
-	daveStartingBalance := daveBalResp.ConfirmedBalance
-
-	// With the channel open, we'll create a few invoices for Dave that
-	// Carol will pay to in order to advance the state of the channel.
-	// TODO(halseth): have dangling HTLCs on the commitment, able to
-	// retrive funds?
-	davePayReqs := make([]string, numInvoices)
-	for i := 0; i < numInvoices; i++ {
-		preimage := bytes.Repeat([]byte{byte(17 - i)}, 32)
-		invoice := &lnrpc.Invoice{
-			Memo:      "testing",
-			RPreimage: preimage,
-			Value:     paymentAmt,
-		}
-		resp, err := dave.AddInvoice(ctxb, invoice)
-		if err != nil {
-			t.Fatalf("unable to add invoice: %v", err)
-		}
-
-		davePayReqs[i] = resp.PaymentRequest
-	}
-
-	// As we'll be querying the state of Dave's channels frequently we'll
-	// create a closure helper function for the purpose.
-	getDaveChanInfo := func() (*lnrpc.Channel, error) {
-		req := &lnrpc.ListChannelsRequest{}
-		daveChannelInfo, err := dave.ListChannels(ctxb, req)
-		if err != nil {
-			return nil, err
-		}
-		if len(daveChannelInfo.Channels) != 1 {
-			t.Fatalf("dave should only have a single channel, "+
-				"instead he has %v",
-				len(daveChannelInfo.Channels))
-		}
-
-		return daveChannelInfo.Channels[0], nil
-	}
-
-	// Wait for Carol to receive the channel edge from the funding manager.
-	ctxt, _ = context.WithTimeout(ctxb, timeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint)
-	if err != nil {
-		t.Fatalf("carol didn't see the carol->dave channel before "+
-			"timeout: %v", err)
-	}
-
-	// Send payments from Carol to Dave using 3 of Dave's payment hashes
-	// generated above.
-	ctxt, _ = context.WithTimeout(ctxb, timeout)
-	err = completePaymentRequests(ctxt, carol, davePayReqs[:numInvoices/2],
-		true)
-	if err != nil {
-		t.Fatalf("unable to send payments: %v", err)
-	}
-
-	// Next query for Dave's channel state, as we sent 3 payments of 10k
-	// satoshis each, Dave should now see his balance as being 30k satoshis.
-	var daveChan *lnrpc.Channel
-	var predErr error
-	err = lntest.WaitPredicate(func() bool {
-		bChan, err := getDaveChanInfo()
-		if err != nil {
-			t.Fatalf("unable to get dave's channel info: %v", err)
-		}
-		if bChan.LocalBalance != 30000 {
-			predErr = fmt.Errorf("dave's balance is incorrect, "+
-				"got %v, expected %v", bChan.LocalBalance,
-				30000)
-			return false
-		}
-
-		daveChan = bChan
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("%v", predErr)
-	}
-
-	// Grab Dave's current commitment height (update number), we'll later
-	// revert him to this state after additional updates to revoke this
-	// state.
-	daveStateNumPreCopy := daveChan.NumUpdates
-
-	// Create a temporary file to house Dave's database state at this
-	// particular point in history.
-	daveTempDbPath, err := ioutil.TempDir("", "dave-past-state")
-	if err != nil {
-		t.Fatalf("unable to create temp db folder: %v", err)
-	}
-	daveTempDbFile := filepath.Join(daveTempDbPath, "channel.db")
-	defer os.Remove(daveTempDbPath)
-
-	// With the temporary file created, copy Dave's current state into the
-	// temporary file we created above. Later after more updates, we'll
-	// restore this state.
-	if err := copyFile(daveTempDbFile, dave.DBPath()); err != nil {
-		t.Fatalf("unable to copy database files: %v", err)
-	}
-
-	// Finally, send payments from Carol to Dave, consuming Dave's remaining
-	// payment hashes.
-	ctxt, _ = context.WithTimeout(ctxb, timeout)
-	err = completePaymentRequests(ctxt, carol, davePayReqs[numInvoices/2:],
-		true)
-	if err != nil {
-		t.Fatalf("unable to send payments: %v", err)
-	}
-
-	daveChan, err = getDaveChanInfo()
-	if err != nil {
-		t.Fatalf("unable to get dave chan info: %v", err)
-	}
-
-	// Now we shutdown Dave, copying over the his temporary database state
-	// which has the *prior* channel state over his current most up to date
-	// state. With this, we essentially force Dave to travel back in time
-	// within the channel's history.
-	if err = net.RestartNode(dave, func() error {
-		return os.Rename(daveTempDbFile, dave.DBPath())
-	}); err != nil {
-		t.Fatalf("unable to restart node: %v", err)
-	}
-
-	// Now query for Dave's channel state, it should show that he's at a
-	// state number in the past, not the *latest* state.
-	daveChan, err = getDaveChanInfo()
-	if err != nil {
-		t.Fatalf("unable to get dave chan info: %v", err)
-	}
-	if daveChan.NumUpdates != daveStateNumPreCopy {
-		t.Fatalf("db copy failed: %v", daveChan.NumUpdates)
-	}
-	assertNodeNumChannels(t, ctxb, dave, 1)

 	// Upon reconnection, the nodes should detect that Dave is out of sync.
-	if err := net.ConnectNodes(ctxb, carol, dave); err != nil {
-		t.Fatalf("unable to connect dave to carol: %v", err)
-	}
-
 	// Carol should force close the channel using her latest commitment.
-	forceClose, err := waitForTxInMempool(net.Miner.Node, 5*time.Second)
+	forceClose, err := waitForTxInMempool(net.Miner.Node, 15*time.Second)
 	if err != nil {
 		t.Fatalf("unable to find Carol's force close tx in mempool: %v",
 			err)
 	}
@@ -7229,10 +7235,11 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 	// We query Dave's balance to make sure it increased after the channel
 	// closed. This checks that he was able to sweep the funds he had in
 	// the channel.
-	daveBalResp, err = dave.WalletBalance(ctxb, balReq)
+	daveBalResp, err := dave.WalletBalance(ctxb, balReq)
 	if err != nil {
 		t.Fatalf("unable to get dave's balance: %v", err)
 	}
+
 	daveBalance := daveBalResp.ConfirmedBalance
 	if daveBalance <= daveStartingBalance {
 		t.Fatalf("expected dave to have balance above %d, instead had %v",
			daveStartingBalance, daveBalance)
@@ -7241,7 +7248,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 	// After Carol's output matures, she should also reclaim her funds.
 	mineBlocks(t, net, defaultCSV-1)
-	carolSweep, err := waitForTxInMempool(net.Miner.Node, 5*time.Second)
+	carolSweep, err := waitForTxInMempool(net.Miner.Node, 15*time.Second)
 	if err != nil {
 		t.Fatalf("unable to find Carol's sweep tx in mempool: %v", err)
 	}

From 780e1bbeadf6e180a3e29ddcd9e14efc5a57d8f3 Mon Sep 17 00:00:00 2001
From: "Johan T.
Halseth" Date: Tue, 20 Nov 2018 15:09:44 +0100 Subject: [PATCH 05/18] peer: correct variable name, print error --- peer.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/peer.go b/peer.go index 3cac60f9..f8a541c9 100644 --- a/peer.go +++ b/peer.go @@ -836,11 +836,11 @@ func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream { fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]), 1000, func(msg lnwire.Message) { - _, isChanSycMsg := msg.(*lnwire.ChannelReestablish) + _, isChanSyncMsg := msg.(*lnwire.ChannelReestablish) // If this is the chanSync message, then we'll deliver // it immediately to the active link. - if !isChanSycMsg { + if !isChanSyncMsg { // We'll send a message to the funding manager // and wait iff an active funding process for // this channel hasn't yet completed. We do @@ -875,8 +875,9 @@ func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream { if chanLink == nil { link, err := p.server.htlcSwitch.GetLink(cid) if err != nil { - peerLog.Errorf("recv'd update for unknown "+ - "channel %v from %v", cid, p) + peerLog.Errorf("recv'd update for "+ + "unknown channel %v from %v: "+ + "%v", cid, p, err) return } chanLink = link From 149a8ce94f493acdd2fe97750a2971de2b6fb3c6 Mon Sep 17 00:00:00 2001 From: "Johan T. Halseth" Date: Tue, 20 Nov 2018 15:09:45 +0100 Subject: [PATCH 06/18] channeldb/legacy_serialization: add deserializeCloseChannelSummaryV6 This commit adds a new file legacy_serialization.go, where a copy of the current deserializeCloseChannelSummary is made, called deserializeCloseChannelSummaryV6. The rationale is to keep old deserialization code around to be used during migration, as it is hard maintaining compatibility with the old format while changing the code in use. --- channeldb/legacy_serialization.go | 53 +++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 channeldb/legacy_serialization.go diff --git a/channeldb/legacy_serialization.go b/channeldb/legacy_serialization.go new file mode 100644 index 00000000..2abb3f04 --- /dev/null +++ b/channeldb/legacy_serialization.go @@ -0,0 +1,53 @@ +package channeldb + +import "io" + +// deserializeCloseChannelSummaryV6 reads the v6 database format for +// ChannelCloseSummary. +// +// NOTE: deprecated, only for migration. +func deserializeCloseChannelSummaryV6(r io.Reader) (*ChannelCloseSummary, error) { + c := &ChannelCloseSummary{} + + err := ReadElements(r, + &c.ChanPoint, &c.ShortChanID, &c.ChainHash, &c.ClosingTXID, + &c.CloseHeight, &c.RemotePub, &c.Capacity, &c.SettledBalance, + &c.TimeLockedBalance, &c.CloseType, &c.IsPending, + ) + if err != nil { + return nil, err + } + + // We'll now check to see if the channel close summary was encoded with + // any of the additional optional fields. + err = ReadElements(r, &c.RemoteCurrentRevocation) + switch { + case err == io.EOF: + return c, nil + + // If we got a non-eof error, then we know there's an actually issue. + // Otherwise, it may have been the case that this summary didn't have + // the set of optional fields. + case err != nil: + return nil, err + } + + if err := readChanConfig(r, &c.LocalChanConfig); err != nil { + return nil, err + } + + // Finally, we'll attempt to read the next unrevoked commitment point + // for the remote party. If we closed the channel before receiving a + // funding locked message, then this can be nil. As a result, we'll use + // the same technique to read the field, only if there's still data + // left in the buffer. 
+	err = ReadElements(r, &c.RemoteNextRevocation)
+	if err != nil && err != io.EOF {
+		// If we got a non-eof error, then we know there's an actual
+		// issue. Otherwise, it may have been the case that this
+		// summary didn't have the set of optional fields.
+		return nil, err
+	}
+
+	return c, nil
+}

From 28b15dcbbb287cdef1c6c4bf3c06bb54bdd27973 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:45 +0100
Subject: [PATCH 07/18] channeldb/channel: write boolean to indicate presence
 of ChannelCloseSummary fields

---
 channeldb/channel.go | 68 +++++++++++++++++++++++++++++---------------
 1 file changed, 45 insertions(+), 23 deletions(-)

diff --git a/channeldb/channel.go b/channeldb/channel.go
index a284509a..99df5258 100644
--- a/channeldb/channel.go
+++ b/channeldb/channel.go
@@ -2059,7 +2059,12 @@ func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
 	// If this is a close channel summary created before the addition of
 	// the new fields, then we can exit here.
 	if cs.RemoteCurrentRevocation == nil {
-		return nil
+		return WriteElements(w, false)
+	}
+
+	// If fields are present, write boolean to indicate this, and continue.
+	if err := WriteElements(w, true); err != nil {
+		return err
 	}

 	if err := WriteElements(w, cs.RemoteCurrentRevocation); err != nil {
@@ -2070,14 +2075,22 @@ func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
 		return err
 	}

-	// We'll write this field last, as it's possible for a channel to be
-	// closed before we learn of the next unrevoked revocation point for
-	// the remote party.
-	if cs.RemoteNextRevocation == nil {
-		return nil
+	// The RemoteNextRevocation field is optional, as it's possible for a
+	// channel to be closed before we learn of the next unrevoked
+	// revocation point for the remote party. Write a boolean indicating
+	// whether this field is present or not.
+	if err := WriteElements(w, cs.RemoteNextRevocation != nil); err != nil {
+		return err
 	}

-	return WriteElements(w, cs.RemoteNextRevocation)
+	// Write the field, if present.
+	if cs.RemoteNextRevocation != nil {
+		if err = WriteElements(w, cs.RemoteNextRevocation); err != nil {
+			return err
+		}
+	}
+
+	return nil
 }

 func fetchChannelCloseSummary(tx *bolt.Tx,
@@ -2111,15 +2124,19 @@ func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) {

 	// We'll now check to see if the channel close summary was encoded with
 	// any of the additional optional fields.
-	err = ReadElements(r, &c.RemoteCurrentRevocation)
-	switch {
-	case err == io.EOF:
-		return c, nil
+	var hasNewFields bool
+	err = ReadElements(r, &hasNewFields)
+	if err != nil {
+		return nil, err
+	}

-	// If we got a non-eof error, then we know there's an actually issue.
-	// Otherwise, it may have been the case that this summary didn't have
-	// the set of optional fields.
-	case err != nil:
+	// If fields are not present, we can return.
+	if !hasNewFields {
+		return c, nil
+	}
+
+	// Otherwise read the new fields.
+	if err := ReadElements(r, &c.RemoteCurrentRevocation); err != nil {
 		return nil, err
 	}

 	if err := readChanConfig(r, &c.LocalChanConfig); err != nil {
 		return nil, err
 	}

 	// Finally, we'll attempt to read the next unrevoked commitment point
 	// for the remote party. If we closed the channel before receiving a
-	// funding locked message, then this can be nil. As a result, we'll use
-	// the same technique to read the field, only if there's still data
-	// left in the buffer.
- err = ReadElements(r, &c.RemoteNextRevocation) - if err != nil && err != io.EOF { - // If we got a non-eof error, then we know there's an actually - // issue. Otherwise, it may have been the case that this - // summary didn't have the set of optional fields. + // funding locked message then this might not be present. A boolean + // indicating whether the field is present will come first. + var hasRemoteNextRevocation bool + err = ReadElements(r, &hasRemoteNextRevocation) + if err != nil { return nil, err } + // If this field was written, read it. + if hasRemoteNextRevocation { + err = ReadElements(r, &c.RemoteNextRevocation) + if err != nil { + return nil, err + } + } + return c, nil } From 46c961aa17ba9a3e66b2cfd5bb810ff76994bd4c Mon Sep 17 00:00:00 2001 From: "Johan T. Halseth" Date: Tue, 20 Nov 2018 15:09:45 +0100 Subject: [PATCH 08/18] channeldb/migrations: migrate ChannelCloseSummary --- channeldb/db.go | 7 +++++++ channeldb/migrations.go | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/channeldb/db.go b/channeldb/db.go index 2c8fc69c..a635db3b 100644 --- a/channeldb/db.go +++ b/channeldb/db.go @@ -80,6 +80,13 @@ var ( number: 6, migration: migratePruneEdgeUpdateIndex, }, + { + // The DB version that migrates the ChannelCloseSummary + // to a format where optional fields are indicated with + // boolean flags. + number: 7, + migration: migrateOptionalChannelCloseSummaryFields, + }, } // Big endian is the preferred byte order, due to cursor scans over diff --git a/channeldb/migrations.go b/channeldb/migrations.go index 518271a8..cddf6278 100644 --- a/channeldb/migrations.go +++ b/channeldb/migrations.go @@ -573,3 +573,40 @@ func migratePruneEdgeUpdateIndex(tx *bolt.Tx) error { return nil } + +// migrateOptionalChannelCloseSummaryFields migrates the serialized format of +// ChannelCloseSummary to a format where optional fields' presence is indicated +// with boolean markers. +func migrateOptionalChannelCloseSummaryFields(tx *bolt.Tx) error { + closedChanBucket := tx.Bucket(closedChannelBucket) + if closedChanBucket == nil { + return nil + } + + log.Info("Migrating to new closed channel format...") + err := closedChanBucket.ForEach(func(chanID, summary []byte) error { + r := bytes.NewReader(summary) + + // Read the old (v6) format from the database. + c, err := deserializeCloseChannelSummaryV6(r) + if err != nil { + return err + } + + // Serialize using the new format, and put back into the + // bucket. + var b bytes.Buffer + if err := serializeChannelCloseSummary(&b, c); err != nil { + return err + } + + return closedChanBucket.Put(chanID, b.Bytes()) + }) + if err != nil { + return fmt.Errorf("unable to update closed channels: %v", err) + } + + log.Info("Migration to new closed channel format complete!") + + return nil +} From 0b9a323fcbb972dffe2166fa345ff17b3350df38 Mon Sep 17 00:00:00 2001 From: "Johan T. Halseth" Date: Tue, 20 Nov 2018 15:09:45 +0100 Subject: [PATCH 09/18] channeldb/channel: add LastChanSync field to CloseChannelSummary This commit adds an optional field LastChanSyncMsg to the CloseChannelSummary, which will be used to save the ChannelReestablish message for the channel at the point of channel close. 
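To make the encoding concrete, here is a minimal, self-contained sketch of the
marker-then-value pattern that patches 07 and 09 apply: a boolean flag is
written directly before each optional field, so readers never have to probe
for io.EOF to detect a missing field. The helper names writeOptionalBytes and
readOptionalBytes are invented for illustration only; the series itself routes
everything through channeldb's WriteElements/ReadElements codec.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeOptionalBytes writes a presence marker first, then (only if the field
// is present) a length-prefixed payload.
func writeOptionalBytes(w io.Writer, b []byte) error {
	present := b != nil
	if err := binary.Write(w, binary.BigEndian, present); err != nil {
		return err
	}
	if !present {
		return nil
	}
	if err := binary.Write(w, binary.BigEndian, uint32(len(b))); err != nil {
		return err
	}
	_, err := w.Write(b)
	return err
}

// readOptionalBytes reads the marker, and only attempts to decode the payload
// when the marker says it was written. A missing optional field is therefore
// an expected, explicitly encoded case rather than an io.EOF side effect.
func readOptionalBytes(r io.Reader) ([]byte, error) {
	var present bool
	if err := binary.Read(r, binary.BigEndian, &present); err != nil {
		return nil, err
	}
	if !present {
		return nil, nil
	}
	var n uint32
	if err := binary.Read(r, binary.BigEndian, &n); err != nil {
		return nil, err
	}
	b := make([]byte, n)
	if _, err := io.ReadFull(r, b); err != nil {
		return nil, err
	}
	return b, nil
}

func main() {
	var buf bytes.Buffer
	// A present field round-trips; an absent one costs a single marker byte.
	writeOptionalBytes(&buf, []byte("chan-sync-msg"))
	writeOptionalBytes(&buf, nil)
	a, _ := readOptionalBytes(&buf)
	b, _ := readOptionalBytes(&buf)
	fmt.Printf("first: %q, second present: %v\n", a, b != nil)
}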
---
 channeldb/channel.go | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/channeldb/channel.go b/channeldb/channel.go
index 99df5258..5a6245b8 100644
--- a/channeldb/channel.go
+++ b/channeldb/channel.go
@@ -3,6 +3,7 @@ package channeldb
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -1832,6 +1833,10 @@ type ChannelCloseSummary struct {

 	// LocalChanCfg is the channel configuration for the local node.
 	LocalChanConfig ChannelConfig
+
+	// LastChanSyncMsg is the ChannelReestablish message for this channel
+	// for the state at the point where it was closed.
+	LastChanSyncMsg *lnwire.ChannelReestablish
 }

 // CloseChannel closes a previously active Lightning channel. Closing a channel
@@ -2090,6 +2095,18 @@ func serializeChannelCloseSummary(w io.Writer, cs *ChannelCloseSummary) error {
 		}
 	}

+	// Write whether the channel sync message is present.
+	if err := WriteElements(w, cs.LastChanSyncMsg != nil); err != nil {
+		return err
+	}
+
+	// Write the channel sync message, if present.
+	if cs.LastChanSyncMsg != nil {
+		if err := WriteElements(w, cs.LastChanSyncMsg); err != nil {
+			return err
+		}
+	}
+
 	return nil
 }

@@ -2162,6 +2179,32 @@ func deserializeCloseChannelSummary(r io.Reader) (*ChannelCloseSummary, error) {
 		}
 	}

+	// Check if we have a channel sync message to read.
+	var hasChanSyncMsg bool
+	err = ReadElements(r, &hasChanSyncMsg)
+	if err == io.EOF {
+		return c, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	// If a chan sync message is present, read it.
+	if hasChanSyncMsg {
+		// We must pass in reference to a lnwire.Message for the codec
+		// to support it.
+		var msg lnwire.Message
+		if err := ReadElements(r, &msg); err != nil {
+			return nil, err
+		}
+
+		chanSync, ok := msg.(*lnwire.ChannelReestablish)
+		if !ok {
+			return nil, errors.New("unable to cast db Message to " +
+				"ChannelReestablish")
+		}
+		c.LastChanSyncMsg = chanSync
+	}
+
 	return c, nil
 }

From 676a1b140754b63998d75ee9fc61cb557eddc71b Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:45 +0100
Subject: [PATCH 10/18] lnwallet+link: make ChanSyncMsg take channel state as
 arg

This lets us get the channel reestablish message without creating the
LightningChannel struct first.

---
 htlcswitch/link.go       |  2 +-
 lnwallet/channel.go      | 17 +++++++++-------
 lnwallet/channel_test.go | 42 ++++++++++++++++++++--------------------
 3 files changed, 32 insertions(+), 29 deletions(-)

diff --git a/htlcswitch/link.go b/htlcswitch/link.go
index 71563592..3ee614c7 100644
--- a/htlcswitch/link.go
+++ b/htlcswitch/link.go
@@ -516,7 +516,7 @@ func (l *channelLink) syncChanStates() error {
 	// First, we'll generate our ChanSync message to send to the other
 	// side. Based on this message, the remote party will decide if they
 	// need to retransmit any data or not.
-	localChanSyncMsg, err := l.channel.ChanSyncMsg()
+	localChanSyncMsg, err := lnwallet.ChanSyncMsg(l.channel.State())
 	if err != nil {
 		return fmt.Errorf("unable to generate chan sync message for "+
 			"ChannelPoint(%v)", l.channel.ChannelPoint())
 	}

diff --git a/lnwallet/channel.go b/lnwallet/channel.go
index 34e815e7..d45ba296 100644
--- a/lnwallet/channel.go
+++ b/lnwallet/channel.go
@@ -3446,19 +3446,22 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
 //    it.
 // 3. We didn't get the last RevokeAndAck message they sent, so they'll
 //    re-send it.
-func (lc *LightningChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) { +func ChanSyncMsg(c *channeldb.OpenChannel) (*lnwire.ChannelReestablish, error) { + c.Lock() + defer c.Unlock() + // The remote commitment height that we'll send in the // ChannelReestablish message is our current commitment height plus // one. If the receiver thinks that our commitment height is actually // *equal* to this value, then they'll re-send the last commitment that // they sent but we never fully processed. - localHeight := lc.localCommitChain.tip().height + localHeight := c.LocalCommitment.CommitHeight nextLocalCommitHeight := localHeight + 1 // The second value we'll send is the height of the remote commitment // from our PoV. If the receiver thinks that their height is actually // *one plus* this value, then they'll re-send their last revocation. - remoteChainTipHeight := lc.remoteCommitChain.tail().height + remoteChainTipHeight := c.RemoteCommitment.CommitHeight // If this channel has undergone a commitment update, then in order to // prove to the remote party our knowledge of their prior commitment @@ -3466,7 +3469,7 @@ func (lc *LightningChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) { // remote party sent. var lastCommitSecret [32]byte if remoteChainTipHeight != 0 { - remoteSecret, err := lc.channelState.RevocationStore.LookUp( + remoteSecret, err := c.RevocationStore.LookUp( remoteChainTipHeight - 1, ) if err != nil { @@ -3477,7 +3480,7 @@ func (lc *LightningChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) { // Additionally, we'll send over the current unrevoked commitment on // our local commitment transaction. - currentCommitSecret, err := lc.channelState.RevocationProducer.AtIndex( + currentCommitSecret, err := c.RevocationProducer.AtIndex( localHeight, ) if err != nil { @@ -3486,7 +3489,7 @@ func (lc *LightningChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) { return &lnwire.ChannelReestablish{ ChanID: lnwire.NewChanIDFromOutPoint( - &lc.channelState.FundingOutpoint, + &c.FundingOutpoint, ), NextLocalCommitHeight: nextLocalCommitHeight, RemoteCommitTailHeight: remoteChainTipHeight, @@ -6188,7 +6191,7 @@ func (lc *LightningChannel) IsPending() bool { return lc.channelState.IsPending } -// State provides access to the channel's internal state for testing. +// State provides access to the channel's internal state. func (lc *LightningChannel) State() *channeldb.OpenChannel { return lc.channelState } diff --git a/lnwallet/channel_test.go b/lnwallet/channel_test.go index 4586d75e..24701f28 100644 --- a/lnwallet/channel_test.go +++ b/lnwallet/channel_test.go @@ -2433,7 +2433,7 @@ func assertNoChanSyncNeeded(t *testing.T, aliceChannel *LightningChannel, _, _, line, _ := runtime.Caller(1) - aliceChanSyncMsg, err := aliceChannel.ChanSyncMsg() + aliceChanSyncMsg, err := ChanSyncMsg(aliceChannel.channelState) if err != nil { t.Fatalf("line #%v: unable to produce chan sync msg: %v", line, err) @@ -2448,7 +2448,7 @@ func assertNoChanSyncNeeded(t *testing.T, aliceChannel *LightningChannel, "instead wants to send: %v", line, spew.Sdump(bobMsgsToSend)) } - bobChanSyncMsg, err := bobChannel.ChanSyncMsg() + bobChanSyncMsg, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("line #%v: unable to produce chan sync msg: %v", line, err) @@ -2681,11 +2681,11 @@ func TestChanSyncOweCommitment(t *testing.T) { // Bob doesn't get this message so upon reconnection, they need to // synchronize. 
Alice should conclude that she owes Bob a commitment, // while Bob should think he's properly synchronized. - aliceSyncMsg, err := aliceChannel.ChanSyncMsg() + aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } - bobSyncMsg, err := bobChannel.ChanSyncMsg() + bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -2995,11 +2995,11 @@ func TestChanSyncOweRevocation(t *testing.T) { // If we fetch the channel sync messages at this state, then Alice // should report that she owes Bob a revocation message, while Bob // thinks they're fully in sync. - aliceSyncMsg, err := aliceChannel.ChanSyncMsg() + aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } - bobSyncMsg, err := bobChannel.ChanSyncMsg() + bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3164,11 +3164,11 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) { // If we now attempt to resync, then Alice should conclude that she // doesn't need any further updates, while Bob concludes that he needs // to re-send both his revocation and commit sig message. - aliceSyncMsg, err := aliceChannel.ChanSyncMsg() + aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } - bobSyncMsg, err := bobChannel.ChanSyncMsg() + bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3374,11 +3374,11 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) { // Now if we attempt to synchronize states at this point, Alice should // detect that she owes nothing, while Bob should re-send both his // RevokeAndAck as well as his commitment message. - aliceSyncMsg, err := aliceChannel.ChanSyncMsg() + aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } - bobSyncMsg, err := bobChannel.ChanSyncMsg() + bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3573,11 +3573,11 @@ func TestChanSyncFailure(t *testing.T) { // assertLocalDataLoss checks that aliceOld and bobChannel detects that // Alice has lost state during sync. assertLocalDataLoss := func(aliceOld *LightningChannel) { - aliceSyncMsg, err := aliceOld.ChanSyncMsg() + aliceSyncMsg, err := ChanSyncMsg(aliceOld.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } - bobSyncMsg, err := bobChannel.ChanSyncMsg() + bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3629,7 +3629,7 @@ func TestChanSyncFailure(t *testing.T) { // If we remove the recovery options from Bob's message, Alice cannot // tell if she lost state, since Bob might be lying. She still should // be able to detect that chains cannot be synced. 
- bobSyncMsg, err := bobChannel.ChanSyncMsg() + bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3643,7 +3643,7 @@ func TestChanSyncFailure(t *testing.T) { // If Bob lies about the NextLocalCommitHeight, making it greater than // what Alice expect, she cannot tell for sure whether she lost state, // but should detect the desync. - bobSyncMsg, err = bobChannel.ChanSyncMsg() + bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3656,7 +3656,7 @@ func TestChanSyncFailure(t *testing.T) { // If Bob's NextLocalCommitHeight is lower than what Alice expects, Bob // probably lost state. - bobSyncMsg, err = bobChannel.ChanSyncMsg() + bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3669,7 +3669,7 @@ func TestChanSyncFailure(t *testing.T) { // If Alice and Bob's states are in sync, but Bob is sending the wrong // LocalUnrevokedCommitPoint, Alice should detect this. - bobSyncMsg, err = bobChannel.ChanSyncMsg() + bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3695,7 +3695,7 @@ func TestChanSyncFailure(t *testing.T) { // when there's a pending remote commit. halfAdvance() - bobSyncMsg, err = bobChannel.ChanSyncMsg() + bobSyncMsg, err = ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -3785,11 +3785,11 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) { // Bob doesn't get this message so upon reconnection, they need to // synchronize. Alice should conclude that she owes Bob a commitment, // while Bob should think he's properly synchronized. - aliceSyncMsg, err := aliceChannel.ChanSyncMsg() + aliceSyncMsg, err := ChanSyncMsg(aliceChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } - bobSyncMsg, err := bobChannel.ChanSyncMsg() + bobSyncMsg, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to produce chan sync msg: %v", err) } @@ -4015,11 +4015,11 @@ func TestChanSyncInvalidLastSecret(t *testing.T) { } // Next, we'll produce the ChanSync messages for both parties. - aliceChanSync, err := aliceChannel.ChanSyncMsg() + aliceChanSync, err := ChanSyncMsg(aliceChannel.channelState) if err != nil { t.Fatalf("unable to generate chan sync msg: %v", err) } - bobChanSync, err := bobChannel.ChanSyncMsg() + bobChanSync, err := ChanSyncMsg(bobChannel.channelState) if err != nil { t.Fatalf("unable to generate chan sync msg: %v", err) } From a9f93b29889a60295d344532fe7d47c1ef0d4031 Mon Sep 17 00:00:00 2001 From: "Johan T. Halseth" Date: Tue, 20 Nov 2018 15:09:45 +0100 Subject: [PATCH 11/18] contractcourt/chain_watcher: add channel sync message to CloseChannelSummary --- contractcourt/chain_watcher.go | 27 +++++++++++++++++++++++++++ lnwallet/channel.go | 9 +++++++++ 2 files changed, 36 insertions(+) diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go index c183788f..2d60b782 100644 --- a/contractcourt/chain_watcher.go +++ b/contractcourt/chain_watcher.go @@ -522,6 +522,15 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet LocalChanConfig: c.cfg.chanState.LocalChanCfg, } + // Attempt to add a channel sync message to the close summary. 
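+	// Generating it is best effort: on failure below we log the error
+	// and still record the close, since the message only serves to help
+	// the remote peer recover funds after data loss.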
+ chanSync, err := lnwallet.ChanSyncMsg(c.cfg.chanState) + if err != nil { + log.Errorf("ChannelPoint(%v): unable to create channel sync "+ + "message: %v", c.cfg.chanState.FundingOutpoint, err) + } else { + closeSummary.LastChanSyncMsg = chanSync + } + // Create a summary of all the information needed to handle the // cooperative closure. closeInfo := &CooperativeCloseInfo{ @@ -590,6 +599,15 @@ func (c *chainWatcher) dispatchLocalForceClose( closeSummary.TimeLockedBalance += htlcValue } + // Attempt to add a channel sync message to the close summary. + chanSync, err := lnwallet.ChanSyncMsg(c.cfg.chanState) + if err != nil { + log.Errorf("ChannelPoint(%v): unable to create channel sync "+ + "message: %v", c.cfg.chanState.FundingOutpoint, err) + } else { + closeSummary.LastChanSyncMsg = chanSync + } + // With the event processed, we'll now notify all subscribers of the // event. closeInfo := &LocalUnilateralCloseInfo{ @@ -749,6 +767,15 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail LocalChanConfig: c.cfg.chanState.LocalChanCfg, } + // Attempt to add a channel sync message to the close summary. + chanSync, err := lnwallet.ChanSyncMsg(c.cfg.chanState) + if err != nil { + log.Errorf("ChannelPoint(%v): unable to create channel sync "+ + "message: %v", c.cfg.chanState.FundingOutpoint, err) + } else { + closeSummary.LastChanSyncMsg = chanSync + } + if err := c.cfg.chanState.CloseChannel(&closeSummary); err != nil { return err } diff --git a/lnwallet/channel.go b/lnwallet/channel.go index d45ba296..ca6b2e10 100644 --- a/lnwallet/channel.go +++ b/lnwallet/channel.go @@ -5116,6 +5116,15 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer Signer, LocalChanConfig: chanState.LocalChanCfg, } + // Attempt to add a channel sync message to the close summary. + chanSync, err := ChanSyncMsg(chanState) + if err != nil { + walletLog.Errorf("ChannelPoint(%v): unable to create channel sync "+ + "message: %v", chanState.FundingOutpoint, err) + } else { + closeSummary.LastChanSyncMsg = chanSync + } + return &UnilateralCloseSummary{ SpendDetail: commitSpend, ChannelCloseSummary: closeSummary, From 45b132a95470153327c22c927216d4c0dc2e562a Mon Sep 17 00:00:00 2001 From: "Johan T. Halseth" Date: Tue, 20 Nov 2018 15:09:46 +0100 Subject: [PATCH 12/18] channeldb/db: define FetchClosedChannelForID FetchClosedChannelForID is used to find the channel close summary given only a channel ID. --- channeldb/db.go | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/channeldb/db.go b/channeldb/db.go index a635db3b..e36dcf84 100644 --- a/channeldb/db.go +++ b/channeldb/db.go @@ -12,6 +12,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/coreos/bbolt" "github.com/go-errors/errors" + "github.com/lightningnetwork/lnd/lnwire" ) const ( @@ -616,6 +617,54 @@ func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, er return chanSummary, nil } +// FetchClosedChannelForID queries for a channel close summary using the +// channel ID of the channel in question. +func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) ( + *ChannelCloseSummary, error) { + + var chanSummary *ChannelCloseSummary + if err := d.View(func(tx *bolt.Tx) error { + closeBucket := tx.Bucket(closedChannelBucket) + if closeBucket == nil { + return ErrClosedChannelNotFound + } + + // The first 30 bytes of the channel ID and outpoint will be + // equal. 
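+		// (A channel ID is the funding outpoint's txid with the
+		// output index XOR'd into the final two bytes, so the two
+		// serializations can only differ in those last two bytes.)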
+		cursor := closeBucket.Cursor()
+		op, c := cursor.Seek(cid[:30])
+
+		// We scan over all possible candidates for this channel ID.
+		for ; op != nil && bytes.Compare(cid[:30], op[:30]) <= 0; op, c = cursor.Next() {
+			var outPoint wire.OutPoint
+			err := readOutpoint(bytes.NewReader(op), &outPoint)
+			if err != nil {
+				return err
+			}
+
+			// If the found outpoint does not correspond to this
+			// channel ID, we continue.
+			if !cid.IsChanPoint(&outPoint) {
+				continue
+			}
+
+			// Deserialize the close summary and return.
+			r := bytes.NewReader(c)
+			chanSummary, err = deserializeCloseChannelSummary(r)
+			if err != nil {
+				return err
+			}
+
+			return nil
+		}
+		return ErrClosedChannelNotFound
+	}); err != nil {
+		return nil, err
+	}
+
+	return chanSummary, nil
+}
+
 // MarkChanFullyClosed marks a channel as fully closed within the database. A
 // channel should be marked as fully closed if the channel was initially
 // cooperatively closed and it's reached a single confirmation, or after all

From da3f515f122646b038e5e4ce682e1e881bd8bea6 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:46 +0100
Subject: [PATCH 13/18] channeldb/db_test: add TestFetchClosedChannelForID

---
 channeldb/channel_test.go |  4 +--
 channeldb/db_test.go      | 76 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 78 insertions(+), 2 deletions(-)

diff --git a/channeldb/channel_test.go b/channeldb/channel_test.go
index 4252b721..97ab7d5b 100644
--- a/channeldb/channel_test.go
+++ b/channeldb/channel_test.go
@@ -248,10 +248,10 @@ func TestOpenChannelPutGetDelete(t *testing.T) {
 	t.Parallel()

 	cdb, cleanUp, err := makeTestDB()
-	defer cleanUp()
 	if err != nil {
 		t.Fatalf("unable to make test database: %v", err)
 	}
+	defer cleanUp()

 	// Create the test channel state, then add an additional fake HTLC
 	// before syncing to disk.
@@ -368,10 +368,10 @@ func TestChannelStateTransition(t *testing.T) {
 	t.Parallel()

 	cdb, cleanUp, err := makeTestDB()
-	defer cleanUp()
 	if err != nil {
 		t.Fatalf("unable to make test database: %v", err)
 	}
+	defer cleanUp()

 	// First create a minimal channel, then perform a full sync in order to
 	// persist the data.
diff --git a/channeldb/db_test.go b/channeldb/db_test.go
index 872e6307..794b1fcf 100644
--- a/channeldb/db_test.go
+++ b/channeldb/db_test.go
@@ -5,6 +5,9 @@ import (
 	"os"
 	"path/filepath"
 	"testing"
+
+	"github.com/btcsuite/btcutil"
+	"github.com/lightningnetwork/lnd/lnwire"
 )

 func TestOpenWithCreate(t *testing.T) {
@@ -71,3 +74,76 @@ func TestWipe(t *testing.T) {
 			ErrNoClosedChannels, err)
 	}
 }
+
+// TestFetchClosedChannelForID tests that we are able to properly retrieve a
+// ChannelCloseSummary from the DB given a ChannelID.
+func TestFetchClosedChannelForID(t *testing.T) {
+	t.Parallel()
+
+	const numChans = 101
+
+	cdb, cleanUp, err := makeTestDB()
+	if err != nil {
+		t.Fatalf("unable to make test database: %v", err)
+	}
+	defer cleanUp()
+
+	// Create the test channel state, whose funding outpoint index we
+	// will mutate to generate new channel IDs.
+	state, err := createTestChannelState(cdb)
+	if err != nil {
+		t.Fatalf("unable to create channel state: %v", err)
+	}
+
+	// Now run through the number of channels, and modify the outpoint index
+	// to create new channel IDs.
+	for i := uint32(0); i < numChans; i++ {
+		// Save the open channel to disk.
+		state.FundingOutpoint.Index = i
+		if err := state.FullSync(); err != nil {
+			t.Fatalf("unable to save and serialize channel "+
+				"state: %v", err)
+		}
+
+		// Close the channel.
To make sure we retrieve the correct
+		// summary later, we make them differ in the SettledBalance.
+		closeSummary := &ChannelCloseSummary{
+			ChanPoint:      state.FundingOutpoint,
+			RemotePub:      state.IdentityPub,
+			SettledBalance: btcutil.Amount(500 + i),
+		}
+		if err := state.CloseChannel(closeSummary); err != nil {
+			t.Fatalf("unable to close channel: %v", err)
+		}
+	}
+
+	// Now run through them all again and make sure we are able to retrieve
+	// summaries from the DB.
+	for i := uint32(0); i < numChans; i++ {
+		state.FundingOutpoint.Index = i
+
+		// We calculate the ChannelID and use it to fetch the summary.
+		cid := lnwire.NewChanIDFromOutPoint(&state.FundingOutpoint)
+		fetchedSummary, err := cdb.FetchClosedChannelForID(cid)
+		if err != nil {
+			t.Fatalf("unable to fetch close summary: %v", err)
+		}
+
+		// Make sure we retrieved the correct one by checking the
+		// SettledBalance.
+		if fetchedSummary.SettledBalance != btcutil.Amount(500+i) {
+			t.Fatalf("summaries don't match: expected %v got %v",
+				btcutil.Amount(500+i),
+				fetchedSummary.SettledBalance)
+		}
+	}
+
+	// As a final test we make sure that we get ErrClosedChannelNotFound
+	// for a ChannelID we didn't add to the DB.
+	state.FundingOutpoint.Index++
+	cid := lnwire.NewChanIDFromOutPoint(&state.FundingOutpoint)
+	_, err = cdb.FetchClosedChannelForID(cid)
+	if err != ErrClosedChannelNotFound {
+		t.Fatalf("expected ErrClosedChannelNotFound, instead got: %v", err)
+	}
+}

From 0dd7eed64ea7a6b6398503e57fae93018b1297c3 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:46 +0100
Subject: [PATCH 14/18] peer: define resendChanSyncMsg

This method is used to fetch channel sync messages for closed channels
from the db, and respond to the peer.
---
 peer.go | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 56 insertions(+), 1 deletion(-)

diff --git a/peer.go b/peer.go
index f8a541c9..90ecec10 100644
--- a/peer.go
+++ b/peer.go
@@ -874,7 +874,33 @@ func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream {
 			// active goroutine dedicated to this channel.
 			if chanLink == nil {
 				link, err := p.server.htlcSwitch.GetLink(cid)
-				if err != nil {
+				switch {
+
+				// If we failed to find the link in question,
+				// and the message received was a channel sync
+				// message, then this might be a peer trying to
+				// resync a closed channel. In this case we'll
+				// try to resend our last channel sync message,
+				// such that the peer can recover funds from
+				// the closed channel.
+				case err != nil && isChanSyncMsg:
+					peerLog.Debugf("Unable to find "+
+						"link(%v) to handle channel "+
+						"sync, attempting to resend "+
+						"last ChanSync message", cid)
+
+					err := p.resendChanSyncMsg(cid)
+					if err != nil {
+						// TODO(halseth): send error to
+						// peer?
+						peerLog.Errorf(
+							"resend failed: %v",
+							err,
+						)
+					}
+					return
+
+				case err != nil:
					peerLog.Errorf("recv'd update for "+
						"unknown channel %v from %v: "+
						"%v", cid, p, err)
@@ -2144,6 +2170,35 @@ func (p *peer) sendInitMsg() error {
 	return p.writeMessage(msg)
 }

+// resendChanSyncMsg will attempt to find a channel sync message for the closed
+// channel and resend it to our peer.
+func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error {
+	// Check if we have any channel sync messages stored for this channel.
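+	// The summary is looked up by channel ID rather than by outpoint,
+	// since the incoming ChannelReestablish message only carries the ID.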
+	c, err := p.server.chanDB.FetchClosedChannelForID(cid)
+	if err != nil {
+		return fmt.Errorf("unable to fetch channel sync messages for "+
+			"peer %v: %v", p, err)
+	}
+
+	if c.LastChanSyncMsg == nil {
+		return fmt.Errorf("no chan sync message stored for channel %v",
+			cid)
+	}
+
+	peerLog.Debugf("Re-sending channel sync message for channel %v to "+
+		"peer %v", cid, p)
+
+	if err := p.SendMessage(true, c.LastChanSyncMsg); err != nil {
+		return fmt.Errorf("failed resending channel sync "+
+			"message to peer %v: %v", p, err)
+	}
+
+	peerLog.Debugf("Re-sent channel sync message for channel %v to peer "+
+		"%v", cid, p)
+
+	return nil
+}
+
 // SendMessage sends a variadic number of message to remote peer. The first
 // argument denotes if the method should block until the message has been sent
 // to the remote peer.

From 9e81b1fe536d750786a1b723f7b14e9b56a83c7f Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:46 +0100
Subject: [PATCH 15/18] chain_watcher: poll for commit point in case of failure

We poll the database for the channel commit point with an exponential
backoff. This is meant to handle the case where we are in the process
of handling a channel sync, and the case where we detect a channel
close and must wait for the peer to come online to start channel sync
before we can proceed.
---
 contractcourt/chain_watcher.go | 49 ++++++++++++++++++++++++++++------
 1 file changed, 41 insertions(+), 8 deletions(-)

diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index 2d60b782..1bc1ee18 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"sync"
 	"sync/atomic"
+	"time"

 	"github.com/btcsuite/btcd/btcec"
 	"github.com/btcsuite/btcd/chaincfg"
@@ -16,6 +17,16 @@ import (
 	"github.com/lightningnetwork/lnd/lnwallet"
 )

+const (
+	// minCommitPointPollTimeout is the minimum time we'll wait before
+	// polling the database for a channel's commitpoint.
+	minCommitPointPollTimeout = 1 * time.Second
+
+	// maxCommitPointPollTimeout is the maximum time we'll wait before
+	// polling the database for a channel's commitpoint.
+	maxCommitPointPollTimeout = 10 * time.Minute
+)
+
 // LocalUnilateralCloseInfo encapsulates all the information we need to act
 // on a local force close that gets confirmed.
 type LocalUnilateralCloseInfo struct {
@@ -402,16 +413,38 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {

 			// If we are lucky, the remote peer sent us the correct
 			// commitment point during channel sync, such that we
-			// can sweep our funds.
-			// TODO(halseth): must handle the case where we haven't
-			// yet processed the chan sync message.
-			commitPoint, err := c.cfg.chanState.DataLossCommitPoint()
-			if err != nil {
+			// can sweep our funds. If we cannot find the commit
+			// point, there's not much we can do other than wait
+			// until we can retrieve it. We will attempt to
+			// retrieve it from the peer each time we connect to
+			// it.
+			// TODO(halseth): actively initiate re-connection to
+			// the peer?
+			var commitPoint *btcec.PublicKey
+			backoff := minCommitPointPollTimeout
+			for {
+				commitPoint, err = c.cfg.chanState.DataLossCommitPoint()
+				if err == nil {
+					break
+				}
+
 				log.Errorf("Unable to retrieve commitment "+
 					"point for channel(%v) with lost "+
-					"state: %v",
-					c.cfg.chanState.FundingOutpoint, err)
-				return
+					"state: %v. Retrying in %v.",
+					c.cfg.chanState.FundingOutpoint,
+					err, backoff)
+
+				select {
+				// Wait before retrying, with an exponential
+				// backoff.
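+				// The wait doubles after every failed
+				// attempt, capped at
+				// maxCommitPointPollTimeout.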
+				case <-time.After(backoff):
+					backoff = 2 * backoff
+					if backoff > maxCommitPointPollTimeout {
+						backoff = maxCommitPointPollTimeout
+					}
+
+				case <-c.quit:
+					return
+				}
+			}

 			log.Infof("Recovered commit point(%x) for "+

From 0c4948b40bd09758bcafa0ee9e86fdb34c6d84bb Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:46 +0100
Subject: [PATCH 16/18] lnd test: add offline scenario to testDataLossProtection

This adds the scenario where a channel is closed while the node is
offline, after which the node loses state and comes back online. In
this case the node should attempt to resync the channel, and the peer
should resend a channel sync message for the closed channel, such that
the node can retrieve its funds.
---
 lnd_test.go | 85 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 85 insertions(+)

diff --git a/lnd_test.go b/lnd_test.go
index a3a093e5..71938818 100644
--- a/lnd_test.go
+++ b/lnd_test.go
@@ -7272,6 +7272,91 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {

 	assertNodeNumChannels(t, ctxb, dave, 0)
 	assertNodeNumChannels(t, ctxb, carol, 0)
+
+	// As a second part of this test, we will test the scenario where a
+	// channel is closed while Dave is offline; he then loses his state
+	// and comes back online. In this case the node should attempt to
+	// resync the channel, and the peer should resend a channel sync
+	// message for the closed channel, such that Dave can retrieve his
+	// funds.
+	//
+	// We start by letting Dave time travel back to an outdated state.
+	restartDave, chanPoint2, daveStartingBalance, err := timeTravel(dave)
+	if err != nil {
+		t.Fatalf("unable to time travel dave: %v", err)
+	}
+
+	carolBalResp, err = carol.WalletBalance(ctxb, balReq)
+	if err != nil {
+		t.Fatalf("unable to get carol's balance: %v", err)
+	}
+	carolStartingBalance = carolBalResp.ConfirmedBalance
+
+	// Now let Carol force close the channel while Dave is offline.
+	ctxt, _ := context.WithTimeout(ctxb, timeout)
+	closeChannelAndAssert(ctxt, t, net, carol, chanPoint2, true)
+
+	// Wait for the channel to be marked pending force close.
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	err = waitForChannelPendingForceClose(ctxt, carol, chanPoint2)
+	if err != nil {
+		t.Fatalf("channel not pending force close: %v", err)
+	}
+
+	// Mine enough blocks for Carol to sweep her funds.
+	mineBlocks(t, net, defaultCSV)
+
+	carolSweep, err = waitForTxInMempool(net.Miner.Node, 15*time.Second)
+	if err != nil {
+		t.Fatalf("unable to find Carol's sweep tx in mempool: %v", err)
+	}
+	block = mineBlocks(t, net, 1)[0]
+	assertTxInBlock(t, block, carolSweep)
+
+	// Now the channel should be fully closed also from Carol's POV.
+	assertNumPendingChannels(t, carol, 0, 0)
+
+	// Make sure Carol got her balance back.
+	carolBalResp, err = carol.WalletBalance(ctxb, balReq)
+	if err != nil {
+		t.Fatalf("unable to get carol's balance: %v", err)
+	}
+	carolBalance = carolBalResp.ConfirmedBalance
+	if carolBalance <= carolStartingBalance {
+		t.Fatalf("expected carol to have balance above %d, "+
+			"instead had %v", carolStartingBalance,
+			carolBalance)
+	}
+
+	assertNodeNumChannels(t, ctxb, carol, 0)
+
+	// When Dave comes online, he will reconnect to Carol and try to
+	// resync the channel, but it will already be closed. Carol should
+	// resend the information Dave needs to sweep his funds.
+	if err := restartDave(); err != nil {
+		t.Fatalf("unable to restart dave: %v", err)
+	}
+
+	// Dave should sweep his funds.
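+	// The sweep can only be published once the resent channel sync
+	// message has let him recover the remote commit point.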
+	_, err = waitForTxInMempool(net.Miner.Node, 15*time.Second)
+	if err != nil {
+		t.Fatalf("unable to find Dave's sweep tx in mempool: %v", err)
+	}
+
+	// Mine a block to confirm the sweep, and make sure Dave got his
+	// balance back.
+	mineBlocks(t, net, 1)
+	assertNodeNumChannels(t, ctxb, dave, 0)
+
+	daveBalResp, err = dave.WalletBalance(ctxb, balReq)
+	if err != nil {
+		t.Fatalf("unable to get dave's balance: %v", err)
+	}
+
+	daveBalance = daveBalResp.ConfirmedBalance
+	if daveBalance <= daveStartingBalance {
+		t.Fatalf("expected dave to have balance above %d, instead had %v",
+			daveStartingBalance, daveBalance)
+	}
 }

 // assertNodeNumChannels polls the provided node's list channels rpc until it

From a9bd6100ffc2278d880175b53f9456b94c64f415 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Tue, 20 Nov 2018 15:09:46 +0100
Subject: [PATCH 17/18] htlcswitch/link: remove handled TODO

---
 htlcswitch/link.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/htlcswitch/link.go b/htlcswitch/link.go
index 3ee614c7..9bda1e65 100644
--- a/htlcswitch/link.go
+++ b/htlcswitch/link.go
@@ -789,9 +789,6 @@ func (l *channelLink) htlcManager() {
 		// We failed syncing the commit chains, probably
 		// because the remote has lost state. We should force
 		// close the channel.
-		// TODO(halseth): store sent chanSync message to
-		// database, such that it can be resent to peer in case
-		// it tries to sync the channel again.
 		case err == lnwallet.ErrCommitSyncRemoteDataLoss:
 			fallthrough

From b1a35fc8f40be6aee52205c60810ef7d46d12fc9 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 21 Nov 2018 13:36:42 +0100
Subject: [PATCH 18/18] channeldb/migrations_test: add TestMigrateOptionalChannelCloseSummaryFields

---
 channeldb/migrations_test.go | 279 +++++++++++++++++++++++++++++++++++
 1 file changed, 279 insertions(+)

diff --git a/channeldb/migrations_test.go b/channeldb/migrations_test.go
index 6fbefd0c..02c61b22 100644
--- a/channeldb/migrations_test.go
+++ b/channeldb/migrations_test.go
@@ -1,11 +1,17 @@
 package channeldb

 import (
+	"bytes"
 	"crypto/sha256"
 	"encoding/binary"
+	"fmt"
+	"reflect"
 	"testing"

+	"github.com/btcsuite/btcutil"
 	"github.com/coreos/bbolt"
+	"github.com/davecgh/go-spew/spew"
+	"github.com/go-errors/errors"
 )

 // TestPaymentStatusesMigration checks that already completed payments will have
@@ -189,3 +195,276 @@ func TestPaymentStatusesMigration(t *testing.T) {
 		paymentStatusesMigration, false)
 }
+
+// TestMigrateOptionalChannelCloseSummaryFields checks that the migration
+// properly converts a ChannelCloseSummary to the v7 format, where optional
+// fields have their presence indicated with boolean markers.
+func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
+	t.Parallel()
+
+	chanState, err := createTestChannelState(nil)
+	if err != nil {
+		t.Fatalf("unable to create channel state: %v", err)
+	}
+
+	var chanPointBuf bytes.Buffer
+	err = writeOutpoint(&chanPointBuf, &chanState.FundingOutpoint)
+	if err != nil {
+		t.Fatalf("unable to write outpoint: %v", err)
+	}
+
+	chanID := chanPointBuf.Bytes()
+
+	testCases := []struct {
+		closeSummary     *ChannelCloseSummary
+		oldSerialization func(c *ChannelCloseSummary) []byte
+	}{
+		{
+			// A close summary where none of the new fields are
+			// set.
+ closeSummary: &ChannelCloseSummary{ + ChanPoint: chanState.FundingOutpoint, + ShortChanID: chanState.ShortChanID(), + ChainHash: chanState.ChainHash, + ClosingTXID: testTx.TxHash(), + CloseHeight: 100, + RemotePub: chanState.IdentityPub, + Capacity: chanState.Capacity, + SettledBalance: btcutil.Amount(50000), + CloseType: RemoteForceClose, + IsPending: true, + + // The last fields will be unset. + RemoteCurrentRevocation: nil, + LocalChanConfig: ChannelConfig{}, + RemoteNextRevocation: nil, + }, + + // In the old format the last field written is the + // IsPendingField. It should be converted by adding an + // extra boolean marker at the end to indicate that the + // remaining fields are not there. + oldSerialization: func(cs *ChannelCloseSummary) []byte { + var buf bytes.Buffer + err := WriteElements(&buf, cs.ChanPoint, + cs.ShortChanID, cs.ChainHash, + cs.ClosingTXID, cs.CloseHeight, + cs.RemotePub, cs.Capacity, + cs.SettledBalance, cs.TimeLockedBalance, + cs.CloseType, cs.IsPending, + ) + if err != nil { + t.Fatal(err) + } + + // For the old format, these are all the fields + // that are written. + return buf.Bytes() + }, + }, + { + // A close summary where the new fields are present, + // but the optional RemoteNextRevocation field is not + // set. + closeSummary: &ChannelCloseSummary{ + ChanPoint: chanState.FundingOutpoint, + ShortChanID: chanState.ShortChanID(), + ChainHash: chanState.ChainHash, + ClosingTXID: testTx.TxHash(), + CloseHeight: 100, + RemotePub: chanState.IdentityPub, + Capacity: chanState.Capacity, + SettledBalance: btcutil.Amount(50000), + CloseType: RemoteForceClose, + IsPending: true, + RemoteCurrentRevocation: chanState.RemoteCurrentRevocation, + LocalChanConfig: chanState.LocalChanCfg, + + // RemoteNextRevocation is optional, and here + // it is not set. + RemoteNextRevocation: nil, + }, + + // In the old format the last field written is the + // LocalChanConfig. This indicates that the optional + // RemoteNextRevocation field is not present. It should + // be converted by adding boolean markers for all these + // fields. + oldSerialization: func(cs *ChannelCloseSummary) []byte { + var buf bytes.Buffer + err := WriteElements(&buf, cs.ChanPoint, + cs.ShortChanID, cs.ChainHash, + cs.ClosingTXID, cs.CloseHeight, + cs.RemotePub, cs.Capacity, + cs.SettledBalance, cs.TimeLockedBalance, + cs.CloseType, cs.IsPending, + ) + if err != nil { + t.Fatal(err) + } + + err = WriteElements(&buf, cs.RemoteCurrentRevocation) + if err != nil { + t.Fatal(err) + } + + err = writeChanConfig(&buf, &cs.LocalChanConfig) + if err != nil { + t.Fatal(err) + } + + // RemoteNextRevocation is not written. + return buf.Bytes() + }, + }, + { + // A close summary where all fields are present. + closeSummary: &ChannelCloseSummary{ + ChanPoint: chanState.FundingOutpoint, + ShortChanID: chanState.ShortChanID(), + ChainHash: chanState.ChainHash, + ClosingTXID: testTx.TxHash(), + CloseHeight: 100, + RemotePub: chanState.IdentityPub, + Capacity: chanState.Capacity, + SettledBalance: btcutil.Amount(50000), + CloseType: RemoteForceClose, + IsPending: true, + RemoteCurrentRevocation: chanState.RemoteCurrentRevocation, + LocalChanConfig: chanState.LocalChanCfg, + + // RemoteNextRevocation is optional, and in + // this case we set it. + RemoteNextRevocation: chanState.RemoteNextRevocation, + }, + + // In the old format all the fields are written. It + // should be converted by adding boolean markers for + // all these fields. 
+			oldSerialization: func(cs *ChannelCloseSummary) []byte {
+				var buf bytes.Buffer
+				err := WriteElements(&buf, cs.ChanPoint,
+					cs.ShortChanID, cs.ChainHash,
+					cs.ClosingTXID, cs.CloseHeight,
+					cs.RemotePub, cs.Capacity,
+					cs.SettledBalance, cs.TimeLockedBalance,
+					cs.CloseType, cs.IsPending,
+				)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				err = WriteElements(&buf, cs.RemoteCurrentRevocation)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				err = writeChanConfig(&buf, &cs.LocalChanConfig)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				err = WriteElements(&buf, cs.RemoteNextRevocation)
+				if err != nil {
+					t.Fatal(err)
+				}
+
+				return buf.Bytes()
+			},
+		},
+	}
+
+	for _, test := range testCases {
+
+		// Before the migration we must add the old format to the DB.
+		beforeMigrationFunc := func(d *DB) {
+
+			// Get the old serialization format for this test's
+			// close summary, and add it to the closed channel
+			// bucket.
+			old := test.oldSerialization(test.closeSummary)
+			err = d.Update(func(tx *bolt.Tx) error {
+				closedChanBucket, err := tx.CreateBucketIfNotExists(
+					closedChannelBucket,
+				)
+				if err != nil {
+					return err
+				}
+				return closedChanBucket.Put(chanID, old)
+			})
+			if err != nil {
+				t.Fatalf("unable to add old serialization: %v",
+					err)
+			}
+		}
+
+		// After the migration it should be found in the new format.
+		afterMigrationFunc := func(d *DB) {
+			meta, err := d.FetchMeta(nil)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			if meta.DbVersionNumber != 1 {
+				t.Fatal("migration wasn't applied")
+			}
+
+			// We generate the new serialized version, to check
+			// against what is found in the DB.
+			var b bytes.Buffer
+			err = serializeChannelCloseSummary(&b, test.closeSummary)
+			if err != nil {
+				t.Fatalf("unable to serialize: %v", err)
+			}
+			newSerialization := b.Bytes()
+
+			var dbSummary []byte
+			err = d.View(func(tx *bolt.Tx) error {
+				closedChanBucket := tx.Bucket(closedChannelBucket)
+				if closedChanBucket == nil {
+					return errors.New("unable to find bucket")
+				}
+
+				// Get the serialized version from the DB and
+				// make sure it matches what we expected.
+				dbSummary = closedChanBucket.Get(chanID)
+				if !bytes.Equal(dbSummary, newSerialization) {
+					return fmt.Errorf("unexpected new " +
+						"serialization")
+				}
+				return nil
+			})
+			if err != nil {
+				t.Fatalf("unable to view DB: %v", err)
+			}
+
+			// Finally we fetch the deserialized summary from the
+			// DB and check that it is equal to our original one.
+			dbChannels, err := d.FetchClosedChannels(false)
+			if err != nil {
+				t.Fatalf("unable to fetch closed channels: %v",
+					err)
+			}
+
+			if len(dbChannels) != 1 {
+				t.Fatalf("expected 1 closed channel, found %v",
+					len(dbChannels))
+			}
+
+			dbChan := dbChannels[0]
+			if !reflect.DeepEqual(dbChan, test.closeSummary) {
+				dbChan.RemotePub.Curve = nil
+				test.closeSummary.RemotePub.Curve = nil
+				t.Fatalf("not equal: %v vs %v",
+					spew.Sdump(dbChan),
+					spew.Sdump(test.closeSummary))
+			}
+
+		}
+
+		applyMigration(t,
+			beforeMigrationFunc,
+			afterMigrationFunc,
+			migrateOptionalChannelCloseSummaryFields,
+			false)
+	}
+}
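[Editor's sketch: taken together, the series leaves a node able to help a counterparty that lost state: the close summary persists the last ChannelReestablish, and the peer replays it when a sync message arrives for a channel that no longer has a link. A rough, simplified sketch of that resend path follows; channelReestablish, closeSummary, closedChannelStore, and handleOrphanChanSync are stand-in names, not the real channeldb, lnwire, and peer types.]

package main

import (
	"errors"
	"fmt"
)

// Stand-in types; the real equivalents are lnwire.ChannelReestablish,
// channeldb.ChannelCloseSummary, and the channeldb lookup by channel ID.
type channelReestablish struct{ nextLocalCommitHeight uint64 }

type closeSummary struct{ lastChanSyncMsg *channelReestablish }

type closedChannelStore map[[32]byte]*closeSummary

// handleOrphanChanSync mimics the flow added in the peer patch: when a
// channel sync message arrives for a channel with no active link, look
// up the stored close summary by channel ID and replay its saved sync
// message so the counterparty can recover its funds.
func handleOrphanChanSync(db closedChannelStore,
	send func(*channelReestablish) error, cid [32]byte) error {

	summary, ok := db[cid]
	if !ok {
		return errors.New("no close summary stored for channel")
	}
	if summary.lastChanSyncMsg == nil {
		return errors.New("no chan sync message stored for channel")
	}
	return send(summary.lastChanSyncMsg)
}

func main() {
	var cid [32]byte
	db := closedChannelStore{
		cid: {lastChanSyncMsg: &channelReestablish{nextLocalCommitHeight: 1}},
	}
	err := handleOrphanChanSync(db, func(m *channelReestablish) error {
		fmt.Println("re-sent chan sync, next height:", m.nextLocalCommitHeight)
		return nil
	}, cid)
	if err != nil {
		fmt.Println("resend failed:", err)
	}
}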