From 538f3baac8102b238ff5799a4a0ae8d7e61dbfad Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 4 Mar 2020 13:21:27 +0100
Subject: [PATCH 1/9] itest: print wrong balance in correct format

---
 lntest/itest/lnd_test.go | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go
index d84ed114..a46ef329 100644
--- a/lntest/itest/lnd_test.go
+++ b/lntest/itest/lnd_test.go
@@ -1053,14 +1053,19 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness,
 		return nil, nil, nil, fmt.Errorf("unable to get bob's "+
 			"balance: %v", err)
 	}
-	if aliceBal.Balance != int64(chanAmt-pushAmt-calcStaticFee(0)) {
+
+	expBalanceAlice := chanAmt - pushAmt - calcStaticFee(0)
+	aliceBalance := btcutil.Amount(aliceBal.Balance)
+	if aliceBalance != expBalanceAlice {
 		return nil, nil, nil, fmt.Errorf("alice's balance is "+
 			"incorrect: expected %v got %v",
-			chanAmt-pushAmt-calcStaticFee(0), aliceBal)
+			expBalanceAlice, aliceBalance)
 	}
-	if bobBal.Balance != int64(pushAmt) {
+
+	bobBalance := btcutil.Amount(bobBal.Balance)
+	if bobBalance != pushAmt {
 		return nil, nil, nil, fmt.Errorf("bob's balance is incorrect: "+
-			"expected %v got %v", pushAmt, bobBal.Balance)
+			"expected %v got %v", pushAmt, bobBalance)
 	}
 
 	req := &lnrpc.ListChannelsRequest{}

From 651bb09c251d8aa2b0ca353954e8527b0799bc0c Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 4 Mar 2020 13:21:27 +0100
Subject: [PATCH 2/9] itest: extract channel force closure test into subtest

To make it possible to run the force close test for multiple commit
types, we extract it into a subtest, where the two nodes get passed in.
---
 lntest/itest/lnd_test.go | 75 +++++++++++++++++++++++-----------------
 1 file changed, 43 insertions(+), 32 deletions(-)

diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go
index a46ef329..193dc716 100644
--- a/lntest/itest/lnd_test.go
+++ b/lntest/itest/lnd_test.go
@@ -2983,6 +2983,25 @@ func padCLTV(cltv uint32) uint32 {
 //
 // TODO(roasbeef): also add an unsettled HTLC before force closing.
 func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
+	t.t.Run("channelForceClosure", func(t *testing.T) {
+		ht := newHarnessTest(t, net)
+
+		// Since we'd like to test failure scenarios with outstanding
+		// htlcs, we'll introduce another node into our test network:
+		// Carol.
+		carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"})
+		if err != nil {
+			t.Fatalf("unable to create new nodes: %v", err)
+		}
+		defer shutdownAndAssert(net, ht, carol)
+
+		channelForceClosureTest(net, ht, net.Alice, carol)
+	})
+}
+
+func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
+	alice, carol *lntest.HarnessNode) {
+
 	ctxb := context.Background()
 
 	const (
@@ -2996,18 +3015,10 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 	// instead, or make delay a param
 	defaultCLTV := uint32(lnd.DefaultBitcoinTimeLockDelta)
 
-	// Since we'd like to test failure scenarios with outstanding htlcs,
-	// we'll introduce another node into our test network: Carol.
- carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) - } - defer shutdownAndAssert(net, t, carol) - // We must let Alice have an open channel before she can send a node // announcement, so we open a channel with Carol, ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Alice, carol); err != nil { + if err := net.ConnectNodes(ctxt, alice, carol); err != nil { t.Fatalf("unable to connect alice to carol: %v", err) } @@ -3025,7 +3036,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, + ctxt, t, net, alice, carol, lntest.OpenChannelParams{ Amt: chanAmt, PushAmt: pushAmt, @@ -3035,7 +3046,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Wait for Alice and Carol to receive the channel edge from the // funding manager. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint) + err = alice.WaitForNetworkChannelOpen(ctxt, chanPoint) if err != nil { t.Fatalf("alice didn't see the alice->carol channel before "+ "timeout: %v", err) @@ -3052,7 +3063,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -3072,7 +3083,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Once the HTLC has cleared, all the nodes n our mini network should // show that the HTLC has been locked in. - nodes := []*lntest.HarnessNode{net.Alice, carol} + nodes := []*lntest.HarnessNode{alice, carol} var predErr error err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numInvoices) @@ -3102,7 +3113,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { ) ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - aliceChan, err := getChanInfo(ctxt, net.Alice) + aliceChan, err := getChanInfo(ctxt, alice) if err != nil { t.Fatalf("unable to get alice's channel info: %v", err) } @@ -3115,7 +3126,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // the commitment transaction was immediately broadcast in order to // fulfill the force closure request. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - _, closingTxID, err := net.CloseChannel(ctxt, net.Alice, chanPoint, true) + _, closingTxID, err := net.CloseChannel(ctxt, alice, chanPoint, true) if err != nil { t.Fatalf("unable to execute force channel closure: %v", err) } @@ -3124,7 +3135,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // PendingChannels RPC under the waiting close section. pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels(ctxt, pendingChansRequest) + pendingChanResp, err := alice.PendingChannels(ctxt, pendingChansRequest) if err != nil { t.Fatalf("unable to query for pending channels: %v", err) } @@ -3160,7 +3171,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // when the system comes back on line. 
This restart tests state // persistence at the beginning of the process, when the commitment // transaction has been broadcast but not yet confirmed in a block. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3179,7 +3190,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // marked as force closed. err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3232,7 +3243,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // force close commitment transaction have been persisted once the // transaction has been confirmed, but before the outputs are spendable // (the "kindergarten" bucket.) - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3255,7 +3266,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // The following restart checks to ensure that outputs in the // kindergarten bucket are persisted while waiting for the required // number of confirmations to be reported. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3263,7 +3274,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // channels with her funds still in limbo. err = wait.NoError(func() error { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3341,7 +3352,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Restart Alice to ensure that she resumes watching the finalized // commitment sweep txid. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3369,7 +3380,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Now that the commit output has been fully swept, check to see // that the channel remains open for the pending htlc outputs. ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Alice.PendingChannels( + pendingChanResp, err := alice.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -3432,7 +3443,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // We now restart Alice, to ensure that she will broadcast the presigned // htlc timeout txns after the delay expires after experiencing a while // waiting for the htlc outputs to incubate. - if err := net.RestartNode(net.Alice, nil); err != nil { + if err := net.RestartNode(alice, nil); err != nil { t.Fatalf("Node restart failed: %v", err) } @@ -3440,7 +3451,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // channels with one pending HTLC. 
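For reference, the restart-then-poll sequence above leans entirely on the
lntest/wait package. A minimal sketch of the polling primitive it provides,
assuming only the standard library (the real package may differ in its
polling interval and internals):

    package wait

    import (
        "fmt"
        "time"
    )

    // Predicate polls pred until it returns true or the timeout expires.
    func Predicate(pred func() bool, timeout time.Duration) error {
        exitTimer := time.After(timeout)
        ticker := time.NewTicker(100 * time.Millisecond)
        defer ticker.Stop()

        for {
            select {
            case <-exitTimer:
                return fmt.Errorf("predicate not satisfied after %v",
                    timeout)
            case <-ticker.C:
                if pred() {
                    return nil
                }
            }
        }
    }

wait.NoError, used just below, can be seen as the same loop driven by an
error-returning callback that keeps the last error around for reporting.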
 	err = wait.NoError(func() error {
 		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := net.Alice.PendingChannels(
+		pendingChanResp, err := alice.PendingChannels(
 			ctxt, pendingChansRequest,
 		)
 		if err != nil {
@@ -3533,7 +3544,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 	// With the htlc timeout txns still in the mempool, we restart Alice to
 	// verify that she can resume watching the htlc txns she broadcasted
 	// before crashing.
-	if err := net.RestartNode(net.Alice, nil); err != nil {
+	if err := net.RestartNode(alice, nil); err != nil {
 		t.Fatalf("Node restart failed: %v", err)
 	}
 
@@ -3547,7 +3558,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 	// Alice is restarted here to ensure that she promptly moved the crib
 	// outputs to the kindergarten bucket after the htlc timeout txns were
 	// confirmed.
-	if err := net.RestartNode(net.Alice, nil); err != nil {
+	if err := net.RestartNode(alice, nil); err != nil {
 		t.Fatalf("Node restart failed: %v", err)
 	}
 
@@ -3559,7 +3570,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 
 	// Restart Alice to ensure that she can recover from a failure before
 	// having graduated the htlc outputs in the kindergarten bucket.
-	if err := net.RestartNode(net.Alice, nil); err != nil {
+	if err := net.RestartNode(alice, nil); err != nil {
 		t.Fatalf("Node restart failed: %v", err)
 	}
 
@@ -3568,7 +3579,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 	// as pending force closed.
 	err = wait.Predicate(func() bool {
 		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err = net.Alice.PendingChannels(
+		pendingChanResp, err = alice.PendingChannels(
 			ctxt, pendingChansRequest,
 		)
 		if err != nil {
@@ -3660,7 +3671,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 	// The following restart checks to ensure that the nursery store is
 	// storing the txid of the previously broadcast htlc sweep txn, and that
 	// it begins watching that txid after restarting.
-	if err := net.RestartNode(net.Alice, nil); err != nil {
+	if err := net.RestartNode(alice, nil); err != nil {
 		t.Fatalf("Node restart failed: %v", err)
 	}
 
@@ -3669,7 +3680,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 	// as pending force closed.
 	err = wait.Predicate(func() bool {
 		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := net.Alice.PendingChannels(
+		pendingChanResp, err := alice.PendingChannels(
 			ctxt, pendingChansRequest,
 		)
 		if err != nil {
@@ -3718,7 +3729,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 	// up within the pending channels RPC.
 	err = wait.Predicate(func() bool {
 		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := net.Alice.PendingChannels(
+		pendingChanResp, err := alice.PendingChannels(
 			ctxt, pendingChansRequest,
 		)
 		if err != nil {

From d81c8bbda76f4902eb8a4ef3f696348497459b2b Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 4 Mar 2020 13:21:27 +0100
Subject: [PATCH 3/9] itest: run force closure test for all commit types

Now that the force closure test has been extracted, spin up new nodes
for each commit type, and ensure the test succeeds for all types.
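The commitType helper this patch relies on is not shown in the excerpt
above; a minimal sketch of the shape the diff assumes, where only the
legacy value appears in this series and the method bodies are hypothetical:

    // commitType indexes the channel commitment formats the test matrix
    // can exercise.
    type commitType uint8

    const (
        commitTypeLegacy commitType = iota
    )

    // Args returns the extra command line flags a node needs in order to
    // negotiate channels of this commitment type; legacy channels need
    // none.
    func (c commitType) Args() []string {
        switch c {
        case commitTypeLegacy:
            return nil
        }
        return nil
    }

    // String is used to build the per-type subtest name, e.g.
    // "committype=legacy".
    func (c commitType) String() string {
        switch c {
        case commitTypeLegacy:
            return "legacy"
        }
        return "unknown"
    }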
--- lntest/itest/lnd_test.go | 59 ++++++++++++++++++++++++++++++++-------- 1 file changed, 47 insertions(+), 12 deletions(-) diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index 193dc716..a25475fa 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -2983,20 +2983,55 @@ func padCLTV(cltv uint32) uint32 { // // TODO(roasbeef): also add an unsettled HTLC before force closing. func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { - t.t.Run("channelForceClosure", func(t *testing.T) { - ht := newHarnessTest(t, net) + // We'll test the scenario for some of the commitment types, to ensure + // outputs can be swept. + commitTypes := []commitType{ + commitTypeLegacy, + } - // Since we'd like to test failure scenarios with outstanding - // htlcs, we'll introduce another node into our test network: - // Carol. - carol, err := net.NewNode("Carol", []string{"--hodl.exit-settle"}) - if err != nil { - t.Fatalf("unable to create new nodes: %v", err) + for _, channelType := range commitTypes { + testName := fmt.Sprintf("committype=%v", channelType) + + success := t.t.Run(testName, func(t *testing.T) { + ht := newHarnessTest(t, net) + + args := channelType.Args() + alice, err := net.NewNode("Alice", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, ht, alice) + + // Since we'd like to test failure scenarios with + // outstanding htlcs, we'll introduce another node into + // our test network: Carol. + carolArgs := []string{"--hodl.exit-settle"} + carolArgs = append(carolArgs, args...) + carol, err := net.NewNode("Carol", carolArgs) + if err != nil { + t.Fatalf("unable to create new nodes: %v", err) + } + defer shutdownAndAssert(net, ht, carol) + + // Each time, we'll send Alice new set of coins in + // order to fund the channel. + ctxt, _ := context.WithTimeout( + context.Background(), defaultTimeout, + ) + err = net.SendCoins( + ctxt, btcutil.SatoshiPerBitcoin, alice, + ) + if err != nil { + t.Fatalf("unable to send coins to Alice: %v", + err) + } + + channelForceClosureTest(net, ht, alice, carol) + }) + if !success { + return } - defer shutdownAndAssert(net, ht, carol) - - channelForceClosureTest(net, ht, net.Alice, carol) - }) + } } func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, From 4b5d91d24d661164237a4085915db20b3dd77bff Mon Sep 17 00:00:00 2001 From: "Johan T. Halseth" Date: Wed, 4 Mar 2020 13:21:27 +0100 Subject: [PATCH 4/9] itest: move multi hop tests to own files PURE CODE MOVE. 
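Each file created by this move follows the same skeleton, gated on the same
rpctest build tag as the rest of the itest package; roughly (function name
hypothetical):

    // +build rpctest

    package itest

    import (
        "github.com/lightningnetwork/lnd/lntest"
    )

    // testSomeScenario is the single harness entry point each moved file
    // contributes to the test list.
    func testSomeScenario(net *lntest.NetworkHarness, t *harnessTest) {
        // Body moved verbatim from lnd_test.go.
    }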
--- ...d_multi-hop_htlc_local_chain_claim_test.go | 73 ++ .../lnd_multi-hop_htlc_local_timeout_test.go | 250 ++++++ ..._force_close_on_chain_htlc_timeout_test.go | 278 ++++++ ..._force_close_on_chain_htlc_timeout_test.go | 236 ++++++ lntest/itest/lnd_test.go | 790 ------------------ 5 files changed, 837 insertions(+), 790 deletions(-) create mode 100644 lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go create mode 100644 lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go create mode 100644 lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index 123359dc..dd84105c 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -498,3 +498,76 @@ func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode, return nil } + +func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, + carolHodl bool) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, + *lntest.HarnessNode) { + + ctxb := context.Background() + + // We'll start the test by creating a channel between Alice and Bob, + // which will act as the first leg for out multi-hop HTLC. + const chanAmt = 1000000 + ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) + aliceChanPoint := openChannelAndAssert( + ctxt, t, net, net.Alice, net.Bob, + lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err := net.Alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) + if err != nil { + t.Fatalf("alice didn't report channel: %v", err) + } + + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = net.Bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) + if err != nil { + t.Fatalf("bob didn't report channel: %v", err) + } + + // Next, we'll create a new node "carol" and have Bob connect to her. If + // the carolHodl flag is set, we'll make carol always hold onto the + // HTLC, this way it'll force Bob to go to chain to resolve the HTLC. + carolFlags := []string{} + if carolHodl { + carolFlags = append(carolFlags, "--hodl.exit-settle") + } + carol, err := net.NewNode("Carol", carolFlags) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, net.Bob, carol); err != nil { + t.Fatalf("unable to connect bob to carol: %v", err) + } + + // We'll then create a channel from Bob to Carol. After this channel is + // open, our topology looks like: A -> B -> C. 
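The repeated wait-for-open checks in this helper could be condensed; a
hypothetical variant built on the same harness API, shown only to make the
pattern explicit:

    func waitForChannelOpen(t *harnessTest, chanPoint *lnrpc.ChannelPoint,
        nodes ...*lntest.HarnessNode) {

        for _, node := range nodes {
            ctxt, cancel := context.WithTimeout(
                context.Background(), defaultTimeout,
            )
            err := node.WaitForNetworkChannelOpen(ctxt, chanPoint)
            cancel()
            if err != nil {
                t.Fatalf("node didn't report channel: %v", err)
            }
        }
    }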
+ ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + bobChanPoint := openChannelAndAssert( + ctxt, t, net, net.Bob, carol, + lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = net.Bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint) + if err != nil { + t.Fatalf("alice didn't report channel: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint) + if err != nil { + t.Fatalf("bob didn't report channel: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = net.Alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint) + if err != nil { + t.Fatalf("bob didn't report channel: %v", err) + } + + return aliceChanPoint, bobChanPoint, carol +} diff --git a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go new file mode 100644 index 00000000..cb32a0f9 --- /dev/null +++ b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go @@ -0,0 +1,250 @@ +// +build rpctest + +package itest + +import ( + "context" + "fmt" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" +) + +// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the +// outgoing HTLC is about to time out, then we'll go to chain in order to claim +// it. Any dust HTLC's should be immediately canceled backwards. Once the +// timeout has been reached, then we should sweep it on-chain, and cancel the +// HTLC backwards. +func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + // First, we'll create a three hop network: Alice -> Bob -> Carol, with + // Carol refusing to actually settle or directly cancel any HTLC's + // self. + aliceChanPoint, bobChanPoint, carol := + createThreeHopNetwork(t, net, true) + + // Clean up carol's node when the test finishes. + defer shutdownAndAssert(net, t, carol) + + time.Sleep(time.Second * 1) + + // Now that our channels are set up, we'll send two HTLC's from Alice + // to Carol. The first HTLC will be universally considered "dust", + // while the second will be a proper fully valued HTLC. + const ( + dustHtlcAmt = btcutil.Amount(100) + htlcAmt = btcutil.Amount(30000) + finalCltvDelta = 40 + ) + + ctx, cancel := context.WithCancel(ctxb) + defer cancel() + + alicePayStream, err := net.Alice.SendPayment(ctx) + if err != nil { + t.Fatalf("unable to create payment stream for alice: %v", err) + } + + // We'll create two random payment hashes unknown to carol, then send + // each of them by manually specifying the HTLC details. + carolPubKey := carol.PubKey[:] + dustPayHash := makeFakePayHash(t) + payHash := makeFakePayHash(t) + err = alicePayStream.Send(&lnrpc.SendRequest{ + Dest: carolPubKey, + Amt: int64(dustHtlcAmt), + PaymentHash: dustPayHash, + FinalCltvDelta: finalCltvDelta, + }) + if err != nil { + t.Fatalf("unable to send alice htlc: %v", err) + } + err = alicePayStream.Send(&lnrpc.SendRequest{ + Dest: carolPubKey, + Amt: int64(htlcAmt), + PaymentHash: payHash, + FinalCltvDelta: finalCltvDelta, + }) + if err != nil { + t.Fatalf("unable to send alice htlc: %v", err) + } + + // Verify that all nodes in the path now have two HTLC's with the + // proper parameters. 
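The two sends above differ only in amount and payment hash; a hypothetical
helper capturing the pattern, assuming the stream is the
lnrpc.Lightning_SendPaymentClient returned by SendPayment:

    func sendFakeHtlc(t *harnessTest,
        payStream lnrpc.Lightning_SendPaymentClient, dest []byte,
        amt btcutil.Amount, payHash []byte, cltvDelta int32) {

        err := payStream.Send(&lnrpc.SendRequest{
            Dest:           dest,
            Amt:            int64(amt),
            PaymentHash:    payHash,
            FinalCltvDelta: cltvDelta,
        })
        if err != nil {
            t.Fatalf("unable to send htlc: %v", err)
        }
    }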
+ var predErr error + nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + err = wait.Predicate(func() bool { + predErr = assertActiveHtlcs(nodes, dustPayHash, payHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", predErr) + } + + // We'll now mine enough blocks to trigger Bob's broadcast of his + // commitment transaction due to the fact that the HTLC is about to + // timeout. With the default outgoing broadcast delta of zero, this will + // be the same height as the htlc expiry height. + numBlocks := padCLTV( + uint32(finalCltvDelta - lnd.DefaultOutgoingBroadcastDelta), + ) + if _, err := net.Miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // Bob's force close transaction should now be found in the mempool. + bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) + if err != nil { + t.Fatalf("unable to get txid: %v", err) + } + closeTxid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find closing txid: %v", err) + } + assertSpendingTxInMempool( + t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ + Hash: *bobFundingTxid, + Index: bobChanPoint.OutputIndex, + }, + ) + + // Mine a block to confirm the closing transaction. + mineBlocks(t, net, 1, 1) + + // At this point, Bob should have canceled backwards the dust HTLC + // that we sent earlier. This means Alice should now only have a single + // HTLC on her channel. + nodes = []*lntest.HarnessNode{net.Alice} + err = wait.Predicate(func() bool { + predErr = assertActiveHtlcs(nodes, payHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", predErr) + } + + // With the closing transaction confirmed, we should expect Bob's HTLC + // timeout transaction to be broadcast due to the expiry being reached. + htlcTimeout, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's htlc timeout tx: %v", err) + } + + // We'll mine the remaining blocks in order to generate the sweep + // transaction of Bob's commitment output. + mineBlocks(t, net, defaultCSV, 1) + assertSpendingTxInMempool( + t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ + Hash: *closeTxid, + Index: 1, + }, + ) + + // Bob's pending channel report should show that he has a commitment + // output awaiting sweeping, and also that there's an outgoing HTLC + // output pending. + pendingChansRequest := &lnrpc.PendingChannelsRequest{} + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels(ctxt, pendingChansRequest) + if err != nil { + t.Fatalf("unable to query for pending channels: %v", err) + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + t.Fatalf("bob should have pending for close chan but doesn't") + } + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if forceCloseChan.LimboBalance == 0 { + t.Fatalf("bob should have nonzero limbo balance instead "+ + "has: %v", forceCloseChan.LimboBalance) + } + if len(forceCloseChan.PendingHtlcs) == 0 { + t.Fatalf("bob should have pending htlc but doesn't") + } + + // Now we'll mine an additional block, which should confirm Bob's commit + // sweep. This block should also prompt Bob to broadcast their second + // layer sweep due to the CSV on the HTLC timeout output. 
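assertSpendingTxInMempool, used here and below, can be approximated with
the btcd rpcclient API along these lines (a sketch under that assumption,
not the harness's actual implementation):

    func findSpendInMempool(miner *rpcclient.Client,
        op wire.OutPoint) (*chainhash.Hash, error) {

        txids, err := miner.GetRawMempool()
        if err != nil {
            return nil, err
        }
        for _, txid := range txids {
            tx, err := miner.GetRawTransaction(txid)
            if err != nil {
                return nil, err
            }
            // Check every input of the candidate against the outpoint we
            // expect to be spent.
            for _, txIn := range tx.MsgTx().TxIn {
                if txIn.PreviousOutPoint == op {
                    return txid, nil
                }
            }
        }
        return nil, fmt.Errorf("no mempool tx spends %v", op)
    }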
+ mineBlocks(t, net, 1, 1) + assertSpendingTxInMempool( + t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ + Hash: *htlcTimeout, + Index: 0, + }, + ) + + // The block should have confirmed Bob's HTLC timeout transaction. + // Therefore, at this point, there should be no active HTLC's on the + // commitment transaction from Alice -> Bob. + nodes = []*lntest.HarnessNode{net.Alice} + err = wait.Predicate(func() bool { + predErr = assertNumActiveHtlcs(nodes, 0) + if predErr != nil { + return false + } + return true + }, time.Second*15) + if err != nil { + t.Fatalf("alice's channel still has active htlc's: %v", predErr) + } + + // At this point, Bob should show that the pending HTLC has advanced to + // the second stage and is to be swept. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) + if err != nil { + t.Fatalf("unable to query for pending channels: %v", err) + } + forceCloseChan = pendingChanResp.PendingForceClosingChannels[0] + if forceCloseChan.PendingHtlcs[0].Stage != 2 { + t.Fatalf("bob's htlc should have advanced to the second stage: %v", err) + } + + // Next, we'll mine a final block that should confirm the second-layer + // sweeping transaction. + if _, err := net.Miner.Node.Generate(1); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // Once this transaction has been confirmed, Bob should detect that he + // no longer has any pending channels. + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("bob still has pending "+ + "channels but shouldn't: %v", + spew.Sdump(pendingChanResp)) + return false + } + + return true + + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) +} diff --git a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go new file mode 100644 index 00000000..ea5bf3d7 --- /dev/null +++ b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go @@ -0,0 +1,278 @@ +// +build rpctest + +package itest + +import ( + "context" + "fmt" + "time" + + "github.com/btcsuite/btcutil" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" +) + +// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC +// scenario, if the node that extended the HTLC to the final node closes their +// commitment on-chain early, then it eventually recognizes this HTLC as one +// that's timed out. At this point, the node should timeout the HTLC, then +// cancel it backwards as normal. +func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, + t *harnessTest) { + ctxb := context.Background() + + // First, we'll create a three hop network: Alice -> Bob -> Carol, with + // Carol refusing to actually settle or directly cancel any HTLC's + // self. 
+ aliceChanPoint, bobChanPoint, carol := + createThreeHopNetwork(t, net, true) + + // Clean up carol's node when the test finishes. + defer shutdownAndAssert(net, t, carol) + + // With our channels set up, we'll then send a single HTLC from Alice + // to Carol. As Carol is in hodl mode, she won't settle this HTLC which + // opens up the base for out tests. + const ( + finalCltvDelta = 40 + htlcAmt = btcutil.Amount(30000) + ) + ctx, cancel := context.WithCancel(ctxb) + defer cancel() + + alicePayStream, err := net.Alice.SendPayment(ctx) + if err != nil { + t.Fatalf("unable to create payment stream for alice: %v", err) + } + + // We'll now send a single HTLC across our multi-hop network. + carolPubKey := carol.PubKey[:] + payHash := makeFakePayHash(t) + err = alicePayStream.Send(&lnrpc.SendRequest{ + Dest: carolPubKey, + Amt: int64(htlcAmt), + PaymentHash: payHash, + FinalCltvDelta: finalCltvDelta, + }) + if err != nil { + t.Fatalf("unable to send alice htlc: %v", err) + } + + // Once the HTLC has cleared, all channels in our mini network should + // have the it locked in. + var predErr error + nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + err = wait.Predicate(func() bool { + predErr = assertActiveHtlcs(nodes, payHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", err) + } + + // Now that all parties have the HTLC locked in, we'll immediately + // force close the Bob -> Carol channel. This should trigger contract + // resolution mode for both of them. + ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, net.Bob, bobChanPoint, true) + + // At this point, Bob should have a pending force close channel as he + // just went to chain. + pendingChansRequest := &lnrpc.PendingChannelsRequest{} + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels(ctxt, + pendingChansRequest) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending for " + + "close chan but doesn't") + return false + } + + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if forceCloseChan.LimboBalance == 0 { + predErr = fmt.Errorf("bob should have nonzero limbo "+ + "balance instead has: %v", + forceCloseChan.LimboBalance) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // We'll mine defaultCSV blocks in order to generate the sweep transaction + // of Bob's funding output. + if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's funding output sweep tx: %v", err) + } + + // We'll now mine enough blocks for the HTLC to expire. After this, Bob + // should hand off the now expired HTLC output to the utxo nursery. + numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1)) + if _, err := net.Miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // Bob's pending channel report should show that he has a single HTLC + // that's now in stage one. 
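The stage assertions here and below all share one shape; a hypothetical
helper built on the same PendingChannels RPC, which pairs naturally with
wait.NoError instead of the predErr bookkeeping:

    func assertHtlcStage(node *lntest.HarnessNode, stage uint32) error {
        ctxt, cancel := context.WithTimeout(
            context.Background(), defaultTimeout,
        )
        defer cancel()

        resp, err := node.PendingChannels(
            ctxt, &lnrpc.PendingChannelsRequest{},
        )
        if err != nil {
            return fmt.Errorf("unable to query for pending "+
                "channels: %v", err)
        }
        if len(resp.PendingForceClosingChannels) == 0 {
            return fmt.Errorf("no pending force close channels")
        }

        forceClose := resp.PendingForceClosingChannels[0]
        if len(forceClose.PendingHtlcs) != 1 {
            return fmt.Errorf("expected one pending htlc, got %d",
                len(forceClose.PendingHtlcs))
        }
        if forceClose.PendingHtlcs[0].Stage != stage {
            return fmt.Errorf("htlc in stage %d, want %d",
                forceClose.PendingHtlcs[0].Stage, stage)
        }
        return nil
    }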
+ err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending force " + + "close chan but doesn't") + return false + } + + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if len(forceCloseChan.PendingHtlcs) != 1 { + predErr = fmt.Errorf("bob should have pending htlc " + + "but doesn't") + return false + } + if forceCloseChan.PendingHtlcs[0].Stage != 1 { + predErr = fmt.Errorf("bob's htlc should have "+ + "advanced to the first stage: %v", err) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) + } + + // We should also now find a transaction in the mempool, as Bob should + // have broadcast his second layer timeout transaction. + timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's htlc timeout tx: %v", err) + } + + // Next, we'll mine an additional block. This should serve to confirm + // the second layer timeout transaction. + block := mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, timeoutTx) + + // With the second layer timeout transaction confirmed, Bob should have + // canceled backwards the HTLC that carol sent. + nodes = []*lntest.HarnessNode{net.Alice} + err = wait.Predicate(func() bool { + predErr = assertNumActiveHtlcs(nodes, 0) + if predErr != nil { + return false + } + return true + }, time.Second*15) + if err != nil { + t.Fatalf("alice's channel still has active htlc's: %v", predErr) + } + + // Additionally, Bob should now show that HTLC as being advanced to the + // second stage. + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending for " + + "close chan but doesn't") + return false + } + + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if len(forceCloseChan.PendingHtlcs) != 1 { + predErr = fmt.Errorf("bob should have pending htlc " + + "but doesn't") + return false + } + if forceCloseChan.PendingHtlcs[0].Stage != 2 { + predErr = fmt.Errorf("bob's htlc should have "+ + "advanced to the second stage: %v", err) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) + } + + // We'll now mine 4 additional blocks. This should be enough for Bob's + // CSV timelock to expire and the sweeping transaction of the HTLC to be + // broadcast. + if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { + t.Fatalf("unable to mine blocks: %v", err) + } + + sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's htlc sweep tx: %v", err) + } + + // We'll then mine a final block which should confirm this second layer + // sweep transaction. 
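The wait-mine-assert sequence that follows recurs across these tests; a
condensed sketch with a hypothetical name:

    func confirmMempoolTx(t *harnessTest,
        net *lntest.NetworkHarness) *chainhash.Hash {

        // Wait for the expected transaction to appear in the miner's
        // mempool, then mine a single block and require its inclusion.
        txid, err := waitForTxInMempool(
            net.Miner.Node, minerMempoolTimeout,
        )
        if err != nil {
            t.Fatalf("tx not found in mempool: %v", err)
        }

        block := mineBlocks(t, net, 1, 1)[0]
        assertTxInBlock(t, block, txid)
        return txid
    }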
+ block = mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, sweepTx) + + // At this point, Bob should no longer show any channels as pending + // close. + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("bob still has pending channels "+ + "but shouldn't: %v", spew.Sdump(pendingChanResp)) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) +} diff --git a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go new file mode 100644 index 00000000..eea8e1f6 --- /dev/null +++ b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go @@ -0,0 +1,236 @@ +// +build rpctest + +package itest + +import ( + "context" + "fmt" + "time" + + "github.com/btcsuite/btcutil" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" +) + +// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a +// multi-hop HTLC, and the final destination of the HTLC force closes the +// channel, then we properly timeout the HTLC on *their* commitment transaction +// once the timeout has expired. Once we sweep the transaction, we should also +// cancel back the initial HTLC. +func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, + t *harnessTest) { + ctxb := context.Background() + + // First, we'll create a three hop network: Alice -> Bob -> Carol, with + // Carol refusing to actually settle or directly cancel any HTLC's + // self. + aliceChanPoint, bobChanPoint, carol := + createThreeHopNetwork(t, net, true) + + // Clean up carol's node when the test finishes. + defer shutdownAndAssert(net, t, carol) + + // With our channels set up, we'll then send a single HTLC from Alice + // to Carol. As Carol is in hodl mode, she won't settle this HTLC which + // opens up the base for out tests. + const ( + finalCltvDelta = 40 + htlcAmt = btcutil.Amount(30000) + ) + + ctx, cancel := context.WithCancel(ctxb) + defer cancel() + + alicePayStream, err := net.Alice.SendPayment(ctx) + if err != nil { + t.Fatalf("unable to create payment stream for alice: %v", err) + } + + // We'll now send a single HTLC across our multi-hop network. + carolPubKey := carol.PubKey[:] + payHash := makeFakePayHash(t) + err = alicePayStream.Send(&lnrpc.SendRequest{ + Dest: carolPubKey, + Amt: int64(htlcAmt), + PaymentHash: payHash, + FinalCltvDelta: finalCltvDelta, + }) + if err != nil { + t.Fatalf("unable to send alice htlc: %v", err) + } + + // Once the HTLC has cleared, all the nodes in our mini network should + // show that the HTLC has been locked in. 
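The predErr dance that follows could equally be written with wait.NoError,
which this series already uses elsewhere; an equivalent sketch of the check
below:

    err = wait.NoError(func() error {
        return assertActiveHtlcs(nodes, payHash)
    }, time.Second*15)
    if err != nil {
        t.Fatalf("htlc mismatch: %v", err)
    }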
+ var predErr error + nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + err = wait.Predicate(func() bool { + predErr = assertActiveHtlcs(nodes, payHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", predErr) + } + + // At this point, we'll now instruct Carol to force close the + // transaction. This will let us exercise that Bob is able to sweep the + // expired HTLC on Carol's version of the commitment transaction. + ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, carol, bobChanPoint, true) + + // At this point, Bob should have a pending force close channel as + // Carol has gone directly to chain. + pendingChansRequest := &lnrpc.PendingChannelsRequest{} + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for "+ + "pending channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending " + + "force close channels but doesn't") + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // Bob can sweep his output immediately. + _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's funding output sweep tx: %v", + err) + } + + // Next, we'll mine enough blocks for the HTLC to expire. At this + // point, Bob should hand off the output to his internal utxo nursery, + // which will broadcast a sweep transaction. + numBlocks := padCLTV(finalCltvDelta - 1) + if _, err := net.Miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // If we check Bob's pending channel report, it should show that he has + // a single HTLC that's now in the second stage, as skip the initial + // first stage since this is a direct HTLC. + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending for " + + "close chan but doesn't") + return false + } + + forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] + if len(forceCloseChan.PendingHtlcs) != 1 { + predErr = fmt.Errorf("bob should have pending htlc " + + "but doesn't") + return false + } + if forceCloseChan.PendingHtlcs[0].Stage != 2 { + predErr = fmt.Errorf("bob's htlc should have "+ + "advanced to the second stage: %v", err) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) + } + + // Bob's sweeping transaction should now be found in the mempool at + // this point. + sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + // If Bob's transaction isn't yet in the mempool, then due to + // internal message passing and the low period between blocks + // being mined, it may have been detected as a late + // registration. As a result, we'll mine another block and + // repeat the check. If it doesn't go through this time, then + // we'll fail. 
+ // TODO(halseth): can we use waitForChannelPendingForceClose to + // avoid this hack? + if _, err := net.Miner.Node.Generate(1); err != nil { + t.Fatalf("unable to generate block: %v", err) + } + sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's sweeping transaction: "+ + "%v", err) + } + } + + // If we mine an additional block, then this should confirm Bob's + // transaction which sweeps the direct HTLC output. + block := mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, sweepTx) + + // Now that the sweeping transaction has been confirmed, Bob should + // cancel back that HTLC. As a result, Alice should not know of any + // active HTLC's. + nodes = []*lntest.HarnessNode{net.Alice} + err = wait.Predicate(func() bool { + predErr = assertNumActiveHtlcs(nodes, 0) + if predErr != nil { + return false + } + return true + }, time.Second*15) + if err != nil { + t.Fatalf("alice's channel still has active htlc's: %v", predErr) + } + + // Now we'll check Bob's pending channel report. Since this was Carol's + // commitment, he doesn't have to wait for any CSV delays. As a result, + // he should show no additional pending transactions. + err = wait.Predicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("bob still has pending channels "+ + "but shouldn't: %v", spew.Sdump(pendingChanResp)) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // We'll close out the test by closing the channel from Alice to Bob, + // and then shutting down the new node we created as its no longer + // needed. + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) +} diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index a25475fa..19a631f3 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -10593,796 +10593,6 @@ func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, } } -func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, - carolHodl bool) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, - *lntest.HarnessNode) { - - ctxb := context.Background() - - // We'll start the test by creating a channel between Alice and Bob, - // which will act as the first leg for out multi-hop HTLC. - const chanAmt = 1000000 - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - aliceChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - // Next, we'll create a new node "carol" and have Bob connect to her. If - // the carolHodl flag is set, we'll make carol always hold onto the - // HTLC, this way it'll force Bob to go to chain to resolve the HTLC. 
- carolFlags := []string{} - if carolHodl { - carolFlags = append(carolFlags, "--hodl.exit-settle") - } - carol, err := net.NewNode("Carol", carolFlags) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, carol); err != nil { - t.Fatalf("unable to connect bob to carol: %v", err) - } - - // We'll then create a channel from Bob to Carol. After this channel is - // open, our topology looks like: A -> B -> C. - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - bobChanPoint := openChannelAndAssert( - ctxt, t, net, net.Bob, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("alice didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint) - if err != nil { - t.Fatalf("bob didn't report channel: %v", err) - } - - return aliceChanPoint, bobChanPoint, carol -} - -// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the -// outgoing HTLC is about to time out, then we'll go to chain in order to claim -// it. Any dust HTLC's should be immediately canceled backwards. Once the -// timeout has been reached, then we should sweep it on-chain, and cancel the -// HTLC backwards. -func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - time.Sleep(time.Second * 1) - - // Now that our channels are set up, we'll send two HTLC's from Alice - // to Carol. The first HTLC will be universally considered "dust", - // while the second will be a proper fully valued HTLC. - const ( - dustHtlcAmt = btcutil.Amount(100) - htlcAmt = btcutil.Amount(30000) - finalCltvDelta = 40 - ) - - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } - - // We'll create two random payment hashes unknown to carol, then send - // each of them by manually specifying the HTLC details. - carolPubKey := carol.PubKey[:] - dustPayHash := makeFakePayHash(t) - payHash := makeFakePayHash(t) - err = alicePayStream.Send(&lnrpc.SendRequest{ - Dest: carolPubKey, - Amt: int64(dustHtlcAmt), - PaymentHash: dustPayHash, - FinalCltvDelta: finalCltvDelta, - }) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - err = alicePayStream.Send(&lnrpc.SendRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - }) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - - // Verify that all nodes in the path now have two HTLC's with the - // proper parameters. 
- var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, dustPayHash, payHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // We'll now mine enough blocks to trigger Bob's broadcast of his - // commitment transaction due to the fact that the HTLC is about to - // timeout. With the default outgoing broadcast delta of zero, this will - // be the same height as the htlc expiry height. - numBlocks := padCLTV( - uint32(finalCltvDelta - lnd.DefaultOutgoingBroadcastDelta), - ) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Bob's force close transaction should now be found in the mempool. - bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - closeTxid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find closing txid: %v", err) - } - assertSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - }, - ) - - // Mine a block to confirm the closing transaction. - mineBlocks(t, net, 1, 1) - - // At this point, Bob should have canceled backwards the dust HTLC - // that we sent earlier. This means Alice should now only have a single - // HTLC on her channel. - nodes = []*lntest.HarnessNode{net.Alice} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // With the closing transaction confirmed, we should expect Bob's HTLC - // timeout transaction to be broadcast due to the expiry being reached. - htlcTimeout, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's htlc timeout tx: %v", err) - } - - // We'll mine the remaining blocks in order to generate the sweep - // transaction of Bob's commitment output. - mineBlocks(t, net, defaultCSV, 1) - assertSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ - Hash: *closeTxid, - Index: 1, - }, - ) - - // Bob's pending channel report should show that he has a commitment - // output awaiting sweeping, and also that there's an outgoing HTLC - // output pending. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - t.Fatalf("bob should have pending for close chan but doesn't") - } - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.LimboBalance == 0 { - t.Fatalf("bob should have nonzero limbo balance instead "+ - "has: %v", forceCloseChan.LimboBalance) - } - if len(forceCloseChan.PendingHtlcs) == 0 { - t.Fatalf("bob should have pending htlc but doesn't") - } - - // Now we'll mine an additional block, which should confirm Bob's commit - // sweep. This block should also prompt Bob to broadcast their second - // layer sweep due to the CSV on the HTLC timeout output. 
- mineBlocks(t, net, 1, 1) - assertSpendingTxInMempool( - t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{ - Hash: *htlcTimeout, - Index: 0, - }, - ) - - // The block should have confirmed Bob's HTLC timeout transaction. - // Therefore, at this point, there should be no active HTLC's on the - // commitment transaction from Alice -> Bob. - nodes = []*lntest.HarnessNode{net.Alice} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("alice's channel still has active htlc's: %v", predErr) - } - - // At this point, Bob should show that the pending HTLC has advanced to - // the second stage and is to be swept. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } - forceCloseChan = pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - t.Fatalf("bob's htlc should have advanced to the second stage: %v", err) - } - - // Next, we'll mine a final block that should confirm the second-layer - // sweeping transaction. - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Once this transaction has been confirmed, Bob should detect that he - // no longer has any pending channels. - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending "+ - "channels but shouldn't: %v", - spew.Sdump(pendingChanResp)) - return false - } - - return true - - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) -} - -// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC -// scenario, if the node that extended the HTLC to the final node closes their -// commitment on-chain early, then it eventually recognizes this HTLC as one -// that's timed out. At this point, the node should timeout the HTLC, then -// cancel it backwards as normal. -func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest) { - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With our channels set up, we'll then send a single HTLC from Alice - // to Carol. As Carol is in hodl mode, she won't settle this HTLC which - // opens up the base for out tests. - const ( - finalCltvDelta = 40 - htlcAmt = btcutil.Amount(30000) - ) - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } - - // We'll now send a single HTLC across our multi-hop network. 
- carolPubKey := carol.PubKey[:] - payHash := makeFakePayHash(t) - err = alicePayStream.Send(&lnrpc.SendRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - }) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - - // Once the HTLC has cleared, all channels in our mini network should - // have the it locked in. - var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", err) - } - - // Now that all parties have the HTLC locked in, we'll immediately - // force close the Bob -> Carol channel. This should trigger contract - // resolution mode for both of them. - ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, bobChanPoint, true) - - // At this point, Bob should have a pending force close channel as he - // just went to chain. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels(ctxt, - pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending for " + - "close chan but doesn't") - return false - } - - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.LimboBalance == 0 { - predErr = fmt.Errorf("bob should have nonzero limbo "+ - "balance instead has: %v", - forceCloseChan.LimboBalance) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - // We'll mine defaultCSV blocks in order to generate the sweep transaction - // of Bob's funding output. - if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's funding output sweep tx: %v", err) - } - - // We'll now mine enough blocks for the HTLC to expire. After this, Bob - // should hand off the now expired HTLC output to the utxo nursery. - numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1)) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Bob's pending channel report should show that he has a single HTLC - // that's now in stage one. 
- err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending force " + - "close chan but doesn't") - return false - } - - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if len(forceCloseChan.PendingHtlcs) != 1 { - predErr = fmt.Errorf("bob should have pending htlc " + - "but doesn't") - return false - } - if forceCloseChan.PendingHtlcs[0].Stage != 1 { - predErr = fmt.Errorf("bob's htlc should have "+ - "advanced to the first stage: %v", err) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) - } - - // We should also now find a transaction in the mempool, as Bob should - // have broadcast his second layer timeout transaction. - timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's htlc timeout tx: %v", err) - } - - // Next, we'll mine an additional block. This should serve to confirm - // the second layer timeout transaction. - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, timeoutTx) - - // With the second layer timeout transaction confirmed, Bob should have - // canceled backwards the HTLC that carol sent. - nodes = []*lntest.HarnessNode{net.Alice} - err = wait.Predicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("alice's channel still has active htlc's: %v", predErr) - } - - // Additionally, Bob should now show that HTLC as being advanced to the - // second stage. - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending for " + - "close chan but doesn't") - return false - } - - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if len(forceCloseChan.PendingHtlcs) != 1 { - predErr = fmt.Errorf("bob should have pending htlc " + - "but doesn't") - return false - } - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - predErr = fmt.Errorf("bob's htlc should have "+ - "advanced to the second stage: %v", err) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) - } - - // We'll now mine 4 additional blocks. This should be enough for Bob's - // CSV timelock to expire and the sweeping transaction of the HTLC to be - // broadcast. - if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { - t.Fatalf("unable to mine blocks: %v", err) - } - - sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's htlc sweep tx: %v", err) - } - - // We'll then mine a final block which should confirm this second layer - // sweep transaction. 
- block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, sweepTx) - - // At this point, Bob should no longer show any channels as pending - // close. - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) -} - -// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a -// multi-hop HTLC, and the final destination of the HTLC force closes the -// channel, then we properly timeout the HTLC on *their* commitment transaction -// once the timeout has expired. Once we sweep the transaction, we should also -// cancel back the initial HTLC. -func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest) { - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With our channels set up, we'll then send a single HTLC from Alice - // to Carol. As Carol is in hodl mode, she won't settle this HTLC which - // opens up the base for out tests. - const ( - finalCltvDelta = 40 - htlcAmt = btcutil.Amount(30000) - ) - - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } - - // We'll now send a single HTLC across our multi-hop network. - carolPubKey := carol.PubKey[:] - payHash := makeFakePayHash(t) - err = alicePayStream.Send(&lnrpc.SendRequest{ - Dest: carolPubKey, - Amt: int64(htlcAmt), - PaymentHash: payHash, - FinalCltvDelta: finalCltvDelta, - }) - if err != nil { - t.Fatalf("unable to send alice htlc: %v", err) - } - - // Once the HTLC has cleared, all the nodes in our mini network should - // show that the HTLC has been locked in. - var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = wait.Predicate(func() bool { - predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // At this point, we'll now instruct Carol to force close the - // transaction. This will let us exercise that Bob is able to sweep the - // expired HTLC on Carol's version of the commitment transaction. - ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, carol, bobChanPoint, true) - - // At this point, Bob should have a pending force close channel as - // Carol has gone directly to chain. 
- pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for "+ - "pending channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending " + - "force close channels but doesn't") - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - // Bob can sweep his output immediately. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's funding output sweep tx: %v", - err) - } - - // Next, we'll mine enough blocks for the HTLC to expire. At this - // point, Bob should hand off the output to his internal utxo nursery, - // which will broadcast a sweep transaction. - numBlocks := padCLTV(finalCltvDelta - 1) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // If we check Bob's pending channel report, it should show that he has - // a single HTLC that's now in the second stage, as skip the initial - // first stage since this is a direct HTLC. - err = wait.Predicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending for " + - "close chan but doesn't") - return false - } - - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if len(forceCloseChan.PendingHtlcs) != 1 { - predErr = fmt.Errorf("bob should have pending htlc " + - "but doesn't") - return false - } - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - predErr = fmt.Errorf("bob's htlc should have "+ - "advanced to the second stage: %v", err) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) - } - - // Bob's sweeping transaction should now be found in the mempool at - // this point. - sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - // If Bob's transaction isn't yet in the mempool, then due to - // internal message passing and the low period between blocks - // being mined, it may have been detected as a late - // registration. As a result, we'll mine another block and - // repeat the check. If it doesn't go through this time, then - // we'll fail. - // TODO(halseth): can we use waitForChannelPendingForceClose to - // avoid this hack? - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's sweeping transaction: "+ - "%v", err) - } - } - - // If we mine an additional block, then this should confirm Bob's - // transaction which sweeps the direct HTLC output. - block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, sweepTx) - - // Now that the sweeping transaction has been confirmed, Bob should - // cancel back that HTLC. As a result, Alice should not know of any - // active HTLC's. 
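Nearly every assertion in these tests funnels through wait.Predicate. For readers following along outside the repo, here is a minimal sketch of the helper's likely shape; the real implementation lives in lntest/wait and may differ in polling interval and error detail:

	package wait

	import (
		"fmt"
		"time"
	)

	// Predicate polls pred until it returns true or the timeout elapses,
	// mirroring how the itests above call it with time.Second*15.
	func Predicate(pred func() bool, timeout time.Duration) error {
		deadline := time.After(timeout)
		for {
			select {
			case <-deadline:
				return fmt.Errorf("predicate not satisfied "+
					"after %v", timeout)
			case <-time.After(100 * time.Millisecond):
				if pred() {
					return nil
				}
			}
		}
	}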
-	nodes = []*lntest.HarnessNode{net.Alice}
-	err = wait.Predicate(func() bool {
-		predErr = assertNumActiveHtlcs(nodes, 0)
-		if predErr != nil {
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-	}
-
-	// Now we'll check Bob's pending channel report. Since this was Carol's
-	// commitment, he doesn't have to wait for any CSV delays. As a result,
-	// he should show no additional pending transactions.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := net.Bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob still has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
-
-	// We'll close out the test by closing the channel from Alice to Bob,
-	// and then shutting down the new node we created as its no longer
-	// needed.
-	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
-	closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
-}
-
 // testSwitchCircuitPersistence creates a multihop network to ensure the sender
 // and intermediaries are persisting their open payment circuits. After
 // forwarding a packet via an outgoing link, all are restarted, and expected to

From 5a0f2d004a244e4baaa8634b0361b6557c81dfa1 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 4 Mar 2020 13:21:28 +0100
Subject: [PATCH 5/9] itest: update multi-hop test case docs

To make clear which sweep scenarios are actually being tested.
---
 lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go  | 6 +++---
 lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go      | 6 +++---
 .../itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go  | 7 ++++---
 lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go | 3 ++-
 ...lti-hop_local_force_close_on_chain_htlc_timeout_test.go | 4 ++--
 ...ti-hop_remote_force_close_on_chain_htlc_timeout_test.go | 6 +++---
 lntest/itest/lnd_test.go                                   | 2 +-
 7 files changed, 18 insertions(+), 16 deletions(-)

diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go
index dd84105c..1ae63b14 100644
--- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go
+++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go
@@ -18,9 +18,9 @@ import (
 )
 
 // testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
-// we're forced to go to chain with an incoming HTLC, then when we find out the
-// preimage via the witness beacon, we properly settle the HTLC on-chain in
-// order to ensure we don't lose any funds.
+// we force close a channel with an incoming HTLC, and later find out the
+// preimage via the witness beacon, we properly settle the HTLC on-chain using
+// the HTLC success transaction in order to ensure we don't lose any funds.
 func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 
diff --git a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go
index cb32a0f9..71c66716 100644
--- a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go
+++ b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go
@@ -18,9 +18,9 @@ import (
 
 // testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
 // outgoing HTLC is about to time out, then we'll go to chain in order to claim
-// it. Any dust HTLC's should be immediately canceled backwards. Once the
-// timeout has been reached, then we should sweep it on-chain, and cancel the
-// HTLC backwards.
+// it using the HTLC timeout transaction. Any dust HTLC's should be immediately
+// canceled backwards. Once the timeout has been reached, then we should sweep
+// it on-chain, and cancel the HTLC backwards.
 func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 
diff --git a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go
index 8a017633..0df39250 100644
--- a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go
+++ b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go
@@ -20,9 +20,10 @@ import (
 
 // testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
 // receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC
-// off-chain, then it goes on chain to claim the HTLC. In this scenario, the
-// node that sent the outgoing HTLC should extract the preimage from the sweep
-// transaction, and finish settling the HTLC backwards into the route.
+// off-chain, then it goes on chain to claim the HTLC using the HTLC success
+// transaction. In this scenario, the node that sent the outgoing HTLC should
+// extract the preimage from the sweep transaction, and finish settling the
+// HTLC backwards into the route.
 func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 
diff --git a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go
index b96bc17f..8c3a1f28 100644
--- a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go
+++ b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go
@@ -20,7 +20,8 @@ import (
 
 // testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
 // if the remote party goes to chain while we have an incoming HTLC, then when
 // we found out the preimage via the witness beacon, we properly settle the
-// HTLC on-chain in order to ensure that we don't lose any funds.
+// HTLC directly on-chain using the preimage in order to ensure that we don't
+// lose any funds.
func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() diff --git a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go index ea5bf3d7..960ff17d 100644 --- a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go @@ -17,8 +17,8 @@ import ( // testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC // scenario, if the node that extended the HTLC to the final node closes their // commitment on-chain early, then it eventually recognizes this HTLC as one -// that's timed out. At this point, the node should timeout the HTLC, then -// cancel it backwards as normal. +// that's timed out. At this point, the node should timeout the HTLC using the +// HTLC timeout transaction, then cancel it backwards as normal. func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() diff --git a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go index eea8e1f6..6c6d6eab 100644 --- a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go @@ -16,9 +16,9 @@ import ( // testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a // multi-hop HTLC, and the final destination of the HTLC force closes the -// channel, then we properly timeout the HTLC on *their* commitment transaction -// once the timeout has expired. Once we sweep the transaction, we should also -// cancel back the initial HTLC. +// channel, then we properly timeout the HTLC directly on *their* commitment +// transaction once the timeout has expired. Once we sweep the transaction, we +// should also cancel back the initial HTLC. func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index 19a631f3..fb93746b 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -2976,7 +2976,7 @@ func padCLTV(cltv uint32) uint32 { // total of 3 + n transactions will be broadcast, representing the commitment // transaction, a transaction sweeping the local CSV delayed output, a // transaction sweeping the CSV delayed 2nd-layer htlcs outputs, and n -// htlc success transactions, where n is the number of payments Alice attempted +// htlc timeout transactions, where n is the number of payments Alice attempted // to send to Carol. This test includes several restarts to ensure that the // transaction output states are persisted throughout the forced closure // process. From 1ce4abc0a9879e47887dcb4524c9a32d55f0320a Mon Sep 17 00:00:00 2001 From: "Johan T. 
Halseth" Date: Wed, 4 Mar 2020 13:21:28 +0100 Subject: [PATCH 6/9] itest: extract multi-hop tests into sub tests --- ...d_multi-hop_htlc_local_chain_claim_test.go | 63 +++++++++++++++++++ lntest/itest/lnd_test.go | 38 +---------- 2 files changed, 65 insertions(+), 36 deletions(-) diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index 1ae63b14..868e8974 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -5,6 +5,7 @@ package itest import ( "context" "fmt" + "testing" "time" "github.com/btcsuite/btcd/wire" @@ -17,6 +18,68 @@ import ( "github.com/lightningnetwork/lnd/lntypes" ) +func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { + + type testCase struct { + name string + test func(net *lntest.NetworkHarness, t *harnessTest) + } + + subTests := []testCase{ + { + // bob: outgoing our commit timeout + // carol: incoming their commit watch and see timeout + name: "local force close immediate expiry", + test: testMultiHopHtlcLocalTimeout, + }, + { + // bob: outgoing watch and see, they sweep on chain + // carol: incoming our commit, know preimage + name: "receiver chain claim", + test: testMultiHopReceiverChainClaim, + }, + { + // bob: outgoing our commit watch and see timeout + // carol: incoming their commit watch and see timeout + name: "local force close on-chain htlc timeout", + test: testMultiHopLocalForceCloseOnChainHtlcTimeout, + }, + { + // bob: outgoing their commit watch and see timeout + // carol: incoming our commit watch and see timeout + name: "remote force close on-chain htlc timeout", + test: testMultiHopRemoteForceCloseOnChainHtlcTimeout, + }, + { + // bob: outgoing our commit watch and see, they sweep on chain + // bob: incoming our commit watch and learn preimage + // carol: incoming their commit know preimage + name: "local chain claim", + test: testMultiHopHtlcLocalChainClaim, + }, + { + // bob: outgoing their commit watch and see, they sweep on chain + // bob: incoming their commit watch and learn preimage + // carol: incoming our commit know preimage + name: "remote chain claim", + test: testMultiHopHtlcRemoteChainClaim, + }, + } + + for _, subTest := range subTests { + subTest := subTest + + success := t.t.Run(subTest.name, func(t *testing.T) { + ht := newHarnessTest(t, net) + + subTest.test(net, ht) + }) + if !success { + return + } + } +} + // testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if // we force close a channel with an incoming HTLC, and later find out the // preimage via the witness beacon, we properly settle the HTLC on-chain using diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index fb93746b..5e7ea5f5 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -14551,42 +14551,8 @@ var testsCases = []*testCase{ test: testBidirectionalAsyncPayments, }, { - // bob: outgoing our commit timeout - // carol: incoming their commit watch and see timeout - name: "test multi-hop htlc local force close immediate expiry", - test: testMultiHopHtlcLocalTimeout, - }, - { - // bob: outgoing watch and see, they sweep on chain - // carol: incoming our commit, know preimage - name: "test multi-hop htlc receiver chain claim", - test: testMultiHopReceiverChainClaim, - }, - { - // bob: outgoing our commit watch and see timeout - // carol: incoming their commit watch and see timeout - name: "test multi-hop local force close on-chain htlc 
timeout", - test: testMultiHopLocalForceCloseOnChainHtlcTimeout, - }, - { - // bob: outgoing their commit watch and see timeout - // carol: incoming our commit watch and see timeout - name: "test multi-hop remote force close on-chain htlc timeout", - test: testMultiHopRemoteForceCloseOnChainHtlcTimeout, - }, - { - // bob: outgoing our commit watch and see, they sweep on chain - // bob: incoming our commit watch and learn preimage - // carol: incoming their commit know preimage - name: "test multi-hop htlc local chain claim", - test: testMultiHopHtlcLocalChainClaim, - }, - { - // bob: outgoing their commit watch and see, they sweep on chain - // bob: incoming their commit watch and learn preimage - // carol: incoming our commit know preimage - name: "test multi-hop htlc remote chain claim", - test: testMultiHopHtlcRemoteChainClaim, + name: "test multi-hop htlc", + test: testMultiHopHtlcClaims, }, { name: "switch circuit persistence", From 6ed0c83d115899326a51ce1494eb8ab0be69759f Mon Sep 17 00:00:00 2001 From: "Johan T. Halseth" Date: Wed, 4 Mar 2020 13:21:28 +0100 Subject: [PATCH 7/9] itest: spin up new nodes for multi-hop tests This will let us set their commitment type for the subtest. --- ...d_multi-hop_htlc_local_chain_claim_test.go | 86 ++++++++++++++----- .../lnd_multi-hop_htlc_local_timeout_test.go | 25 +++--- ...ulti-hop_htlc_receiver_chain_claim_test.go | 18 ++-- ..._multi-hop_htlc_remote_chain_claim_test.go | 20 +++-- ..._force_close_on_chain_htlc_timeout_test.go | 26 +++--- ..._force_close_on_chain_htlc_timeout_test.go | 22 ++--- 6 files changed, 124 insertions(+), 73 deletions(-) diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index 868e8974..d5eade40 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -22,7 +22,8 @@ func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { type testCase struct { name string - test func(net *lntest.NetworkHarness, t *harnessTest) + test func(net *lntest.NetworkHarness, t *harnessTest, alice, + bob *lntest.HarnessNode) } subTests := []testCase{ @@ -66,13 +67,32 @@ func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { }, } + args := []string{} + alice, err := net.NewNode("Alice", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, alice) + + bob, err := net.NewNode("Bob", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, bob) + + ctxb := context.Background() + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, alice, bob); err != nil { + t.Fatalf("unable to connect alice to bob: %v", err) + } + for _, subTest := range subTests { subTest := subTest success := t.t.Run(subTest.name, func(t *testing.T) { ht := newHarnessTest(t, net) - subTest.test(net, ht) + subTest.test(net, ht, alice, bob) }) if !success { return @@ -84,14 +104,16 @@ func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { // we force close a channel with an incoming HTLC, and later find out the // preimage via the witness beacon, we properly settle the HTLC on-chain using // the HTLC success transaction in order to ensure we don't lose any funds. 
-func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) { +func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, + alice, bob *lntest.HarnessNode) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, false, + t, net, alice, bob, false, ) // Clean up carol's node when the test finishes. @@ -122,7 +144,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -136,7 +158,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { @@ -157,7 +179,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // At this point, Bob decides that he wants to exit the channel // immediately, so he force closes his commitment transaction. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - bobForceClose := closeChannelAndAssert(ctxt, t, net, net.Bob, + bobForceClose := closeChannelAndAssert(ctxt, t, net, bob, aliceChanPoint, true) // Alice will sweep her output immediately. @@ -168,7 +190,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) } // Suspend Bob to force Carol to go to chain. - restartBob, err := net.SuspendNode(net.Bob) + restartBob, err := net.SuspendNode(bob) if err != nil { t.Fatalf("unable to suspend bob: %v", err) } @@ -260,7 +282,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // At this point we suspend Alice to make sure she'll handle the // on-chain settle after a restart. 
- restartAlice, err := net.SuspendNode(net.Alice) + restartAlice, err := net.SuspendNode(alice) if err != nil { t.Fatalf("unable to suspend alice: %v", err) } @@ -302,7 +324,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) pendingChansRequest := &lnrpc.PendingChannelsRequest{} err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -400,7 +422,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -415,7 +437,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) } req := &lnrpc.ListChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := net.Bob.ListChannels(ctxt, req) + chanInfo, err := bob.ListChannels(ctxt, req) if err != nil { predErr = fmt.Errorf("unable to query for open "+ "channels: %v", err) @@ -474,7 +496,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // succeeded. ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) err = checkPaymentStatus( - ctxt, net.Alice, preimage, lnrpc.Payment_SUCCEEDED, + ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, ) if err != nil { t.Fatalf(err.Error()) @@ -563,30 +585,48 @@ func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode, } func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, - carolHodl bool) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, - *lntest.HarnessNode) { + alice, bob *lntest.HarnessNode, carolHodl bool) (*lnrpc.ChannelPoint, + *lnrpc.ChannelPoint, *lntest.HarnessNode) { ctxb := context.Background() + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + err := net.EnsureConnected(ctxt, alice, bob) + if err != nil { + t.Fatalf("unable to connect peers: %v", err) + } + + ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, alice) + if err != nil { + t.Fatalf("unable to send coins to Alice: %v", err) + } + + ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, bob) + if err != nil { + t.Fatalf("unable to send coins to Bob: %v", err) + } + // We'll start the test by creating a channel between Alice and Bob, // which will act as the first leg for out multi-hop HTLC. 
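For scale, the funding added above comfortably covers the channels opened below: net.SendCoins credits each of Alice and Bob one whole coin, two orders of magnitude more than the channel size. In satoshis (btcutil.SatoshiPerBitcoin is 100,000,000):

	const (
		chanAmt       = 1_000_000   // channel size used below, in satoshis
		satoshiPerBTC = 100_000_000 // value of btcutil.SatoshiPerBitcoin
	)
	// Each node commits roughly 1% of its balance to its channel, leaving
	// ample room for on-chain fees.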
const chanAmt = 1000000 - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) aliceChanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, net.Bob, + ctxt, t, net, alice, bob, lntest.OpenChannelParams{ Amt: chanAmt, }, ) ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err := net.Alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) + err = alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) if err != nil { t.Fatalf("alice didn't report channel: %v", err) } ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) + err = bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint) if err != nil { t.Fatalf("bob didn't report channel: %v", err) } @@ -603,7 +643,7 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, t.Fatalf("unable to create new node: %v", err) } ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, net.Bob, carol); err != nil { + if err := net.ConnectNodes(ctxt, bob, carol); err != nil { t.Fatalf("unable to connect bob to carol: %v", err) } @@ -611,13 +651,13 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, // open, our topology looks like: A -> B -> C. ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) bobChanPoint := openChannelAndAssert( - ctxt, t, net, net.Bob, carol, + ctxt, t, net, bob, carol, lntest.OpenChannelParams{ Amt: chanAmt, }, ) ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint) + err = bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint) if err != nil { t.Fatalf("alice didn't report channel: %v", err) } @@ -627,7 +667,7 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, t.Fatalf("bob didn't report channel: %v", err) } ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.Alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint) + err = alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint) if err != nil { t.Fatalf("bob didn't report channel: %v", err) } diff --git a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go index 71c66716..8d6e8fa7 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go @@ -21,14 +21,17 @@ import ( // it using the HTLC timeout transaction. Any dust HTLC's should be immediately // canceled backwards. Once the timeout has been reached, then we should sweep // it on-chain, and cancel the HTLC backwards. -func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { +func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, + alice, bob *lntest.HarnessNode) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) + aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( + t, net, alice, bob, true, + ) // Clean up carol's node when the test finishes. 
defer shutdownAndAssert(net, t, carol) @@ -47,7 +50,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -79,7 +82,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // Verify that all nodes in the path now have two HTLC's with the // proper parameters. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, dustPayHash, payHash) if predErr != nil { @@ -125,7 +128,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // At this point, Bob should have canceled backwards the dust HTLC // that we sent earlier. This means Alice should now only have a single // HTLC on her channel. - nodes = []*lntest.HarnessNode{net.Alice} + nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) if predErr != nil { @@ -160,7 +163,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // output pending. pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels(ctxt, pendingChansRequest) + pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest) if err != nil { t.Fatalf("unable to query for pending channels: %v", err) } @@ -191,7 +194,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // The block should have confirmed Bob's HTLC timeout transaction. // Therefore, at this point, there should be no active HTLC's on the // commitment transaction from Alice -> Bob. - nodes = []*lntest.HarnessNode{net.Alice} + nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { @@ -206,7 +209,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // At this point, Bob should show that the pending HTLC has advanced to // the second stage and is to be swept. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) + pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest) if err != nil { t.Fatalf("unable to query for pending channels: %v", err) } @@ -225,7 +228,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // no longer has any pending channels. 
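A small aside on a pattern repeated in the predicates above and below: t.Fatalf(predErr.Error()) passes a non-constant format string, which go vet flags and which misbehaves if the error text ever contains a % verb. A safer spelling, offered here as a suggestion rather than as part of the series:

	if err != nil {
		// Format the error explicitly instead of treating its text
		// as a format string.
		t.Fatalf("%v", predErr)
	}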
err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) + pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest) if err != nil { predErr = fmt.Errorf("unable to query for pending "+ "channels: %v", err) @@ -246,5 +249,5 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { } ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) + closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, false) } diff --git a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go index 0df39250..845d7998 100644 --- a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go @@ -24,14 +24,16 @@ import ( // transaction. In this scenario, the node that sent the outgoing HTLC should // extract the preimage from the sweep transaction, and finish settling the // HTLC backwards into the route. -func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) { +func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, + alice, bob *lntest.HarnessNode) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, false, + t, net, alice, bob, false, ) // Clean up carol's node when the test finishes. @@ -62,7 +64,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -76,7 +78,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { @@ -94,7 +96,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // hop logic. waitForInvoiceAccepted(t, carol, payHash) - restartBob, err := net.SuspendNode(net.Bob) + restartBob, err := net.SuspendNode(bob) if err != nil { t.Fatalf("unable to suspend bob: %v", err) } @@ -231,7 +233,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // Once the second-level transaction confirmed, Bob should have // extracted the preimage from the chain, and sent it back to Alice, // clearing the HTLC off-chain. - nodes = []*lntest.HarnessNode{net.Alice} + nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { @@ -303,7 +305,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // succeeded. 
ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) err = checkPaymentStatus( - ctxt, net.Alice, preimage, lnrpc.Payment_SUCCEEDED, + ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, ) if err != nil { t.Fatalf(err.Error()) @@ -312,5 +314,5 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // We'll close out the channel between Alice and Bob, then shutdown // carol to conclude the test. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) + closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, false) } diff --git a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go index 8c3a1f28..1a5db70b 100644 --- a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go @@ -22,14 +22,16 @@ import ( // we found out the preimage via the witness beacon, we properly settle the // HTLC directly on-chain using the preimage in order to ensure that we don't // lose any funds. -func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) { +func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest, + alice, bob *lntest.HarnessNode) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, false, + t, net, alice, bob, false, ) // Clean up carol's node when the test finishes. @@ -59,7 +61,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -73,7 +75,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // At this point, all 3 nodes should now have an active channel with // the created HTLC pending on all of them. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { @@ -95,12 +97,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // immediately force close the channel by broadcast her commitment // transaction. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - aliceForceClose := closeChannelAndAssert(ctxt, t, net, net.Alice, + aliceForceClose := closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, true) // Wait for the channel to be marked pending force close. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForChannelPendingForceClose(ctxt, net.Alice, aliceChanPoint) + err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint) if err != nil { t.Fatalf("channel not pending force close: %v", err) } @@ -119,7 +121,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest } // Suspend bob, so Carol is forced to go on chain. 
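The suspend/restart choreography used just below (and for Alice earlier) hinges on SuspendNode returning a restart closure. A sketch of the calling pattern, assuming the harness API behaves as these diffs imply:

	// Stop Bob so Carol has no off-chain path and must go on-chain.
	restartBob, err := net.SuspendNode(bob)
	if err != nil {
		t.Fatalf("unable to suspend bob: %v", err)
	}

	// ... mine blocks and assert on-chain activity while Bob is down ...

	// Bring Bob back so he can pick up the on-chain resolution.
	if err := restartBob(); err != nil {
		t.Fatalf("unable to restart bob: %v", err)
	}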
- restartBob, err := net.SuspendNode(net.Bob) + restartBob, err := net.SuspendNode(bob) if err != nil { t.Fatalf("unable to suspend bob: %v", err) } @@ -256,7 +258,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest pendingChansRequest := &lnrpc.PendingChannelsRequest{} err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -338,7 +340,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // succeeded. ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) err = checkPaymentStatus( - ctxt, net.Alice, preimage, lnrpc.Payment_SUCCEEDED, + ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED, ) if err != nil { t.Fatalf(err.Error()) diff --git a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go index 960ff17d..b6a8bcd9 100644 --- a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go @@ -20,14 +20,16 @@ import ( // that's timed out. At this point, the node should timeout the HTLC using the // HTLC timeout transaction, then cancel it backwards as normal. func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest) { + t *harnessTest, alice, bob *lntest.HarnessNode) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) + aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( + t, net, alice, bob, true, + ) // Clean up carol's node when the test finishes. defer shutdownAndAssert(net, t, carol) @@ -42,7 +44,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -63,7 +65,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // Once the HTLC has cleared, all channels in our mini network should // have the it locked in. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) if predErr != nil { @@ -80,14 +82,14 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // force close the Bob -> Carol channel. This should trigger contract // resolution mode for both of them. ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Bob, bobChanPoint, true) + closeChannelAndAssert(ctxt, t, net, bob, bobChanPoint, true) // At this point, Bob should have a pending force close channel as he // just went to chain. 
pendingChansRequest := &lnrpc.PendingChannelsRequest{} err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels(ctxt, + pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest) if err != nil { predErr = fmt.Errorf("unable to query for pending "+ @@ -136,7 +138,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // that's now in stage one. err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -183,7 +185,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // With the second layer timeout transaction confirmed, Bob should have // canceled backwards the HTLC that carol sent. - nodes = []*lntest.HarnessNode{net.Alice} + nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { @@ -199,7 +201,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // second stage. err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -253,7 +255,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // close. err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -274,5 +276,5 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, } ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) + closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, false) } diff --git a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go index 6c6d6eab..7d9cfa56 100644 --- a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go @@ -20,14 +20,16 @@ import ( // transaction once the timeout has expired. Once we sweep the transaction, we // should also cancel back the initial HTLC. func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest) { + t *harnessTest, alice, bob *lntest.HarnessNode) { + ctxb := context.Background() // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := - createThreeHopNetwork(t, net, true) + aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( + t, net, alice, bob, true, + ) // Clean up carol's node when the test finishes. 
defer shutdownAndAssert(net, t, carol) @@ -43,7 +45,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, ctx, cancel := context.WithCancel(ctxb) defer cancel() - alicePayStream, err := net.Alice.SendPayment(ctx) + alicePayStream, err := alice.SendPayment(ctx) if err != nil { t.Fatalf("unable to create payment stream for alice: %v", err) } @@ -64,7 +66,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // Once the HTLC has cleared, all the nodes in our mini network should // show that the HTLC has been locked in. var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) if predErr != nil { @@ -88,7 +90,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, pendingChansRequest := &lnrpc.PendingChannelsRequest{} err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -128,7 +130,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // first stage since this is a direct HTLC. err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -191,7 +193,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // Now that the sweeping transaction has been confirmed, Bob should // cancel back that HTLC. As a result, Alice should not know of any // active HTLC's. - nodes = []*lntest.HarnessNode{net.Alice} + nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { @@ -208,7 +210,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // he should show no additional pending transactions. err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( + pendingChanResp, err := bob.PendingChannels( ctxt, pendingChansRequest, ) if err != nil { @@ -232,5 +234,5 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // and then shutting down the new node we created as its no longer // needed. ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) + closeChannelAndAssert(ctxt, t, net, alice, aliceChanPoint, false) } From 1ade912361bb422c834984416ac024b0861cf39e Mon Sep 17 00:00:00 2001 From: "Johan T. 
Halseth" Date: Wed, 4 Mar 2020 13:21:28 +0100 Subject: [PATCH 8/9] itest: run multi-hop claim tests for all commit types --- ...d_multi-hop_htlc_local_chain_claim_test.go | 70 ++++++++++++------- .../lnd_multi-hop_htlc_local_timeout_test.go | 4 +- ...ulti-hop_htlc_receiver_chain_claim_test.go | 4 +- ..._multi-hop_htlc_remote_chain_claim_test.go | 4 +- ..._force_close_on_chain_htlc_timeout_test.go | 4 +- ..._force_close_on_chain_htlc_timeout_test.go | 4 +- 6 files changed, 53 insertions(+), 37 deletions(-) diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index d5eade40..4ebd83e7 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lnrpc" @@ -23,7 +24,7 @@ func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { type testCase struct { name string test func(net *lntest.NetworkHarness, t *harnessTest, alice, - bob *lntest.HarnessNode) + bob *lntest.HarnessNode, c commitType) } subTests := []testCase{ @@ -67,32 +68,47 @@ func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { }, } - args := []string{} - alice, err := net.NewNode("Alice", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, alice) - - bob, err := net.NewNode("Bob", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, bob) - - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("unable to connect alice to bob: %v", err) + commitTypes := []commitType{ + commitTypeLegacy, } - for _, subTest := range subTests { - subTest := subTest + for _, commitType := range commitTypes { + testName := fmt.Sprintf("committype=%v", commitType.String()) - success := t.t.Run(subTest.name, func(t *testing.T) { + success := t.t.Run(testName, func(t *testing.T) { ht := newHarnessTest(t, net) - subTest.test(net, ht, alice, bob) + args := commitType.Args() + alice, err := net.NewNode("Alice", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, ht, alice) + + bob, err := net.NewNode("Bob", args) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, ht, bob) + + ctxb := context.Background() + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, alice, bob); err != nil { + t.Fatalf("unable to connect alice to bob: %v", err) + } + + for _, subTest := range subTests { + subTest := subTest + + success := ht.t.Run(subTest.name, func(t *testing.T) { + ht := newHarnessTest(t, net) + + subTest.test(net, ht, alice, bob, commitType) + }) + if !success { + return + } + } }) if !success { return @@ -105,7 +121,7 @@ func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { // preimage via the witness beacon, we properly settle the HTLC on-chain using // the HTLC success transaction in order to ensure we don't lose any funds. 
func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, - alice, bob *lntest.HarnessNode) { + alice, bob *lntest.HarnessNode, c commitType) { ctxb := context.Background() @@ -113,7 +129,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, false, + t, net, alice, bob, false, c, ) // Clean up carol's node when the test finishes. @@ -585,8 +601,8 @@ func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode, } func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, - alice, bob *lntest.HarnessNode, carolHodl bool) (*lnrpc.ChannelPoint, - *lnrpc.ChannelPoint, *lntest.HarnessNode) { + alice, bob *lntest.HarnessNode, carolHodl bool, c commitType) ( + *lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) { ctxb := context.Background() @@ -634,7 +650,7 @@ func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, // Next, we'll create a new node "carol" and have Bob connect to her. If // the carolHodl flag is set, we'll make carol always hold onto the // HTLC, this way it'll force Bob to go to chain to resolve the HTLC. - carolFlags := []string{} + carolFlags := c.Args() if carolHodl { carolFlags = append(carolFlags, "--hodl.exit-settle") } diff --git a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go index 8d6e8fa7..98b66bda 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go @@ -22,7 +22,7 @@ import ( // canceled backwards. Once the timeout has been reached, then we should sweep // it on-chain, and cancel the HTLC backwards. func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, - alice, bob *lntest.HarnessNode) { + alice, bob *lntest.HarnessNode, c commitType) { ctxb := context.Background() @@ -30,7 +30,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, true, + t, net, alice, bob, true, c, ) // Clean up carol's node when the test finishes. diff --git a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go index 845d7998..e11b2ed7 100644 --- a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go @@ -25,7 +25,7 @@ import ( // extract the preimage from the sweep transaction, and finish settling the // HTLC backwards into the route. func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, - alice, bob *lntest.HarnessNode) { + alice, bob *lntest.HarnessNode, c commitType) { ctxb := context.Background() @@ -33,7 +33,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, false, + t, net, alice, bob, false, c, ) // Clean up carol's node when the test finishes. 
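Every node created in these tests is paired with a defer of shutdownAndAssert. The helper itself is defined in lnd_test.go outside this excerpt; a plausible minimal shape, assuming the harness exposes ShutdownNode as its other call sites suggest:

	// shutdownAndAssert shuts down the given node and fails the test if
	// the shutdown does not complete cleanly. Sketch only; the real
	// helper may perform additional assertions.
	func shutdownAndAssert(net *lntest.NetworkHarness, t *harnessTest,
		node *lntest.HarnessNode) {

		if err := net.ShutdownNode(node); err != nil {
			t.Fatalf("unable to shutdown node: %v", err)
		}
	}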
diff --git a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go index 1a5db70b..257b7b97 100644 --- a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go @@ -23,7 +23,7 @@ import ( // HTLC directly on-chain using the preimage in order to ensure that we don't // lose any funds. func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest, - alice, bob *lntest.HarnessNode) { + alice, bob *lntest.HarnessNode, c commitType) { ctxb := context.Background() @@ -31,7 +31,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, false, + t, net, alice, bob, false, c, ) // Clean up carol's node when the test finishes. diff --git a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go index b6a8bcd9..b60cfda4 100644 --- a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go @@ -20,7 +20,7 @@ import ( // that's timed out. At this point, the node should timeout the HTLC using the // HTLC timeout transaction, then cancel it backwards as normal. func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest, alice, bob *lntest.HarnessNode) { + t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) { ctxb := context.Background() @@ -28,7 +28,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, true, + t, net, alice, bob, true, c, ) // Clean up carol's node when the test finishes. diff --git a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go index 7d9cfa56..2f70fe3c 100644 --- a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go @@ -20,7 +20,7 @@ import ( // transaction once the timeout has expired. Once we sweep the transaction, we // should also cancel back the initial HTLC. func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, - t *harnessTest, alice, bob *lntest.HarnessNode) { + t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) { ctxb := context.Background() @@ -28,7 +28,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, alice, bob, true, + t, net, alice, bob, true, c, ) // Clean up carol's node when the test finishes. From b4ea34037aa873a74ddde56103e8469a883c7c36 Mon Sep 17 00:00:00 2001 From: "Johan T. 
Halseth" Date: Mon, 23 Mar 2020 11:19:18 +0100 Subject: [PATCH 9/9] lntest: move multi-hop root test case and common utils to own file --- ...d_multi-hop_htlc_local_chain_claim_test.go | 271 ----------------- lntest/itest/lnd_multi-hop_test.go | 284 ++++++++++++++++++ 2 files changed, 284 insertions(+), 271 deletions(-) create mode 100644 lntest/itest/lnd_multi-hop_test.go diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index 4ebd83e7..37634a28 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -5,11 +5,9 @@ package itest import ( "context" "fmt" - "testing" "time" "github.com/btcsuite/btcd/wire" - "github.com/btcsuite/btcutil" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lnrpc" @@ -19,103 +17,6 @@ import ( "github.com/lightningnetwork/lnd/lntypes" ) -func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { - - type testCase struct { - name string - test func(net *lntest.NetworkHarness, t *harnessTest, alice, - bob *lntest.HarnessNode, c commitType) - } - - subTests := []testCase{ - { - // bob: outgoing our commit timeout - // carol: incoming their commit watch and see timeout - name: "local force close immediate expiry", - test: testMultiHopHtlcLocalTimeout, - }, - { - // bob: outgoing watch and see, they sweep on chain - // carol: incoming our commit, know preimage - name: "receiver chain claim", - test: testMultiHopReceiverChainClaim, - }, - { - // bob: outgoing our commit watch and see timeout - // carol: incoming their commit watch and see timeout - name: "local force close on-chain htlc timeout", - test: testMultiHopLocalForceCloseOnChainHtlcTimeout, - }, - { - // bob: outgoing their commit watch and see timeout - // carol: incoming our commit watch and see timeout - name: "remote force close on-chain htlc timeout", - test: testMultiHopRemoteForceCloseOnChainHtlcTimeout, - }, - { - // bob: outgoing our commit watch and see, they sweep on chain - // bob: incoming our commit watch and learn preimage - // carol: incoming their commit know preimage - name: "local chain claim", - test: testMultiHopHtlcLocalChainClaim, - }, - { - // bob: outgoing their commit watch and see, they sweep on chain - // bob: incoming their commit watch and learn preimage - // carol: incoming our commit know preimage - name: "remote chain claim", - test: testMultiHopHtlcRemoteChainClaim, - }, - } - - commitTypes := []commitType{ - commitTypeLegacy, - } - - for _, commitType := range commitTypes { - testName := fmt.Sprintf("committype=%v", commitType.String()) - - success := t.t.Run(testName, func(t *testing.T) { - ht := newHarnessTest(t, net) - - args := commitType.Args() - alice, err := net.NewNode("Alice", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, ht, alice) - - bob, err := net.NewNode("Bob", args) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, ht, bob) - - ctxb := context.Background() - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, alice, bob); err != nil { - t.Fatalf("unable to connect alice to bob: %v", err) - } - - for _, subTest := range subTests { - subTest := subTest - - success := ht.t.Run(subTest.name, func(t *testing.T) { - ht := newHarnessTest(t, net) - - subTest.test(net, ht, alice, bob, commitType) 
- }) - if !success { - return - } - } - }) - if !success { - return - } - } -} - // testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if // we force close a channel with an incoming HTLC, and later find out the // preimage via the witness beacon, we properly settle the HTLC on-chain using @@ -518,175 +419,3 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, t.Fatalf(err.Error()) } } - -// waitForInvoiceAccepted waits until the specified invoice moved to the -// accepted state by the node. -func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode, - payHash lntypes.Hash) { - - ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - invoiceUpdates, err := node.SubscribeSingleInvoice(ctx, - &invoicesrpc.SubscribeSingleInvoiceRequest{ - RHash: payHash[:], - }, - ) - if err != nil { - t.Fatalf("subscribe single invoice: %v", err) - } - - for { - update, err := invoiceUpdates.Recv() - if err != nil { - t.Fatalf("invoice update err: %v", err) - } - if update.State == lnrpc.Invoice_ACCEPTED { - break - } - } -} - -// checkPaymentStatus asserts that the given node list a payment with the given -// preimage has the expected status. -func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode, - preimage lntypes.Preimage, status lnrpc.Payment_PaymentStatus) error { - - req := &lnrpc.ListPaymentsRequest{ - IncludeIncomplete: true, - } - paymentsResp, err := node.ListPayments(ctxt, req) - if err != nil { - return fmt.Errorf("error when obtaining Alice payments: %v", - err) - } - - payHash := preimage.Hash() - var found bool - for _, p := range paymentsResp.Payments { - if p.PaymentHash != payHash.String() { - continue - } - - found = true - if p.Status != status { - return fmt.Errorf("expected payment status "+ - "%v, got %v", status, p.Status) - } - - switch status { - - // If this expected status is SUCCEEDED, we expect the final preimage. - case lnrpc.Payment_SUCCEEDED: - if p.PaymentPreimage != preimage.String() { - return fmt.Errorf("preimage doesn't match: %v vs %v", - p.PaymentPreimage, preimage.String()) - } - - // Otherwise we expect an all-zero preimage. - default: - if p.PaymentPreimage != (lntypes.Preimage{}).String() { - return fmt.Errorf("expected zero preimage, got %v", - p.PaymentPreimage) - } - } - - } - - if !found { - return fmt.Errorf("payment with payment hash %v not found "+ - "in response", payHash) - } - - return nil -} - -func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, - alice, bob *lntest.HarnessNode, carolHodl bool, c commitType) ( - *lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) { - - ctxb := context.Background() - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err := net.EnsureConnected(ctxt, alice, bob) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) - } - - ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) - err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, alice) - if err != nil { - t.Fatalf("unable to send coins to Alice: %v", err) - } - - ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout) - err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, bob) - if err != nil { - t.Fatalf("unable to send coins to Bob: %v", err) - } - - // We'll start the test by creating a channel between Alice and Bob, - // which will act as the first leg for out multi-hop HTLC. 
-	const chanAmt = 1000000
-	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
-	aliceChanPoint := openChannelAndAssert(
-		ctxt, t, net, alice, bob,
-		lntest.OpenChannelParams{
-			Amt: chanAmt,
-		},
-	)
-
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
-	if err != nil {
-		t.Fatalf("alice didn't report channel: %v", err)
-	}
-
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
-	if err != nil {
-		t.Fatalf("bob didn't report channel: %v", err)
-	}
-
-	// Next, we'll create a new node "carol" and have Bob connect to her. If
-	// the carolHodl flag is set, we'll make carol always hold onto the
-	// HTLC, this way it'll force Bob to go to chain to resolve the HTLC.
-	carolFlags := c.Args()
-	if carolHodl {
-		carolFlags = append(carolFlags, "--hodl.exit-settle")
-	}
-	carol, err := net.NewNode("Carol", carolFlags)
-	if err != nil {
-		t.Fatalf("unable to create new node: %v", err)
-	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	if err := net.ConnectNodes(ctxt, bob, carol); err != nil {
-		t.Fatalf("unable to connect bob to carol: %v", err)
-	}
-
-	// We'll then create a channel from Bob to Carol. After this channel is
-	// open, our topology looks like: A -> B -> C.
-	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
-	bobChanPoint := openChannelAndAssert(
-		ctxt, t, net, bob, carol,
-		lntest.OpenChannelParams{
-			Amt: chanAmt,
-		},
-	)
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
-	if err != nil {
-		t.Fatalf("alice didn't report channel: %v", err)
-	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
-	if err != nil {
-		t.Fatalf("bob didn't report channel: %v", err)
-	}
-	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-	err = alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
-	if err != nil {
-		t.Fatalf("bob didn't report channel: %v", err)
-	}
-
-	return aliceChanPoint, bobChanPoint, carol
-}
diff --git a/lntest/itest/lnd_multi-hop_test.go b/lntest/itest/lnd_multi-hop_test.go
new file mode 100644
index 00000000..2d4a9e5f
--- /dev/null
+++ b/lntest/itest/lnd_multi-hop_test.go
@@ -0,0 +1,284 @@
+// +build rpctest
+
+package itest
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/btcsuite/btcutil"
+	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
+	"github.com/lightningnetwork/lnd/lntest"
+	"github.com/lightningnetwork/lnd/lntypes"
+)
+
+func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) {
+
+	type testCase struct {
+		name string
+		test func(net *lntest.NetworkHarness, t *harnessTest, alice,
+			bob *lntest.HarnessNode, c commitType)
+	}
+
+	subTests := []testCase{
+		{
+			// bob: outgoing our commit timeout
+			// carol: incoming their commit watch and see timeout
+			name: "local force close immediate expiry",
+			test: testMultiHopHtlcLocalTimeout,
+		},
+		{
+			// bob: outgoing watch and see, they sweep on chain
+			// carol: incoming our commit, know preimage
+			name: "receiver chain claim",
+			test: testMultiHopReceiverChainClaim,
+		},
+		{
+			// bob: outgoing our commit watch and see timeout
+			// carol: incoming their commit watch and see timeout
+			name: "local force close on-chain htlc timeout",
+			test: testMultiHopLocalForceCloseOnChainHtlcTimeout,
+		},
+		{
+			// bob: outgoing their commit watch and see timeout
+			// carol: incoming our commit watch and see timeout
+			name: "remote force close on-chain htlc timeout",
+			test: testMultiHopRemoteForceCloseOnChainHtlcTimeout,
+		},
+		{
+			// bob: outgoing our commit watch and see, they sweep on chain
+			// bob: incoming our commit watch and learn preimage
+			// carol: incoming their commit know preimage
+			name: "local chain claim",
+			test: testMultiHopHtlcLocalChainClaim,
+		},
+		{
+			// bob: outgoing their commit watch and see, they sweep on chain
+			// bob: incoming their commit watch and learn preimage
+			// carol: incoming our commit know preimage
+			name: "remote chain claim",
+			test: testMultiHopHtlcRemoteChainClaim,
+		},
+	}
+
+	commitTypes := []commitType{
+		commitTypeLegacy,
+	}
+
+	for _, commitType := range commitTypes {
+		testName := fmt.Sprintf("committype=%v", commitType.String())
+
+		success := t.t.Run(testName, func(t *testing.T) {
+			ht := newHarnessTest(t, net)
+
+			args := commitType.Args()
+			alice, err := net.NewNode("Alice", args)
+			if err != nil {
+				t.Fatalf("unable to create new node: %v", err)
+			}
+			defer shutdownAndAssert(net, ht, alice)
+
+			bob, err := net.NewNode("Bob", args)
+			if err != nil {
+				t.Fatalf("unable to create new node: %v", err)
+			}
+			defer shutdownAndAssert(net, ht, bob)
+
+			ctxb := context.Background()
+			ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
+			if err := net.ConnectNodes(ctxt, alice, bob); err != nil {
+				t.Fatalf("unable to connect alice to bob: %v", err)
+			}
+
+			for _, subTest := range subTests {
+				subTest := subTest
+
+				success := ht.t.Run(subTest.name, func(t *testing.T) {
+					ht := newHarnessTest(t, net)
+
+					subTest.test(net, ht, alice, bob, commitType)
+				})
+				if !success {
+					return
+				}
+			}
+		})
+		if !success {
+			return
+		}
+	}
+}
+
+// waitForInvoiceAccepted waits until the specified invoice has been moved to
+// the accepted state by the node.
+func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode,
+	payHash lntypes.Hash) {
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
+	defer cancel()
+	invoiceUpdates, err := node.SubscribeSingleInvoice(ctx,
+		&invoicesrpc.SubscribeSingleInvoiceRequest{
+			RHash: payHash[:],
+		},
+	)
+	if err != nil {
+		t.Fatalf("subscribe single invoice: %v", err)
+	}
+
+	for {
+		update, err := invoiceUpdates.Recv()
+		if err != nil {
+			t.Fatalf("invoice update err: %v", err)
+		}
+		if update.State == lnrpc.Invoice_ACCEPTED {
+			break
+		}
+	}
+}
+
+// checkPaymentStatus asserts that the given node lists a payment with the
+// given preimage, and that this payment has the expected status.
+func checkPaymentStatus(ctxt context.Context, node *lntest.HarnessNode,
+	preimage lntypes.Preimage, status lnrpc.Payment_PaymentStatus) error {
+
+	req := &lnrpc.ListPaymentsRequest{
+		IncludeIncomplete: true,
+	}
+	paymentsResp, err := node.ListPayments(ctxt, req)
+	if err != nil {
+		return fmt.Errorf("error when obtaining payments: %v",
+			err)
+	}
+
+	payHash := preimage.Hash()
+	var found bool
+	for _, p := range paymentsResp.Payments {
+		if p.PaymentHash != payHash.String() {
+			continue
+		}
+
+		found = true
+		if p.Status != status {
+			return fmt.Errorf("expected payment status "+
+				"%v, got %v", status, p.Status)
+		}
+
+		switch status {
+
+		// If this expected status is SUCCEEDED, we expect the final preimage.
+		case lnrpc.Payment_SUCCEEDED:
+			if p.PaymentPreimage != preimage.String() {
+				return fmt.Errorf("preimage doesn't match: %v vs %v",
+					p.PaymentPreimage, preimage.String())
+			}
+
+		// Otherwise we expect an all-zero preimage.
+		default:
+			if p.PaymentPreimage != (lntypes.Preimage{}).String() {
+				return fmt.Errorf("expected zero preimage, got %v",
+					p.PaymentPreimage)
+			}
+		}
+
+	}
+
+	if !found {
+		return fmt.Errorf("payment with payment hash %v not found "+
+			"in response", payHash)
+	}
+
+	return nil
+}
+
+func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness,
+	alice, bob *lntest.HarnessNode, carolHodl bool, c commitType) (
+	*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) {
+
+	ctxb := context.Background()
+
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
+	err := net.EnsureConnected(ctxt, alice, bob)
+	if err != nil {
+		t.Fatalf("unable to connect peers: %v", err)
+	}
+
+	ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout)
+	err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, alice)
+	if err != nil {
+		t.Fatalf("unable to send coins to Alice: %v", err)
+	}
+
+	ctxt, _ = context.WithTimeout(context.Background(), defaultTimeout)
+	err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, bob)
+	if err != nil {
+		t.Fatalf("unable to send coins to Bob: %v", err)
+	}
+
+	// We'll start the test by creating a channel between Alice and Bob,
+	// which will act as the first leg for our multi-hop HTLC.
+	const chanAmt = 1000000
+	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
+	aliceChanPoint := openChannelAndAssert(
+		ctxt, t, net, alice, bob,
+		lntest.OpenChannelParams{
+			Amt: chanAmt,
+		},
+	)
+
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
+	if err != nil {
+		t.Fatalf("alice didn't report channel: %v", err)
+	}
+
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
+	if err != nil {
+		t.Fatalf("bob didn't report channel: %v", err)
+	}
+
+	// Next, we'll create a new node "carol" and have Bob connect to her. If
+	// the carolHodl flag is set, we'll make carol always hold onto the
+	// HTLC, this way it'll force Bob to go to chain to resolve the HTLC.
+	carolFlags := c.Args()
+	if carolHodl {
+		carolFlags = append(carolFlags, "--hodl.exit-settle")
+	}
+	carol, err := net.NewNode("Carol", carolFlags)
+	if err != nil {
+		t.Fatalf("unable to create new node: %v", err)
+	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	if err := net.ConnectNodes(ctxt, bob, carol); err != nil {
+		t.Fatalf("unable to connect bob to carol: %v", err)
+	}
+
+	// We'll then create a channel from Bob to Carol. After this channel is
+	// open, our topology looks like: A -> B -> C.
+	ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
+	bobChanPoint := openChannelAndAssert(
+		ctxt, t, net, bob, carol,
+		lntest.OpenChannelParams{
+			Amt: chanAmt,
+		},
+	)
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
+	if err != nil {
+		t.Fatalf("bob didn't report channel: %v", err)
+	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
+	if err != nil {
+		t.Fatalf("carol didn't report channel: %v", err)
+	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
+	if err != nil {
+		t.Fatalf("alice didn't report channel: %v", err)
+	}
+
+	return aliceChanPoint, bobChanPoint, carol
+}
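For reference, the commitType helper that patch 8 threads through these tests
(commitTypeLegacy, Args and String) is introduced by an earlier patch in this
series that is not reproduced above. Below is a minimal sketch of the shape
the tests assume; the member list and method bodies are illustrative
assumptions, not the series' actual implementation.

// commitType models the commitment format the test nodes are started with.
// Only commitTypeLegacy is exercised by the hunks above; any further members
// and the method bodies below are assumptions for illustration.
type commitType byte

const (
	// commitTypeLegacy is the default commitment format.
	commitTypeLegacy commitType = iota
)

// Args returns the extra command line flags nodes of this commit type must
// be started with; the legacy type needs none.
func (c commitType) Args() []string {
	switch c {
	case commitTypeLegacy:
		return []string{}
	}

	return nil
}

// String returns the name used to label the per-commit-type subtest run;
// testMultiHopHtlcClaims formats it as "committype=legacy".
func (c commitType) String() string {
	switch c {
	case commitTypeLegacy:
		return "legacy"
	}

	return "invalid"
}

With this shape, covering an additional commitment format later only requires
appending a member to the commitTypes slice in testMultiHopHtlcClaims and
teaching Args() the corresponding node flag.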