// +build rpctest

package itest

import (
	"context"
	"fmt"
	"time"

	"github.com/btcsuite/btcutil"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
)

// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a
// multi-hop HTLC, and the final destination of the HTLC force closes the
// channel, then we properly time out the HTLC directly on *their* commitment
// transaction once the timeout has expired. Once we sweep the transaction, we
// should also cancel back the initial HTLC.
func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
	t *harnessTest, alice, bob *lntest.HarnessNode, c commitType) {

	ctxb := context.Background()

	// First, we'll create a three hop network: Alice -> Bob -> Carol,
	// with Carol refusing to actually settle or directly cancel any
	// HTLCs itself.
	aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
		t, net, alice, bob, true, c,
	)

	// Clean up carol's node when the test finishes.
	defer shutdownAndAssert(net, t, carol)

	// With our channels set up, we'll then send a single HTLC from Alice
	// to Carol. As Carol is in hodl mode, she won't settle this HTLC,
	// which sets the stage for our tests.
	const (
		finalCltvDelta = 40
		htlcAmt        = btcutil.Amount(30000)
	)

	ctx, cancel := context.WithCancel(ctxb)
	defer cancel()

	// We'll now send a single HTLC across our multi-hop network.
	carolPubKey := carol.PubKey[:]
	payHash := makeFakePayHash(t)
	_, err := alice.RouterClient.SendPaymentV2(
		ctx, &routerrpc.SendPaymentRequest{
			Dest:           carolPubKey,
			Amt:            int64(htlcAmt),
			PaymentHash:    payHash,
			FinalCltvDelta: finalCltvDelta,
			TimeoutSeconds: 60,
			FeeLimitMsat:   noFeeLimitMsat,
		},
	)
	if err != nil {
		t.Fatalf("unable to send alice htlc: %v", err)
	}

	// Once the HTLC has cleared, all the nodes in our mini network should
	// show that the HTLC has been locked in.
	var predErr error
	nodes := []*lntest.HarnessNode{alice, bob, carol}
	err = wait.Predicate(func() bool {
		predErr = assertActiveHtlcs(nodes, payHash)
		return predErr == nil
	}, time.Second*15)
	if err != nil {
		t.Fatalf("htlc mismatch: %v", predErr)
	}

	// At this point, we'll instruct Carol to force close the channel.
	// This will let us exercise that Bob is able to sweep the expired
	// HTLC on Carol's version of the commitment transaction. If Carol
	// has an anchor, it will be swept too.
	ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssertType(
		ctxt, t, net, carol, bobChanPoint, c == commitTypeAnchors,
		true,
	)

	// At this point, Bob should have a pending force close channel as
	// Carol has gone directly to chain.
	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
	err = wait.Predicate(func() bool {
		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
		pendingChanResp, err := bob.PendingChannels(
			ctxt, pendingChansRequest,
		)
		if err != nil {
			predErr = fmt.Errorf("unable to query for "+
				"pending channels: %v", err)
			return false
		}
		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
			predErr = fmt.Errorf("bob should have pending " +
				"force close channels but doesn't")
			return false
		}

		return true
	}, time.Second*15)
	if err != nil {
		t.Fatalf("%v", predErr)
	}

	// Bob can sweep his output immediately. If there is an anchor, Bob
	// will sweep that as well.
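	//
	// NOTE: the commitment sweep and the anchor sweep are broadcast as
	// separate transactions, so for the anchor commitment type we expect
	// to find two transactions in the mempool below instead of one.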
	expectedTxes := 1
	if c == commitTypeAnchors {
		expectedTxes = 2
	}
	_, err = waitForNTxsInMempool(
		net.Miner.Node, expectedTxes, minerMempoolTimeout,
	)
	if err != nil {
		t.Fatalf("failed to find txes in miner mempool: %v", err)
	}

	// Next, we'll mine enough blocks for the HTLC to expire. At this
	// point, Bob should hand off the output to his internal utxo nursery,
	// which will broadcast a sweep transaction.
	numBlocks := padCLTV(finalCltvDelta - 1)
	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// If we check Bob's pending channel report, it should show that he
	// has a single HTLC that's now in the second stage, as we skip the
	// initial first stage since this is a direct HTLC.
	err = wait.Predicate(func() bool {
		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
		pendingChanResp, err := bob.PendingChannels(
			ctxt, pendingChansRequest,
		)
		if err != nil {
			predErr = fmt.Errorf("unable to query for pending "+
				"channels: %v", err)
			return false
		}

		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
			predErr = fmt.Errorf("bob should have a pending " +
				"force close channel but doesn't")
			return false
		}

		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
		if len(forceCloseChan.PendingHtlcs) != 1 {
			predErr = fmt.Errorf("bob should have a pending " +
				"htlc but doesn't")
			return false
		}
		if forceCloseChan.PendingHtlcs[0].Stage != 2 {
			predErr = fmt.Errorf("bob's htlc should have "+
				"advanced to the second stage, found stage "+
				"%v", forceCloseChan.PendingHtlcs[0].Stage)
			return false
		}

		return true
	}, time.Second*15)
	if err != nil {
		t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
	}

	// Bob's sweeping transaction should now be found in the mempool at
	// this point.
	sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
	if err != nil {
		// If Bob's transaction isn't yet in the mempool, then due to
		// internal message passing and the low period between blocks
		// being mined, it may have been detected as a late
		// registration. As a result, we'll mine another block and
		// repeat the check. If it doesn't go through this time, then
		// we'll fail.
		// TODO(halseth): can we use waitForChannelPendingForceClose to
		// avoid this hack?
		if _, err := net.Miner.Node.Generate(1); err != nil {
			t.Fatalf("unable to generate block: %v", err)
		}
		sweepTx, err = waitForTxInMempool(
			net.Miner.Node, minerMempoolTimeout,
		)
		if err != nil {
			t.Fatalf("unable to find bob's sweeping transaction: "+
				"%v", err)
		}
	}

	// If we mine an additional block, then this should confirm Bob's
	// transaction which sweeps the direct HTLC output.
	block := mineBlocks(t, net, 1, 1)[0]
	assertTxInBlock(t, block, sweepTx)

	// Now that the sweeping transaction has been confirmed, Bob should
	// cancel back that HTLC. As a result, Alice should not know of any
	// active HTLCs.
	nodes = []*lntest.HarnessNode{alice}
	err = wait.Predicate(func() bool {
		predErr = assertNumActiveHtlcs(nodes, 0)
		return predErr == nil
	}, time.Second*15)
	if err != nil {
		t.Fatalf("alice's channel still has active htlcs: %v",
			predErr)
	}

	// Now we'll check Bob's pending channel report. Since this was
	// Carol's commitment, he doesn't have to wait for any CSV delays. As
	// a result, he should show no additional pending transactions.
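	//
	// The CSV delay only encumbers outputs on a node's own commitment
	// transaction; Bob's outputs on Carol's commitment carry no
	// to_self_delay, so once the HTLC sweep confirms, nothing should
	// remain pending for Bob.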
	err = wait.Predicate(func() bool {
		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
		pendingChanResp, err := bob.PendingChannels(
			ctxt, pendingChansRequest,
		)
		if err != nil {
			predErr = fmt.Errorf("unable to query for pending "+
				"channels: %v", err)
			return false
		}
		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
			predErr = fmt.Errorf("bob still has pending channels "+
				"but shouldn't: %v",
				spew.Sdump(pendingChanResp))
			return false
		}

		return true
	}, time.Second*15)
	if err != nil {
		t.Fatalf("%v", predErr)
	}

	// We'll close out the test by cooperatively closing the channel from
	// Alice to Bob (no anchor sweep is expected for a coop close), and
	// then shutting down the new node we created as it's no longer
	// needed.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssertType(
		ctxt, t, net, alice, aliceChanPoint, false, false,
	)
}
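
// The harness is expected to invoke the test above once per commitment type.
// A minimal sketch of such a driver, assuming a hypothetical
// runMultiHopRemoteForceCloseTests wrapper and that the suite defines both
// commitTypeLegacy and commitTypeAnchors (only the latter appears in this
// file):
//
//	func runMultiHopRemoteForceCloseTests(net *lntest.NetworkHarness,
//		t *harnessTest, alice, bob *lntest.HarnessNode) {
//
//		for _, commit := range []commitType{
//			commitTypeLegacy, commitTypeAnchors,
//		} {
//			testMultiHopRemoteForceCloseOnChainHtlcTimeout(
//				net, t, alice, bob, commit,
//			)
//		}
//	}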