test: add comprehensive integration tests for on-chain HTLC handling

In this commit, we add 6 new integration tests to exercise the various
actions that may need to be performed when either side goes on-chain to
fully resolve HTLCs. Many of the tests are mirrors of each other, as
they test sweeping/resolving HTLCs from both commitment transactions.
Olaoluwa Osuntokun 2018-01-22 18:59:44 -08:00
parent a1e2560b12
commit f8adab1f1c
5 changed files with 1399 additions and 15 deletions

@@ -42,6 +42,8 @@ const (
defaultNoEncryptWallet = false
defaultTrickleDelay = 30 * 1000
defaultBroadcastDelta = 10
// minTimeLockDelta is the minimum timelock we require for incoming
// HTLCs on our channels.
minTimeLockDelta = 4
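For orientation, here is a minimal sketch of how a broadcast delta like this is applied (the shouldBroadcast helper is an illustration only, not lnd's actual API): once the chain tip comes within broadcastDelta blocks of an outgoing HTLC's expiry, the node goes to chain rather than let the timeout lapse.

package main

import "fmt"

// shouldBroadcast sketches the go-to-chain decision: broadcast the
// commitment once the current height is within broadcastDelta blocks of
// the HTLC's absolute expiry height. (Hypothetical helper.)
func shouldBroadcast(bestHeight, htlcExpiry, broadcastDelta uint32) bool {
	return bestHeight+broadcastDelta >= htlcExpiry
}

func main() {
	// Mirrors the tests below: with finalCltvDelta = 40 and
	// defaultBroadcastDelta = 10, mining 30 of the 40 blocks puts the
	// node inside its broadcast window.
	fmt.Println(shouldBroadcast(130, 140, 10)) // true
	fmt.Println(shouldBroadcast(129, 140, 10)) // false
}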

@@ -1345,7 +1345,7 @@ func (c *commitSweepResolver) Resolve() (ContractResolver, error) {
return nil, fmt.Errorf("quitting")
}
log.Infof("ChannelPoint(%v) is fully closed, at height: %v",
log.Infof("ChannelPoint(%v) commit tx is fully resolved, at height: %v",
c.chanPoint, confInfo.BlockHeight)
case <-c.Quit:

@@ -941,9 +941,13 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
// add it to our preimage cache. By doing this, we ensure
// any contested contracts watched by any on-chain arbitrators
// can now sweep this HTLC on-chain.
if err := l.cfg.PreimageCache.AddPreimage(pre[:]); err != nil {
log.Errorf("unable to add preimage=%x to cache", pre[:])
go func() {
err := l.cfg.PreimageCache.AddPreimage(pre[:])
if err != nil {
log.Errorf("unable to add preimage=%x to "+
"cache", pre[:])
}
}()
case *lnwire.UpdateFailMalformedHTLC:
// Convert the failure type encoded within the HTLC fail
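(The preimage cache write above now runs in its own goroutine, presumably so a slow or blocking preimage store can't stall the link's handleUpstreamMsg loop; the error is only logged since the settle has already been accepted.)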

@@ -3516,7 +3516,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
t.Fatalf("justice tx wasn't mined")
}
assertNumChannels(t, ctxb, net.Alice, 0)
assertNodeNumChannels(t, ctxb, net.Alice, 0)
}
// testRevokedCloseRetributionRemoteHodl tests that Alice properly responds to a
@@ -3782,12 +3782,6 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
numInvoices/2, exNumInputs,
len(justiceTx.MsgTx().TxIn))
}
for _, txIn := range justiceTx.MsgTx().TxIn {
if !bytes.Equal(txIn.PreviousOutPoint.Hash[:], breachTXID[:]) {
t.Fatalf("justice tx not spending commitment utxo "+
"instead is: %v", txIn.PreviousOutPoint)
}
}
// Now mine a block, this transaction should include Alice's justice
// transaction which was just accepted into the mempool.
@@ -3803,12 +3797,12 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
t.Fatalf("justice tx wasn't mined")
}
assertNumChannels(t, ctxb, net.Alice, 0)
assertNodeNumChannels(t, ctxb, net.Alice, 0)
}
// assertNumChannels polls the provided node's list channels rpc until it
// assertNodeNumChannels polls the provided node's list channels rpc until it
// reaches the desired number of total channels.
func assertNumChannels(t *harnessTest, ctxb context.Context,
func assertNodeNumChannels(t *harnessTest, ctxb context.Context,
node *lntest.HarnessNode, numChannels int) {
// Poll alice for her list of channels.
@@ -4921,6 +4915,1351 @@ func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest)
closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}
func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) error {
req := &lnrpc.ListChannelsRequest{}
ctxb := context.Background()
for _, node := range nodes {
nodeChans, err := node.ListChannels(ctxb, req)
if err != nil {
return fmt.Errorf("unable to get node chans: %v", err)
}
for _, channel := range nodeChans.Channels {
if len(channel.PendingHtlcs) == 0 {
return fmt.Errorf("node %x has no htlcs: %v",
node.PubKey[:], spew.Sdump(channel))
}
for _, htlc := range channel.PendingHtlcs {
var htlcIsMatch bool
for _, payHash := range payHashes {
if bytes.Equal(htlc.HashLock, payHash) {
htlcIsMatch = true
}
}
if htlcIsMatch {
continue
}
return fmt.Errorf("node %x doesn't have expected "+
"payment hashes: %v", node.PubKey[:],
spew.Sdump(channel.PendingHtlcs))
}
}
}
return nil
}
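// assertNumActiveHtlcs returns true only if every channel on each of the
// given nodes carries exactly numHtlcs pending HTLCs. It returns a bare
// bool rather than an error so it can be dropped directly into
// lntest.WaitPredicate.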
func assertNumActiveHtlcs(nodes []*lntest.HarnessNode, numHtlcs int) bool {
req := &lnrpc.ListChannelsRequest{}
ctxb := context.Background()
for _, node := range nodes {
nodeChans, err := node.ListChannels(ctxb, req)
if err != nil {
return false
}
for _, channel := range nodeChans.Channels {
if len(channel.PendingHtlcs) != numHtlcs {
return false
}
}
}
return true
}
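// assertSpendingTxInMempool polls the miner's mempool every 50ms until it
// finds a transaction spending the given outpoint, failing the test if no
// such transaction appears before the timeout.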
func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client,
timeout time.Duration, chanPoint wire.OutPoint) {
breakTimeout := time.After(timeout)
ticker := time.NewTicker(50 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-breakTimeout:
t.Fatalf("didn't find tx in mempool")
case <-ticker.C:
mempool, err := miner.GetRawMempool()
if err != nil {
t.Fatalf("unable to get mempool: %v", err)
}
if len(mempool) == 0 {
continue
}
for _, txid := range mempool {
tx, err := miner.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to fetch tx: %v", err)
}
for _, txIn := range tx.MsgTx().TxIn {
if txIn.PreviousOutPoint == chanPoint {
return
}
}
}
}
}
}
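// createThreeHopHodlNetwork opens the pair of channels Alice -> Bob ->
// Carol, with Carol started in hodl mode (via the --debughtlc and
// --hodlhtlc flags below) so that any HTLC she receives can only be
// resolved on-chain.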
func createThreeHopHodlNetwork(t *harnessTest,
net *lntest.NetworkHarness) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) {
// We'll start the test by creating a channel between Alice and Bob,
// which will act as the first leg for our multi-hop HTLC.
const chanAmt = 1000000
ctxb := context.Background()
timeout := time.Duration(time.Second * 15)
ctxt, _ := context.WithTimeout(ctxb, timeout)
aliceChanPoint := openChannelAndAssert(
ctxt, t, net, net.Alice, net.Bob, chanAmt, 0,
)
ctxt, _ = context.WithTimeout(ctxb, time.Second*15)
err := net.Alice.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
if err != nil {
t.Fatalf("alice didn't report channel: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, time.Second*15)
err = net.Bob.WaitForNetworkChannelOpen(ctxt, aliceChanPoint)
if err != nil {
t.Fatalf("bob didn't report channel: %v", err)
}
// Next, we'll create a new node "carol" and have Bob connect to her.
// In this test, we'll make carol always hold onto the HTLC; this way,
// Bob will be forced to go to chain to resolve the HTLC.
carol, err := net.NewNode([]string{"--debughtlc", "--hodlhtlc"})
if err != nil {
t.Fatalf("unable to create new node: %v", err)
}
if err := net.ConnectNodes(ctxb, net.Bob, carol); err != nil {
t.Fatalf("unable to connect bob to carol: %v", err)
}
// We'll then create a channel from Bob to Carol. After this channel is
// open, our topology looks like: A -> B -> C.
ctxt, _ = context.WithTimeout(ctxb, timeout)
bobChanPoint := openChannelAndAssert(
ctxt, t, net, net.Bob, carol, chanAmt, 0,
)
ctxt, _ = context.WithTimeout(ctxb, time.Second*15)
err = net.Bob.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
if err != nil {
t.Fatalf("alice didn't report channel: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, time.Second*15)
err = carol.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
if err != nil {
t.Fatalf("bob didn't report channel: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, time.Second*15)
err = net.Alice.WaitForNetworkChannelOpen(ctxt, bobChanPoint)
if err != nil {
t.Fatalf("bob didn't report channel: %v", err)
}
return aliceChanPoint, bobChanPoint, carol
}
// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
// outgoing HTLC is about to time out, then we'll go to chain in order to claim
// it. Any dust HTLCs should be immediately cancelled backwards. Once the
// timeout has been reached, then we should sweep it on-chain, and cancel the
// HTLC backwards.
func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
timeout := time.Duration(time.Second * 15)
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
time.Sleep(time.Second * 1)
// Now that our channels are set up, we'll send two HTLCs from Alice
// to Carol. The first HTLC will be universally considered "dust",
// while the second will be a proper fully valued HTLC.
const (
dustHtlcAmt = btcutil.Amount(100)
htlcAmt = btcutil.Amount(30000)
finalCltvDelta = 40
)
alicePayStream, err := net.Alice.SendPayment(ctxb)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
// We'll create two random payment hashes unknown to carol, then send
// each of them by manually specifying the HTLC details.
carolPubKey := carol.PubKey[:]
dustPayHash := bytes.Repeat([]byte{1}, 32)
payHash := bytes.Repeat([]byte{2}, 32)
err = alicePayStream.Send(&lnrpc.SendRequest{
Dest: carolPubKey,
Amt: int64(dustHtlcAmt),
PaymentHash: dustPayHash,
FinalCltvDelta: finalCltvDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
FinalCltvDelta: finalCltvDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
// Verify that all nodes in the path now have two HTLCs with the
// proper parameters.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, dustPayHash, payHash)
if predErr != nil {
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// We'll now mine enough blocks to trigger Bob's broadcast of his
// commitment transaction due to the fact that the HTLC is about to
// timeout.
numBlocks := uint32(finalCltvDelta - defaultBroadcastDelta)
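// (With finalCltvDelta = 40 and defaultBroadcastDelta = 10, this mines 30
// blocks, leaving the HTLC exactly broadcastDelta blocks from expiry.)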
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Bob's force close transaction should now be found in the mempool.
bobFundingTxid, err := chainhash.NewHash(bobChanPoint.FundingTxid)
if err != nil {
t.Fatalf("unable to create sha hash: %v", err)
}
assertSpendingTxInMempool(
t, net.Miner.Node, time.Second*10, wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
},
)
// At this point, Bob should have cancelled backwards the dust HTLC
// that we sent earlier. This means Alice should now only have a single
// HTLC on her channel.
nodes = []*lntest.HarnessNode{net.Alice}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
if predErr != nil {
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// TODO(roasbeef): need to fix utxn so it can accept incubation for
// timeout that has already passed
//
// * remove after solved
time.Sleep(time.Second * 5)
// We'll now mine the remaining blocks to cause the HTLC itself to
// timeout.
if _, err := net.Miner.Node.Generate(defaultBroadcastDelta); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// The second layer HTLC timeout transaction should now have been
// broadcast on-chain.
_, err = waitForTxInMempool(net.Miner.Node, time.Second*10)
if err != nil {
t.Fatalf("unable to find bob's second layer transaction")
}
// Bob's pending channel report should show that he has a commitment
// output awaiting sweeping, and also that there's an outgoing HTLC
// output pending.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := net.Bob.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("bob should have pending for close chan but doesn't")
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("bob should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
if len(forceCloseChan.PendingHtlcs) == 0 {
t.Fatalf("bob should have pending htlc but doesn't")
}
// Now we'll mine an additional block.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// The block should have confirmed Bob's second layer sweeping
// transaction. Therefore, at this point, there should be no active
// HTLCs on the commitment transaction from Alice -> Bob.
nodes = []*lntest.HarnessNode{net.Alice}
err = lntest.WaitPredicate(func() bool {
return assertNumActiveHtlcs(nodes, 0)
}, time.Second*4)
if err != nil {
t.Fatalf("alice's channel still has active htlc's")
}
// At this point, Bob should show that the pending HTLC has advanced to
// the second stage and is to be swept.
pendingChanResp, err = net.Bob.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
t.Fatalf("bob's htlc should have advanced to the second stage: %v", err)
}
// We'll now mine four more blocks. After the 4th block, a transaction
// sweeping the HTLC output should be broadcast.
if _, err := net.Miner.Node.Generate(4); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = waitForTxInMempool(net.Miner.Node, time.Second*10)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
// Next, we'll mine a final block that should confirm the second-layer
// sweeping transaction.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Once this transaction has been confirmed, Bob should detect that he
// no longer has any pending channels.
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err = net.Bob.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending "+
"channels but shouldn't: %v",
spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ := context.WithTimeout(ctxb, timeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
// Clean up carol's node.
if err := net.ShutdownNode(carol); err != nil {
t.Fatalf("unable to shutdown carol: %v", err)
}
}
// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC
// off-chain, then it goes on chain to claim the HTLC. In this scenario, the
// node that sent the outgoing HTLC should extract the preimage from the sweep
// transaction, and finish settling the HTLC backwards into the route.
func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
timeout := time.Duration(time.Second * 15)
ctxb := context.Background()
defaultCSV := uint32(4)
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
// With the network active, we'll now add a new invoice at Carol's end.
invoiceReq := &lnrpc.Invoice{
Value: 100000,
}
carolInvoice, err := carol.AddInvoice(ctxb, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
alicePayStream, err := net.Alice.SendPayment(ctxb)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, carolInvoice.RHash)
if predErr != nil {
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// Now we'll mine enough blocks to prompt carol to actually go to the
// chain in order to sweep her HTLC since the value is high enough.
// TODO(roasbeef): modify once go to chain policy changes
numBlocks := uint32(defaultBitcoinTimeLockDelta - (2 * defaultBroadcastDelta))
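// (defaultBitcoinTimeLockDelta isn't shown in this diff; assuming lnd's
// usual default of 144, this mines 124 blocks, bringing the HTLC within
// two broadcast deltas of expiry.)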
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
// At this point, Carol should broadcast her active commitment
// transaction in order to go to the chain and sweep her HTLC.
// Additionally, Carol should have broadcast her second layer sweep
// transaction for the HTLC as well.
txids, err := waitForNTxsInMempool(net.Miner.Node, 2, time.Second*15)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := chainhash.NewHash(bobChanPoint.FundingTxid)
if err != nil {
t.Fatalf("unable to create sha hash: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
tx1, err := net.Miner.Node.GetRawTransaction(txids[0])
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
tx1Hash := tx1.MsgTx().TxHash()
tx2, err := net.Miner.Node.GetRawTransaction(txids[1])
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
tx2Hash := tx2.MsgTx().TxHash()
// Of the two transactions, one should be spending from the funding
// transaction, and the second transaction should then be spending from
// the commitment transaction.
var commitHash *chainhash.Hash
if tx1.MsgTx().TxIn[0].PreviousOutPoint == carolFundingPoint {
commitHash = &tx1Hash
if tx2.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("second transaction not spending commit tx: %v",
spew.Sdump(tx2))
}
}
if tx2.MsgTx().TxIn[0].PreviousOutPoint == carolFundingPoint {
commitHash = &tx2Hash
if tx1.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("second transaction not spending commit tx: %v",
spew.Sdump(tx1))
}
}
if commitHash == nil {
t.Fatalf("commit tx not found in mempool")
}
// We'll now mine an additional block which should confirm both the
// second layer transaction as well as the commitment transaction
// itself.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
time.Sleep(time.Second * 4)
// TODO(roasbeef): assert bob pending state as well
// Carol's pending channel report should now show two outputs under
// limbo: her commitment output, as well as the second-layer claim
// output.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := carol.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("carol should have pending for close chan but doesn't")
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("carol should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
// The pending HTLC carol has should also now be in stage 2.
if len(forceCloseChan.PendingHtlcs) != 1 {
t.Fatalf("carol should have pending htlc but doesn't")
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
t.Fatalf("carol's htlc should have advanced to the second "+
"stage: %v", err)
}
// Once the second-level transaction has confirmed, Bob should have
// extracted the preimage from the chain, and sent it back to Alice,
// clearing the HTLC off-chain.
nodes = []*lntest.HarnessNode{net.Alice}
err = lntest.WaitPredicate(func() bool {
return assertNumActiveHtlcs(nodes, 0)
}, time.Second*5)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// If we mine 4 additional blocks, then both outputs should now be
// mature.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// We should have a new transaction in the mempool.
_, err = waitForTxInMempool(net.Miner.Node, time.Second*10)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
// Finally, if we mine an additional block to confirm these two sweep
// transactions, Carol should not show a pending channel in her report
// afterwards.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to mine block: %v", err)
}
pendingChanResp, err = carol.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
t.Fatalf("carol still has pending channels: %v",
spew.Sdump(pendingChanResp))
}
// We'll close out the channel between Alice and Bob, then shutdown
// carol to conclude the test.
ctxt, _ := context.WithTimeout(ctxb, timeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
// Clean up carol's node.
if err := net.ShutdownNode(carol); err != nil {
t.Fatalf("unable to shutdown carol: %v", err)
}
}
// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
// scenario, if the node that extended the HTLC to the final node closes their
// commitment on-chain early, then it eventually recognizes this HTLC as one
// that's timed out. At this point, the node should time out the HTLC, then
// cancel it backwards as normal.
func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
t *harnessTest) {
timeout := time.Duration(time.Second * 15)
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
// With our channels set up, we'll then send a single HTLC from Alice
// to Carol. As Carol is in hodl mode, she won't settle this HTLC which
// opens up the base for our tests.
const (
finalCltvDelta = 40
htlcAmt = btcutil.Amount(30000)
)
alicePayStream, err := net.Alice.SendPayment(ctxb)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
// We'll now send a single HTLC across our multi-hop network.
carolPubKey := carol.PubKey[:]
payHash := bytes.Repeat([]byte{2}, 32)
err = alicePayStream.Send(&lnrpc.SendRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
FinalCltvDelta: finalCltvDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
// Once the HTLC has cleared, all channels in our mini network should
// have the HTLC locked in.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
if predErr != nil {
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// Now that all parties have the HTLC locked in, we'll immediately
// force close the Bob -> Carol channel. This should trigger contract
// resolution mode for both of them.
ctxt, _ := context.WithTimeout(ctxb, timeout)
closeChannelAndAssert(ctxt, t, net, net.Bob, bobChanPoint, true)
// At this point, Bob should have a pending force close channel as he
// just went to chain.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := net.Bob.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("bob should have pending for close chan but doesn't")
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("bob should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
// We'll now mine enough blocks for the HTLC to expire. After this, Bob
// should hand off the now expired HTLC output to the utxo nursery.
if _, err := net.Miner.Node.Generate(finalCltvDelta); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Bob's pending channel report should show that he has a single HTLC
// that's now in stage one.
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err := net.Bob.PendingChannels(
ctxb, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the first stage: %v", err)
return false
}
return true
}, time.Second*10)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// We should also now find a transaction in the mempool, as Bob should
// have broadcast his second layer timeout transaction.
_, err = waitForTxInMempool(net.Miner.Node, time.Second*10)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
// Next, we'll mine an additional block. This should serve to confirm
// the second layer timeout transaction.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// With the second layer timeout transaction confirmed, Bob should have
// cancelled backwards the HTLC that was destined for Carol.
nodes = []*lntest.HarnessNode{net.Alice}
err = lntest.WaitPredicate(func() bool {
return assertNumActiveHtlcs(nodes, 0)
}, time.Second*4)
if err != nil {
t.Fatalf("alice's channel still has active htlc's")
}
// Additionally, Bob should now show that HTLC as being advanced to the
// second stage.
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err := net.Bob.PendingChannels(
ctxb, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
return false
}
return true
}, time.Second*10)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// We'll now mine 4 additional blocks. This should be enough for Bob's
// CSV timelock to expire and the sweeping transaction to be
// broadcast.
if _, err := net.Miner.Node.Generate(4); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
time.Sleep(time.Second * 3)
// We'll then mine a final block which should confirm this second layer
// sweep transaction.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
// At this point, Bob should no longer show any channels as pending
// close.
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err = net.Bob.PendingChannels(
ctxb, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, timeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
// Clean up carol's node.
if err := net.ShutdownNode(carol); err != nil {
t.Fatalf("unable to shutdown carol: %v", err)
}
}
// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a
// multi-hop HTLC, and the final destination of the HTLC force closes the
// channel, then we properly timeout the HTLC on *their* commitment transaction
// once the timeout has expired. Once we sweep the transaction, we should also
// cancel back the initial HTLC.
func testMultHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
t *harnessTest) {
timeout := time.Duration(time.Second * 15)
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
// With our channels set up, we'll then send a single HTLC from Alice
// to Carol. As Carol is in hodl mode, she won't settle this HTLC which
// opens up the base for our tests.
const (
finalCltvDelta = 40
htlcAmt = btcutil.Amount(30000)
)
alicePayStream, err := net.Alice.SendPayment(ctxb)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
// We'll now send a single HTLC across our multi-hop network.
carolPubKey := carol.PubKey[:]
payHash := bytes.Repeat([]byte{2}, 32)
err = alicePayStream.Send(&lnrpc.SendRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
FinalCltvDelta: finalCltvDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
// Once the HTLC has cleared, all the nodes in our mini network should
// show that the HTLC has been locked in.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
if predErr != nil {
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// At this point, we'll now instruct Carol to force close the channel.
// This will let us exercise that Bob is able to sweep the
// expired HTLC on Carol's version of the commitment transaction.
ctxt, _ := context.WithTimeout(ctxb, timeout)
closeChannelAndAssert(ctxt, t, net, carol, bobChanPoint, true)
// At this point, Bob should have a pending force close channel as
// Carol has gone directly to chain.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := net.Bob.PendingChannels(ctxb, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("bob should have pending for close chan but doesn't")
}
// Next, we'll mine enough blocks for the HTLC to expire. At this
// point, Bob should hand off the output to his internal utxo nursery.
if _, err := net.Miner.Node.Generate(finalCltvDelta - 1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// If we check Bob's pending channel report, it should show that he has
// a single HTLC that's now in the second stage, as we skip the initial
// first stage since this is a direct HTLC.
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err := net.Bob.PendingChannels(
ctxb, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
return false
}
return true
}, time.Second*10)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// We'll now mine an additional block to push the HTLC to full
// expiration.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// Bob's sweeping transaction should now be found in the mempool at
// this point.
_, err = waitForTxInMempool(net.Miner.Node, time.Second*10)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
// If we mine an additional block, then this should confirm Bob's
// transaction which sweeps the direct HTLC output.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// Now that the sweeping transaction has been confirmed, Bob should
// cancel back that HTLC. As a result, Alice should not know of any
// active HTLCs.
nodes = []*lntest.HarnessNode{net.Alice}
err = lntest.WaitPredicate(func() bool {
return assertNumActiveHtlcs(nodes, 0)
}, time.Second*4)
if err != nil {
t.Fatalf("alice's channel still has active htlc's")
}
// Now we'll check Bob's pending channel report. Since this was Carol's
// commitment, he doesn't have to wait for any CSV delays. As a result,
// he should show no additional pending transactions.
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err = net.Bob.PendingChannels(
ctxb, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf(predErr.Error())
}
// We'll close out the test by closing the channel from Alice to Bob,
// and then shutting down the new node we created as it's no longer
// needed.
ctxt, _ = context.WithTimeout(ctxb, timeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
if err := net.ShutdownNode(carol); err != nil {
t.Fatalf("unable to shutdown carol: %v", err)
}
}
// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
// we're forced to go to chain with an incoming HTLC, then when we find out the
// preimage via the witness beacon, we properly settle the HTLC on-chain in
// order to ensure we don't lose any funds.
func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
timeout := time.Duration(time.Second * 15)
ctxb := context.Background()
defaultCSV := uint32(4)
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
// With the network active, we'll now add a new invoice at Carol's end.
invoiceReq := &lnrpc.Invoice{
Value: 100000,
}
carolInvoice, err := carol.AddInvoice(ctxb, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
alicePayStream, err := net.Alice.SendPayment(ctxb)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// We'll now wait until all 3 nodes have fully locked in the HTLC we
// just sent.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, carolInvoice.RHash)
if predErr != nil {
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// At this point, Bob decides that he wants to exit the channel
// immediately, so he force closes his commitment transaction.
ctxt, _ := context.WithTimeout(ctxb, timeout)
closeChannelAndAssert(ctxt, t, net, net.Bob, aliceChanPoint, true)
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := uint32(defaultBitcoinTimeLockDelta - (2 * defaultBroadcastDelta))
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
// Carol's commitment transaction should now be in the mempool. She
// should also have broadcast her second level HTLC transaction.
txids, err := waitForNTxsInMempool(net.Miner.Node, 2, time.Second*15)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := chainhash.NewHash(bobChanPoint.FundingTxid)
if err != nil {
t.Fatalf("unable to create sha hash: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// Of the two transactions, one should be spending from the funding
// transaction, and the second transaction should then be spending from
// the commitment transaction.
var commitHash *chainhash.Hash
tx1, err := net.Miner.Node.GetRawTransaction(txids[0])
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
tx1Hash := tx1.MsgTx().TxHash()
tx2, err := net.Miner.Node.GetRawTransaction(txids[1])
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
tx2Hash := tx2.MsgTx().TxHash()
if tx1.MsgTx().TxIn[0].PreviousOutPoint == carolFundingPoint {
commitHash = &tx1Hash
if tx2.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("second transaction not spending commit tx: %v",
spew.Sdump(tx2))
}
}
if tx2.MsgTx().TxIn[0].PreviousOutPoint == carolFundingPoint {
commitHash = &tx2Hash
if tx1.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("second transaction not spending commit tx: %v",
spew.Sdump(tx1))
}
}
if commitHash == nil {
t.Fatalf("commit tx not found in mempool")
}
// We'll now mine a block which should confirm both the second layer
// transaction as well as the commitment transaction.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// At this point, Bob should detect that Carol has revealed the
// preimage on-chain. As a result, he should now attempt to broadcast
// his second-layer claim transaction to claim the output.
_, err = waitForTxInMempool(net.Miner.Node, time.Second*10)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
// At this point, Bob should have broadcast his second layer success
// transaction, and should have sent it to the nursery for incubation.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err := net.Bob.PendingChannels(
ctxb, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels {
if forceCloseChan.Channel.LocalBalance != 0 {
continue
}
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the first stage: %v", err)
return false
}
}
return true
}, time.Second*10)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// If we then mine 4 additional blocks, Bob should pull the output
// destined for him.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// At this point, Bob should detect that he has no pending channels
// anymore, as the confirmation of the sweep transaction we detected
// above has just resolved the contract.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err := net.Bob.PendingChannels(
ctxb, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf(predErr.Error())
}
// Clean up carol's node.
if err := net.ShutdownNode(carol); err != nil {
t.Fatalf("unable to shutdown carol: %v", err)
}
}
// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
// if the remote party goes to chain while we have an incoming HTLC, then when
// we find out the preimage via the witness beacon, we properly settle the
// HTLC on-chain in order to ensure that we don't lose any funds.
func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
timeout := time.Duration(time.Second * 15)
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
// With the network active, we'll now add a new invoice at Carol's end.
invoiceReq := &lnrpc.Invoice{
Value: 100000,
}
carolInvoice, err := carol.AddInvoice(ctxb, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
alicePayStream, err := net.Alice.SendPayment(ctxb)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// We'll now wait until all 3 nodes have fully locked in the HTLC we
// just sent.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, carolInvoice.RHash)
if predErr != nil {
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// Next, Alice decides that she wants to exit the channel, so she'll
// immediately force close the channel by broadcasting her commitment
// transaction.
ctxt, _ := context.WithTimeout(ctxb, timeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, true)
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
claimDelta := uint32(2 * defaultBroadcastDelta)
numBlocks := uint32(defaultBitcoinTimeLockDelta - claimDelta)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
// Carol's commitment transaction should now be in the mempool. She
// should also have broadcast her second level HTLC transaction.
txids, err := waitForNTxsInMempool(net.Miner.Node, 2, time.Second*15)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := chainhash.NewHash(bobChanPoint.FundingTxid)
if err != nil {
t.Fatalf("unable to create sha hash: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// Of the two transactions, one should be spending from the funding
// transaction, and the second transaction should then be spending from
// the commitment transaction.
var commitHash *chainhash.Hash
tx1, err := net.Miner.Node.GetRawTransaction(txids[0])
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
tx1Hash := tx1.MsgTx().TxHash()
tx2, err := net.Miner.Node.GetRawTransaction(txids[1])
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
tx2Hash := tx2.MsgTx().TxHash()
if tx1.MsgTx().TxIn[0].PreviousOutPoint == carolFundingPoint {
commitHash = &tx1Hash
if tx2.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("second transaction not spending commit tx: %v",
spew.Sdump(tx2))
}
}
if tx2.MsgTx().TxIn[0].PreviousOutPoint == carolFundingPoint {
commitHash = &tx2Hash
if tx1.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("second transaction not spending commit tx: %v",
spew.Sdump(tx1))
}
}
if commitHash == nil {
t.Fatalf("commit tx not found in mempool")
}
// We'll now mine a block which should confirm both the second layer
// transaction as well as the commitment transaction.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// With the block mined above, Bob should detect that Carol is
// attempting to sweep the HTLC on-chain, and should obtain the
// preimage.
_, err = waitForTxInMempool(net.Miner.Node, time.Second*10)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
// We'll now mine another block, this should confirm the sweep
// transaction that Bob broadcast in the prior stage.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
// Now that the sweeping transaction has been confirmed, Bob should now
// recognize that all contracts have been fully resolved, and show no
// pending close channels.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = lntest.WaitPredicate(func() bool {
pendingChanResp, err := net.Bob.PendingChannels(
ctxb, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*5)
if err != nil {
t.Fatalf(predErr.Error())
}
if err := net.ShutdownNode(carol); err != nil {
t.Fatalf("unable to shutdown carol: %v", err)
}
}
type testCase struct {
name string
test func(net *lntest.NetworkHarness, t *harnessTest)
@@ -5008,6 +6347,44 @@ var testsCases = []*testCase{
name: "async bidirectional payments",
test: testBidirectionalAsyncPayments,
},
{
// bob: outgoing our commit timeout
// carol: incoming their commit watch and see timeout
name: "test multi-hop htlc local force close immediate expiry",
test: testMultiHopHtlcLocalTimeout,
},
{
// bob: outgoing watch and see, they sweep on chain
// carol: incoming our commit, know preimage
name: "test multi-hop htlc receiver chain claim",
test: testMultiHopReceiverChainClaim,
},
{
// bob: outgoing our commit watch and see timeout
// carol: incoming their commit watch and see timeout
name: "test multi-hop local force close on-chain htlc timeout",
test: testMultiHopLocalForceCloseOnChainHtlcTimeout,
},
{
// bob: outgoing their commit watch and see timeout
// carol: incoming our commit watch and see timeout
name: "test multi-hop remote force close on-chain htlc timeout",
test: testMultHopRemoteForceCloseOnChainHtlcTimeout,
},
{
// bob: outgoing our commit watch and see, they sweep on chain
// bob: incoming our commit watch and learn preimage
// carol: incoming their commit know preimage
name: "test multi-hop htlc local chain claim",
test: testMultiHopHtlcLocalChainClaim,
},
{
// bob: outgoing their commit watch and see, they sweep on chain
// bob: incoming their commit watch and learn preimage
// carol: incoming our commit know preimage
name: "test multi-hop htlc remote chain claim",
test: testMultiHopHtlcRemoteChainClaim,
},
{
// TODO(roasbeef): test always needs to be last as Bob's state
// is borked since we trick him into attempting to cheat Alice?

@@ -238,7 +238,8 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
broadcastTxChan := make(chan *wire.MsgTx)
responder, responderChan, initiatorChan, cleanUp, err := createTestPeer(
notifier, broadcastTxChan)
notifier, broadcastTxChan,
)
if err != nil {
t.Fatalf("unable to create test channels: %v", err)
}