itest: cleanup multi-hop tests

In preparation for fixing an issue with the mempool wait, we clean up
the multi-hop itests a bit: we fix the formatting, consistently use the
require library for assertions, and simplify some of the wait
predicates. Commonly used code is also extracted into helper functions.
Author: Oliver Gugger
Date:   2020-10-26 14:21:09 +01:00
parent d1b46211d8
commit 34439fbc2a
7 changed files with 367 additions and 854 deletions
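
The recurring simplification in the diff below collapses a hand-rolled
wait.Predicate closure, which smuggles its failure out through a shared
predErr variable and then fails via t.Fatalf, into wait.NoError plus a
require assertion. A minimal before/after sketch of the pattern, using
the same names that appear in the hunks (nodes, payHash,
assertActiveHtlcs and the harness's defaultTimeout constant):

	// Before: the predicate returns a bool and the actual error is
	// carried out-of-band in predErr.
	var predErr error
	err = wait.Predicate(func() bool {
		predErr = assertActiveHtlcs(nodes, payHash[:])
		return predErr == nil
	}, time.Second*15)
	if err != nil {
		t.Fatalf("htlc mismatch: %v", predErr)
	}

	// After: the predicate returns the error directly, and the shared
	// defaultTimeout constant replaces the magic 15-second timeout.
	err = wait.NoError(func() error {
		return assertActiveHtlcs(nodes, payHash[:])
	}, defaultTimeout)
	require.NoError(t.t, err)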

@@ -3,10 +3,8 @@ package itest
 import (
 	"context"
 	"fmt"
-	"time"
 
 	"github.com/btcsuite/btcd/wire"
-	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd"
 	"github.com/lightningnetwork/lnd/lncfg"
 	"github.com/lightningnetwork/lnd/lnrpc"
@@ -15,6 +13,7 @@ import (
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lntypes"
+	"github.com/stretchr/testify/require"
 )
 
 // testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
@@ -51,9 +50,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
 	defer cancel()
 	carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
-	if err != nil {
-		t.Fatalf("unable to add invoice: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Now that we've created the invoice, we'll send a single payment from
 	// Alice to Carol. We won't wait for the response however, as Carol
@@ -62,32 +59,21 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	defer cancel()
 
 	_, err = alice.RouterClient.SendPaymentV2(
-		ctx,
-		&routerrpc.SendPaymentRequest{
+		ctx, &routerrpc.SendPaymentRequest{
 			PaymentRequest: carolInvoice.PaymentRequest,
 			TimeoutSeconds: 60,
 			FeeLimitMsat:   noFeeLimitMsat,
 		},
 	)
-	if err != nil {
-		t.Fatalf("unable to send payment: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// At this point, all 3 nodes should now have an active channel with
 	// the created HTLC pending on all of them.
-	var predErr error
 	nodes := []*lntest.HarnessNode{alice, bob, carol}
-	err = wait.Predicate(func() bool {
-		predErr = assertActiveHtlcs(nodes, payHash[:])
-		if predErr != nil {
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("htlc mismatch: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertActiveHtlcs(nodes, payHash[:])
+	}, defaultTimeout)
+	require.NoError(t.t, err)
 
 	// Wait for carol to mark invoice as accepted. There is a small gap to
 	// bridge between adding the htlc to the channel and executing the exit
@@ -101,8 +87,9 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	// At this point, Bob decides that he wants to exit the channel
 	// immediately, so he force closes his commitment transaction.
 	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
-	bobForceClose := closeChannelAndAssertType(ctxt, t, net, bob,
-		aliceChanPoint, c == commitTypeAnchors, true)
+	bobForceClose := closeChannelAndAssertType(
+		ctxt, t, net, bob, aliceChanPoint, c == commitTypeAnchors, true,
+	)
 
 	// Alice will sweep her commitment output immediately. If there are
 	// anchors, Alice will also sweep hers.
@@ -113,16 +100,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	_, err = waitForNTxsInMempool(
 		net.Miner.Node, expectedTxes, minerMempoolTimeout,
 	)
-	if err != nil {
-		t.Fatalf("unable to find alice's sweep tx in miner mempool: %v",
-			err)
-	}
+	require.NoError(t.t, err)
 
 	// Suspend Bob to force Carol to go to chain.
 	restartBob, err := net.SuspendNode(bob)
-	if err != nil {
-		t.Fatalf("unable to suspend bob: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Settle invoice. This will just mark the invoice as settled, as there
 	// is no link anymore to remove the htlc from the commitment tx. For
@@ -134,31 +116,24 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
 		Preimage: preimage[:],
 	})
-	if err != nil {
-		t.Fatalf("settle invoice: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// We'll now mine enough blocks so Carol decides that she needs to go
 	// on-chain to claim the HTLC as Bob has been inactive.
 	numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry -
 		lncfg.DefaultIncomingBroadcastDelta))
-	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-		t.Fatalf("unable to generate blocks")
-	}
+	_, err = net.Miner.Node.Generate(numBlocks)
+	require.NoError(t.t, err)
 
 	// Carol's commitment transaction should now be in the mempool. If there
 	// is an anchor, Carol will sweep that too.
 	_, err = waitForNTxsInMempool(
 		net.Miner.Node, expectedTxes, minerMempoolTimeout,
 	)
-	if err != nil {
-		t.Fatalf("transactions not found in mempool: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
-	if err != nil {
-		t.Fatalf("unable to get txid: %v", err)
-	}
+	require.NoError(t.t, err)
 	carolFundingPoint := wire.OutPoint{
 		Hash:  *bobFundingTxid,
 		Index: bobChanPoint.OutputIndex,
@@ -174,16 +149,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	// Mine a block that should confirm the commit tx, the anchor if present
 	// and the coinbase.
 	block := mineBlocks(t, net, 1, expectedTxes)[0]
-	if len(block.Transactions) != expectedTxes+1 {
-		t.Fatalf("expected %v transactions in block, got %v",
-			expectedTxes+1, len(block.Transactions))
-	}
+	require.Len(t.t, block.Transactions, expectedTxes+1)
 	assertTxInBlock(t, block, &closingTxid)
 
 	// Restart bob again.
-	if err := restartBob(); err != nil {
-		t.Fatalf("unable to restart bob: %v", err)
-	}
+	err = restartBob()
+	require.NoError(t.t, err)
 
 	// After the force close transacion is mined, Carol should broadcast her
 	// second level HTLC transacion. Bob will broadcast a sweep tx to sweep
@@ -198,9 +169,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	txes, err := getNTxsFromMempool(
 		net.Miner.Node, expectedTxes, minerMempoolTimeout,
 	)
-	if err != nil {
-		t.Fatalf("transactions not found in mempool: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Both Carol's second level transaction and Bob's sweep should be
 	// spending from the commitment transaction.
@@ -209,16 +178,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	// At this point we suspend Alice to make sure she'll handle the
 	// on-chain settle after a restart.
 	restartAlice, err := net.SuspendNode(alice)
-	if err != nil {
-		t.Fatalf("unable to suspend alice: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Mine a block to confirm the two transactions (+ the coinbase).
 	block = mineBlocks(t, net, 1, expectedTxes)[0]
-	if len(block.Transactions) != expectedTxes+1 {
-		t.Fatalf("expected 3 transactions in block, got %v",
-			len(block.Transactions))
-	}
+	require.Len(t.t, block.Transactions, expectedTxes+1)
 
 	// Keep track of the second level tx maturity.
 	carolSecondLevelCSV := uint32(defaultCSV)
@@ -226,73 +190,48 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	// When Bob notices Carol's second level transaction in the block, he
 	// will extract the preimage and broadcast a second level tx to claim
 	// the HTLC in his (already closed) channel with Alice.
-	bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node,
-		minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("transactions not found in mempool: %v", err)
-	}
+	bobSecondLvlTx, err := waitForTxInMempool(
+		net.Miner.Node, minerMempoolTimeout,
+	)
+	require.NoError(t.t, err)
 
 	// It should spend from the commitment in the channel with Alice.
 	tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx)
-	if err != nil {
-		t.Fatalf("unable to get txn: %v", err)
-	}
-	if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose {
-		t.Fatalf("tx did not spend from bob's force close tx")
-	}
+	require.NoError(t.t, err)
+	require.Equal(
+		t.t, *bobForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
+	)
 
 	// At this point, Bob should have broadcast his second layer success
 	// transaction, and should have sent it to the nursery for incubation.
-	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-
-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending for " +
-				"close chan but doesn't")
-			return false
-		}
-
-		for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels {
-			if forceCloseChan.Channel.LocalBalance != 0 {
-				continue
-			}
-
-			if len(forceCloseChan.PendingHtlcs) != 1 {
-				predErr = fmt.Errorf("bob should have pending htlc " +
-					"but doesn't")
-				return false
-			}
-			stage := forceCloseChan.PendingHtlcs[0].Stage
-			if stage != 1 {
-				predErr = fmt.Errorf("bob's htlc should have "+
-					"advanced to the first stage but was "+
-					"stage: %v", stage)
-				return false
-			}
-		}
-
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if c.Channel.LocalBalance != 0 {
+				return nil
+			}
+
+			if len(c.PendingHtlcs) != 1 {
+				return fmt.Errorf("bob should have pending " +
+					"htlc but doesn't")
+			}
+
+			if c.PendingHtlcs[0].Stage != 1 {
+				return fmt.Errorf("bob's htlc should have "+
+					"advanced to the first stage but was "+
+					"stage: %v", c.PendingHtlcs[0].Stage)
+			}
+
+			return nil
+		},
+	)
+	require.NoError(t.t, err)
 
 	// We'll now mine a block which should confirm Bob's second layer
 	// transaction.
 	block = mineBlocks(t, net, 1, 1)[0]
-	if len(block.Transactions) != 2 {
-		t.Fatalf("expected 2 transactions in block, got %v",
-			len(block.Transactions))
-	}
+	require.Len(t.t, block.Transactions, 2)
 	assertTxInBlock(t, block, bobSecondLvlTx)
 
 	// Keep track of Bob's second level maturity, and decrement our track
@@ -302,21 +241,17 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 
 	// Now that the preimage from Bob has hit the chain, restart Alice to
 	// ensure she'll pick it up.
-	if err := restartAlice(); err != nil {
-		t.Fatalf("unable to restart alice: %v", err)
-	}
+	err = restartAlice()
+	require.NoError(t.t, err)
 
 	// If we then mine 3 additional blocks, Carol's second level tx should
 	// mature, and she can pull the funds from it with a sweep tx.
-	if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
-		t.Fatalf("unable to generate block: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
+	require.NoError(t.t, err)
 	bobSecondLevelCSV -= carolSecondLevelCSV
 
 	carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Mining one additional block, Bob's second level tx is mature, and he
 	// can sweep the output.
@@ -324,18 +259,14 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	assertTxInBlock(t, block, carolSweep)
 
 	bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's sweeping transaction")
-	}
+	require.NoError(t.t, err)
 
 	// Make sure it spends from the second level tx.
 	tx, err = net.Miner.Node.GetRawTransaction(bobSweep)
-	if err != nil {
-		t.Fatalf("unable to get txn: %v", err)
-	}
-	if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx {
-		t.Fatalf("tx did not spend from bob's second level tx")
-	}
+	require.NoError(t.t, err)
+	require.Equal(
+		t.t, *bobSecondLvlTx, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
+	)
 
 	// When we mine one additional block, that will confirm Bob's sweep.
 	// Now Bob should have no pending channels anymore, as this just
@@ -343,77 +274,15 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	block = mineBlocks(t, net, 1, 1)[0]
 	assertTxInBlock(t, block, bobSweep)
 
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob still has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-		req := &lnrpc.ListChannelsRequest{}
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		chanInfo, err := bob.ListChannels(ctxt, req)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for open "+
-				"channels: %v", err)
-			return false
-		}
-		if len(chanInfo.Channels) != 0 {
-			predErr = fmt.Errorf("Bob should have no open "+
-				"channels, instead he has %v",
-				len(chanInfo.Channels))
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	require.NoError(t.t, err)
+	assertNodeNumChannels(t, bob, 0)
 
 	// Also Carol should have no channels left (open nor pending).
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := carol.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob carol has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-		req := &lnrpc.ListChannelsRequest{}
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		chanInfo, err := carol.ListChannels(ctxt, req)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for open "+
-				"channels: %v", err)
-			return false
-		}
-		if len(chanInfo.Channels) != 0 {
-			predErr = fmt.Errorf("carol should have no open "+
-				"channels, instead she has %v",
-				len(chanInfo.Channels))
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+	require.NoError(t.t, err)
+	assertNodeNumChannels(t, carol, 0)
 
 	// Finally, check that the Alice's payment is correctly marked
 	// succeeded.
@@ -421,7 +290,5 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	err = checkPaymentStatus(
 		ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
 	)
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
+	require.NoError(t.t, err)
 }
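
Several hunks above replace a whole wait.Predicate block with a single
call to waitForNumChannelPendingForceClose, one of the helpers this
commit extracts into shared code (its definition is in one of the
changed files not included in this excerpt). Judging purely from the
call sites, where lnrpcForceCloseChannel is presumably a local alias for
lnrpc.PendingChannelsResponse_ForceClosedChannel, a plausible sketch of
the helper could look like this; the actual implementation may differ:

	// waitForNumChannelPendingForceClose waits until the given node
	// reports the expected number of pending force-closed channels and,
	// if a per-channel predicate is supplied, until every reported
	// channel satisfies it.
	func waitForNumChannelPendingForceClose(ctx context.Context,
		node *lntest.HarnessNode, expectedNum int,
		perChanPredicate func(*lnrpcForceCloseChannel) error) error {

		return wait.NoError(func() error {
			resp, err := node.PendingChannels(
				ctx, &lnrpc.PendingChannelsRequest{},
			)
			if err != nil {
				return fmt.Errorf("unable to query for "+
					"pending channels: %v", err)
			}

			forceCloseChans := resp.PendingForceClosingChannels
			if len(forceCloseChans) != expectedNum {
				return fmt.Errorf("expected %d pending force "+
					"close channels, got %d", expectedNum,
					len(forceCloseChans))
			}

			if perChanPredicate == nil {
				return nil
			}

			for _, channel := range forceCloseChans {
				if err := perChanPredicate(channel); err != nil {
					return err
				}
			}

			return nil
		}, defaultTimeout)
	}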

@@ -2,19 +2,18 @@ package itest
 
 import (
 	"context"
-	"fmt"
 	"time"
 
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
-	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd"
 	"github.com/lightningnetwork/lnd/lncfg"
 	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
+	"github.com/stretchr/testify/require"
 )
 
 // testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
@@ -58,8 +57,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	payHash := makeFakePayHash(t)
 	_, err := alice.RouterClient.SendPaymentV2(
-		ctx,
-		&routerrpc.SendPaymentRequest{
+		ctx, &routerrpc.SendPaymentRequest{
 			Dest:           carolPubKey,
 			Amt:            int64(dustHtlcAmt),
 			PaymentHash:    dustPayHash,
@@ -68,13 +66,10 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 			FeeLimitMsat:   noFeeLimitMsat,
 		},
 	)
-	if err != nil {
-		t.Fatalf("unable to send alice htlc: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	_, err = alice.RouterClient.SendPaymentV2(
-		ctx,
-		&routerrpc.SendPaymentRequest{
+		ctx, &routerrpc.SendPaymentRequest{
 			Dest:           carolPubKey,
 			Amt:            int64(htlcAmt),
 			PaymentHash:    payHash,
@@ -83,21 +78,15 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 			FeeLimitMsat:   noFeeLimitMsat,
 		},
 	)
-	if err != nil {
-		t.Fatalf("unable to send alice htlc: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Verify that all nodes in the path now have two HTLC's with the
 	// proper parameters.
-	var predErr error
 	nodes := []*lntest.HarnessNode{alice, bob, carol}
-	err = wait.Predicate(func() bool {
-		predErr = assertActiveHtlcs(nodes, dustPayHash, payHash)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("htlc mismatch: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertActiveHtlcs(nodes, dustPayHash, payHash)
+	}, defaultTimeout)
+	require.NoError(t.t, err)
 
 	// Increase the fee estimate so that the following force close tx will
 	// be cpfp'ed.
@@ -110,9 +99,8 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	numBlocks := padCLTV(
 		uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
 	)
-	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(numBlocks)
+	require.NoError(t.t, err)
 
 	// Bob's force close transaction should now be found in the mempool. If
 	// there are anchors, we also expect Bob's anchor sweep.
@@ -122,15 +110,11 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	}
 
 	bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
-	if err != nil {
-		t.Fatalf("unable to get txid: %v", err)
-	}
+	require.NoError(t.t, err)
 	_, err = waitForNTxsInMempool(
 		net.Miner.Node, expectedTxes, minerMempoolTimeout,
 	)
-	if err != nil {
-		t.Fatalf("unable to find closing txid: %v", err)
-	}
+	require.NoError(t.t, err)
 	closeTx := getSpendingTxInMempool(
 		t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
 			Hash:  *bobFundingTxid,
@@ -146,30 +130,25 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	// that we sent earlier. This means Alice should now only have a single
 	// HTLC on her channel.
 	nodes = []*lntest.HarnessNode{alice}
-	err = wait.Predicate(func() bool {
-		predErr = assertActiveHtlcs(nodes, payHash)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("htlc mismatch: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertActiveHtlcs(nodes, payHash)
+	}, defaultTimeout)
+	require.NoError(t.t, err)
 
 	// With the closing transaction confirmed, we should expect Bob's HTLC
 	// timeout transaction to be broadcast due to the expiry being reached.
 	// If there are anchors, we also expect Carol's anchor sweep now.
-	txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
-	}
+	txes, err := getNTxsFromMempool(
+		net.Miner.Node, expectedTxes, minerMempoolTimeout,
	)
+	require.NoError(t.t, err)
 
 	// Lookup the timeout transaction that is expected to spend from the
 	// closing tx. We distinguish it from a possibly anchor sweep by value.
 	var htlcTimeout *chainhash.Hash
 	for _, tx := range txes {
 		prevOp := tx.TxIn[0].PreviousOutPoint
-		if prevOp.Hash != closeTxid {
-			t.Fatalf("tx not spending from closing tx")
-		}
+		require.Equal(t.t, closeTxid, prevOp.Hash)
 
 		// Assume that the timeout tx doesn't spend an output of exactly
 		// the size of the anchor.
@@ -178,9 +157,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 			htlcTimeout = &hash
 		}
 	}
-	if htlcTimeout == nil {
-		t.Fatalf("htlc timeout tx not found in mempool")
-	}
+	require.NotNil(t.t, htlcTimeout)
 
 	// We'll mine the remaining blocks in order to generate the sweep
 	// transaction of Bob's commitment output.
@@ -188,9 +165,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 
 	// Check that the sweep spends from the mined commitment.
 	txes, err = getNTxsFromMempool(net.Miner.Node, 1, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("sweep not found: %v", err)
-	}
+	require.NoError(t.t, err)
 	assertAllTxesSpendFrom(t, txes, closeTxid)
 
 	// Bob's pending channel report should show that he has a commitment
@@ -199,21 +174,12 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
 	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 	pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest)
-	if err != nil {
-		t.Fatalf("unable to query for pending channels: %v", err)
-	}
+	require.NoError(t.t, err)
 
-	if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-		t.Fatalf("bob should have pending for close chan but doesn't")
-	}
+	require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
 	forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-	if forceCloseChan.LimboBalance == 0 {
-		t.Fatalf("bob should have nonzero limbo balance instead "+
-			"has: %v", forceCloseChan.LimboBalance)
-	}
-	if len(forceCloseChan.PendingHtlcs) == 0 {
-		t.Fatalf("bob should have pending htlc but doesn't")
-	}
+	require.NotZero(t.t, forceCloseChan.LimboBalance)
+	require.NotZero(t.t, len(forceCloseChan.PendingHtlcs))
 
 	// Now we'll mine an additional block, which should confirm Bob's commit
 	// sweep. This block should also prompt Bob to broadcast their second
@@ -230,60 +196,33 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 	// Therefore, at this point, there should be no active HTLC's on the
 	// commitment transaction from Alice -> Bob.
 	nodes = []*lntest.HarnessNode{alice}
-	err = wait.Predicate(func() bool {
-		predErr = assertNumActiveHtlcs(nodes, 0)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertNumActiveHtlcs(nodes, 0)
+	}, defaultTimeout)
+	require.NoError(t.t, err)
 
 	// At this point, Bob should show that the pending HTLC has advanced to
 	// the second stage and is to be swept.
 	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
-	if err != nil {
-		t.Fatalf("unable to query for pending channels: %v", err)
-	}
+	require.NoError(t.t, err)
 	forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
-	if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-		t.Fatalf("bob's htlc should have advanced to the second stage: %v", err)
-	}
+	require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
 
 	// Next, we'll mine a final block that should confirm the second-layer
 	// sweeping transaction.
-	if _, err := net.Miner.Node.Generate(1); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(1)
+	require.NoError(t.t, err)
 
 	// Once this transaction has been confirmed, Bob should detect that he
 	// no longer has any pending channels.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob still has pending "+
-				"channels but shouldn't: %v",
-				spew.Sdump(pendingChanResp))
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	require.NoError(t.t, err)
 
 	// Coop close channel, expect no anchors.
 	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
 	closeChannelAndAssertType(
-		ctxt, t, net, alice, aliceChanPoint, false,
-		false,
+		ctxt, t, net, alice, aliceChanPoint, false, false,
 	)
 }

@@ -2,11 +2,9 @@ package itest
 
 import (
 	"context"
-	"fmt"
 	"time"
 
 	"github.com/btcsuite/btcd/wire"
-	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd"
 	"github.com/lightningnetwork/lnd/lncfg"
 	"github.com/lightningnetwork/lnd/lnrpc"
@@ -15,6 +13,7 @@ import (
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lntypes"
+	"github.com/stretchr/testify/require"
 )
 
 // testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
@@ -53,9 +52,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
 	defer cancel()
 	carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
-	if err != nil {
-		t.Fatalf("unable to add invoice: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Now that we've created the invoice, we'll send a single payment from
 	// Alice to Carol. We won't wait for the response however, as Carol
@@ -64,32 +61,21 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	defer cancel()
 
 	_, err = alice.RouterClient.SendPaymentV2(
-		ctx,
-		&routerrpc.SendPaymentRequest{
+		ctx, &routerrpc.SendPaymentRequest{
 			PaymentRequest: carolInvoice.PaymentRequest,
 			TimeoutSeconds: 60,
 			FeeLimitMsat:   noFeeLimitMsat,
 		},
 	)
-	if err != nil {
-		t.Fatalf("unable to send payment: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// At this point, all 3 nodes should now have an active channel with
 	// the created HTLC pending on all of them.
-	var predErr error
 	nodes := []*lntest.HarnessNode{alice, bob, carol}
-	err = wait.Predicate(func() bool {
-		predErr = assertActiveHtlcs(nodes, payHash[:])
-		if predErr != nil {
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("htlc mismatch: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertActiveHtlcs(nodes, payHash[:])
+	}, defaultTimeout)
+	require.NoError(t.t, err)
 
 	// Wait for carol to mark invoice as accepted. There is a small gap to
 	// bridge between adding the htlc to the channel and executing the exit
@@ -97,9 +83,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	waitForInvoiceAccepted(t, carol, payHash)
 
 	restartBob, err := net.SuspendNode(bob)
-	if err != nil {
-		t.Fatalf("unable to suspend bob: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Settle invoice. This will just mark the invoice as settled, as there
 	// is no link anymore to remove the htlc from the commitment tx. For
@@ -111,9 +95,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
 		Preimage: preimage[:],
 	})
-	if err != nil {
-		t.Fatalf("settle invoice: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Increase the fee estimate so that the following force close tx will
 	// be cpfp'ed.
@@ -125,9 +107,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	numBlocks := padCLTV(uint32(
 		invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
 	))
-	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-		t.Fatalf("unable to generate blocks")
-	}
+	_, err = net.Miner.Node.Generate(numBlocks)
+	require.NoError(t.t, err)
 
 	// At this point, Carol should broadcast her active commitment
 	// transaction in order to go to the chain and sweep her HTLC. If there
@@ -139,14 +120,10 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	_, err = getNTxsFromMempool(
 		net.Miner.Node, expectedTxes, minerMempoolTimeout,
 	)
-	if err != nil {
-		t.Fatalf("expected transaction not found in mempool: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
-	if err != nil {
-		t.Fatalf("unable to get txid: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	carolFundingPoint := wire.OutPoint{
 		Hash:  *bobFundingTxid,
@@ -164,9 +141,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	mineBlocks(t, net, 1, expectedTxes)
 
 	// Restart bob again.
-	if err := restartBob(); err != nil {
-		t.Fatalf("unable to restart bob: %v", err)
-	}
+	err = restartBob()
+	require.NoError(t.t, err)
 
 	// After the force close transaction is mined, Carol should broadcast
 	// her second level HTLC transaction. Bob will broadcast a sweep tx to
@@ -178,20 +154,18 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	if c == commitTypeAnchors {
 		expectedTxes = 3
 	}
-	txes, err := getNTxsFromMempool(net.Miner.Node,
-		expectedTxes, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("transactions not found in mempool: %v", err)
-	}
+	txes, err := getNTxsFromMempool(
+		net.Miner.Node, expectedTxes, minerMempoolTimeout,
+	)
+	require.NoError(t.t, err)
 
 	// All transactions should be spending from the commitment transaction.
 	assertAllTxesSpendFrom(t, txes, closingTxid)
 
 	// We'll now mine an additional block which should confirm both the
 	// second layer transactions.
-	if _, err := net.Miner.Node.Generate(1); err != nil {
-		t.Fatalf("unable to generate block: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(1)
+	require.NoError(t.t, err)
 
 	time.Sleep(time.Second * 4)
@@ -203,98 +177,52 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
 	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 	pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest)
-	if err != nil {
-		t.Fatalf("unable to query for pending channels: %v", err)
-	}
+	require.NoError(t.t, err)
 
-	if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-		t.Fatalf("carol should have pending for close chan but doesn't")
-	}
+	require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
 	forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-	if forceCloseChan.LimboBalance == 0 {
-		t.Fatalf("carol should have nonzero limbo balance instead "+
-			"has: %v", forceCloseChan.LimboBalance)
-	}
+	require.NotZero(t.t, forceCloseChan.LimboBalance)
 
 	// The pending HTLC carol has should also now be in stage 2.
-	if len(forceCloseChan.PendingHtlcs) != 1 {
-		t.Fatalf("carol should have pending htlc but doesn't")
-	}
-	if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-		t.Fatalf("carol's htlc should have advanced to the second "+
-			"stage: %v", err)
-	}
+	require.Len(t.t, forceCloseChan.PendingHtlcs, 1)
+	require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
 
 	// Once the second-level transaction confirmed, Bob should have
 	// extracted the preimage from the chain, and sent it back to Alice,
 	// clearing the HTLC off-chain.
 	nodes = []*lntest.HarnessNode{alice}
-	err = wait.Predicate(func() bool {
-		predErr = assertNumActiveHtlcs(nodes, 0)
-		if predErr != nil {
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("htlc mismatch: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertNumActiveHtlcs(nodes, 0)
+	}, defaultTimeout)
+	require.NoError(t.t, err)
 
 	// If we mine 4 additional blocks, then both outputs should now be
 	// mature.
-	if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(defaultCSV)
+	require.NoError(t.t, err)
 
 	// We should have a new transaction in the mempool.
 	_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's sweeping transaction: %v", err)
-	}
+	require.NoError(t.t, err)
 
 	// Finally, if we mine an additional block to confirm these two sweep
 	// transactions, Carol should not show a pending channel in her report
 	// afterwards.
-	if _, err := net.Miner.Node.Generate(1); err != nil {
-		t.Fatalf("unable to mine block: %v", err)
-	}
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("carol still has pending channels: %v",
-				spew.Sdump(pendingChanResp))
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	_, err = net.Miner.Node.Generate(1)
+	require.NoError(t.t, err)
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+	require.NoError(t.t, err)
 
 	// The invoice should show as settled for Carol, indicating that it was
 	// swept on-chain.
 	invoicesReq := &lnrpc.ListInvoiceRequest{}
 	invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
-	if err != nil {
-		t.Fatalf("unable to retrieve invoices: %v", err)
-	}
-	if len(invoicesResp.Invoices) != 1 {
-		t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
-	}
+	require.NoError(t.t, err)
+	require.Len(t.t, invoicesResp.Invoices, 1)
 	invoice := invoicesResp.Invoices[0]
-	if invoice.State != lnrpc.Invoice_SETTLED {
-		t.Fatalf("expected invoice to be settled on chain")
-	}
-	if invoice.AmtPaidSat != invoiceAmt {
-		t.Fatalf("expected invoice to be settled with %d sat, got "+
-			"%d sat", invoiceAmt, invoice.AmtPaidSat)
-	}
+	require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
+	require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)
 
 	// Finally, check that the Alice's payment is correctly marked
 	// succeeded.
@@ -302,9 +230,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 	err = checkPaymentStatus(
 		ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
 	)
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
+	require.NoError(t.t, err)
 
 	// We'll close out the channel between Alice and Bob, then shutdown
 	// carol to conclude the test.

@ -2,11 +2,8 @@ package itest
import ( import (
"context" "context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc"
@ -15,6 +12,7 @@ import (
"github.com/lightningnetwork/lnd/lntest" "github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
) )
// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario, // testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
@ -51,9 +49,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel() defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq) carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil { require.NoError(t.t, err)
t.Fatalf("unable to add invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from // Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol // Alice to Carol. We won't wait for the response however, as Carol
@ -62,32 +58,21 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
defer cancel() defer cancel()
_, err = alice.RouterClient.SendPaymentV2( _, err = alice.RouterClient.SendPaymentV2(
ctx, ctx, &routerrpc.SendPaymentRequest{
&routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest, PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60, TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat, FeeLimitMsat: noFeeLimitMsat,
}, },
) )
if err != nil { require.NoError(t.t, err)
t.Fatalf("unable to send payment: %v", err)
}
// At this point, all 3 nodes should now have an active channel with // At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them. // the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol} nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool { err = wait.NoError(func() error {
predErr = assertActiveHtlcs(nodes, payHash[:]) return assertActiveHtlcs(nodes, payHash[:])
if predErr != nil { }, defaultTimeout)
return false require.NoError(t.t, err)
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// Wait for carol to mark invoice as accepted. There is a small gap to // Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit // bridge between adding the htlc to the channel and executing the exit
@ -102,22 +87,20 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// immediately force close the channel by broadcast her commitment // immediately force close the channel by broadcast her commitment
// transaction. // transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
aliceForceClose := closeChannelAndAssertType(ctxt, t, net, alice, aliceForceClose := closeChannelAndAssertType(
aliceChanPoint, c == commitTypeAnchors, true) ctxt, t, net, alice, aliceChanPoint, c == commitTypeAnchors,
true,
)
// Wait for the channel to be marked pending force close. // Wait for the channel to be marked pending force close.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint) err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint)
if err != nil { require.NoError(t.t, err)
t.Fatalf("channel not pending force close: %v", err)
}
// Mine enough blocks for Alice to sweep her funds from the force // Mine enough blocks for Alice to sweep her funds from the force
// closed channel. // closed channel.
_, err = net.Miner.Node.Generate(defaultCSV) _, err = net.Miner.Node.Generate(defaultCSV)
if err != nil { require.NoError(t.t, err)
t.Fatalf("unable to generate blocks: %v", err)
}
// Alice should now sweep her funds. If there are anchors, Alice should // Alice should now sweep her funds. If there are anchors, Alice should
// also sweep hers. // also sweep hers.
@ -125,16 +108,14 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if c == commitTypeAnchors { if c == commitTypeAnchors {
expectedTxes = 2 expectedTxes = 2
} }
_, err = waitForNTxsInMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout) _, err = waitForNTxsInMempool(
if err != nil { net.Miner.Node, expectedTxes, minerMempoolTimeout,
t.Fatalf("unable to find sweeping tx in mempool: %v", err) )
} require.NoError(t.t, err)
// Suspend bob, so Carol is forced to go on chain. // Suspend bob, so Carol is forced to go on chain.
restartBob, err := net.SuspendNode(bob) restartBob, err := net.SuspendNode(bob)
if err != nil { require.NoError(t.t, err)
t.Fatalf("unable to suspend bob: %v", err)
}
// Settle invoice. This will just mark the invoice as settled, as there // Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For // is no link anymore to remove the htlc from the commitment tx. For
@ -146,31 +127,25 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{ _, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:], Preimage: preimage[:],
}) })
if err != nil { require.NoError(t.t, err)
t.Fatalf("settle invoice: %v", err)
}
// We'll now mine enough blocks so Carol decides that she needs to go // We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive. // on-chain to claim the HTLC as Bob has been inactive.
numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry- numBlocks := padCLTV(uint32(
lncfg.DefaultIncomingBroadcastDelta) - defaultCSV) invoiceReq.CltvExpiry-lncfg.DefaultIncomingBroadcastDelta,
) - defaultCSV)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil { _, err = net.Miner.Node.Generate(numBlocks)
t.Fatalf("unable to generate blocks") require.NoError(t.t, err)
}
// Carol's commitment transaction should now be in the mempool. If there // Carol's commitment transaction should now be in the mempool. If there
// are anchors, Carol also sweeps her anchor. // are anchors, Carol also sweeps her anchor.
_, err = waitForNTxsInMempool( _, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout, net.Miner.Node, expectedTxes, minerMempoolTimeout,
) )
if err != nil { require.NoError(t.t, err)
t.Fatalf("unable to find carol's txes: %v", err)
}
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint) bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil { require.NoError(t.t, err)
t.Fatalf("unable to get txid: %v", err)
}
carolFundingPoint := wire.OutPoint{ carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid, Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex, Index: bobChanPoint.OutputIndex,
@ -186,16 +161,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// Mine a block, which should contain: the commitment, possibly an // Mine a block, which should contain: the commitment, possibly an
// anchor sweep and the coinbase tx. // anchor sweep and the coinbase tx.
block := mineBlocks(t, net, 1, expectedTxes)[0] block := mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 { require.Len(t.t, block.Transactions, expectedTxes+1)
t.Fatalf("expected %v transactions in block, got %v",
expectedTxes, len(block.Transactions))
}
assertTxInBlock(t, block, &closingTxid) assertTxInBlock(t, block, &closingTxid)
// Restart bob again. // Restart bob again.
if err := restartBob(); err != nil { err = restartBob()
t.Fatalf("unable to restart bob: %v", err) require.NoError(t.t, err)
}
// After the force close transacion is mined, Carol should broadcast her // After the force close transacion is mined, Carol should broadcast her
// second level HTLC transacion. Bob will broadcast a sweep tx to sweep // second level HTLC transacion. Bob will broadcast a sweep tx to sweep
@ -206,21 +177,17 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if c == commitTypeAnchors { if c == commitTypeAnchors {
expectedTxes = 3 expectedTxes = 3
} }
txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, txes, err := getNTxsFromMempool(
minerMempoolTimeout) net.Miner.Node, expectedTxes, minerMempoolTimeout,
if err != nil { )
t.Fatalf("transactions not found in mempool: %v", err) require.NoError(t.t, err)
}
// All transactions should be pending from the commitment transaction. // All transactions should be pending from the commitment transaction.
assertAllTxesSpendFrom(t, txes, closingTxid) assertAllTxesSpendFrom(t, txes, closingTxid)
// Mine a block to confirm the two transactions (+ coinbase). // Mine a block to confirm the two transactions (+ coinbase).
block = mineBlocks(t, net, 1, expectedTxes)[0] block = mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 { require.Len(t.t, block.Transactions, expectedTxes+1)
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
// Keep track of the second level tx maturity. // Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV) carolSecondLevelCSV := uint32(defaultCSV)
@@ -228,114 +195,60 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 	// When Bob notices Carol's second level transaction in the block, he
 	// will extract the preimage and broadcast a sweep tx to directly claim
 	// the HTLC in his (already closed) channel with Alice.
-	bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node,
-		minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("transactions not found in mempool: %v", err)
-	}
+	bobHtlcSweep, err := waitForTxInMempool(
+		net.Miner.Node, minerMempoolTimeout,
+	)
+	require.NoError(t.t, err)

 	// It should spend from the commitment in the channel with Alice.
 	tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep)
-	if err != nil {
-		t.Fatalf("unable to get txn: %v", err)
-	}
-	if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose {
-		t.Fatalf("tx did not spend from alice's force close tx")
-	}
+	require.NoError(t.t, err)
+	require.Equal(
+		t.t, *aliceForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
+	)

 	// We'll now mine a block which should confirm Bob's HTLC sweep
 	// transaction.
 	block = mineBlocks(t, net, 1, 1)[0]
-	if len(block.Transactions) != 2 {
-		t.Fatalf("expected 2 transactions in block, got %v",
-			len(block.Transactions))
-	}
+	require.Len(t.t, block.Transactions, 2)
 	assertTxInBlock(t, block, bobHtlcSweep)
 	carolSecondLevelCSV--

 	// Now that the sweeping transaction has been confirmed, Bob should
 	// recognize that all contracts have been fully resolved, and show no
 	// pending close channels.
-	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob still has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	require.NoError(t.t, err)

 	// If we then mine 3 additional blocks, Carol's second level tx will
 	// mature, and she should pull the funds.
-	if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
-		t.Fatalf("unable to generate block: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
+	require.NoError(t.t, err)

-	carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
-	}
+	carolSweep, err := waitForTxInMempool(
+		net.Miner.Node, minerMempoolTimeout,
+	)
+	require.NoError(t.t, err)

 	// When Carol's sweep gets confirmed, she should have no more pending
 	// channels.
 	block = mineBlocks(t, net, 1, 1)[0]
 	assertTxInBlock(t, block, carolSweep)

-	pendingChansRequest = &lnrpc.PendingChannelsRequest{}
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := carol.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("carol still has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+	require.NoError(t.t, err)

 	// The invoice should show as settled for Carol, indicating that it was
 	// swept on-chain.
 	invoicesReq := &lnrpc.ListInvoiceRequest{}
 	invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
-	if err != nil {
-		t.Fatalf("unable to retrieve invoices: %v", err)
-	}
-	if len(invoicesResp.Invoices) != 1 {
-		t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
-	}
+	require.NoError(t.t, err)
+	require.Len(t.t, invoicesResp.Invoices, 1)

 	invoice := invoicesResp.Invoices[0]
-	if invoice.State != lnrpc.Invoice_SETTLED {
-		t.Fatalf("expected invoice to be settled on chain")
-	}
-	if invoice.AmtPaidSat != invoiceAmt {
-		t.Fatalf("expected invoice to be settled with %d sat, got "+
-			"%d sat", invoiceAmt, invoice.AmtPaidSat)
-	}
+	require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
+	require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)

 	// Finally, check that Alice's payment is correctly marked
 	// succeeded.
@@ -343,7 +256,5 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 	err = checkPaymentStatus(
 		ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
 	)
-	if err != nil {
-		t.Fatalf(err.Error())
-	}
+	require.NoError(t.t, err)
 }
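
The pattern applied throughout this cleanup: hand-rolled wait.Predicate loops
that smuggle the failure reason out through a shared predErr variable are
replaced by wait.NoError, which threads the error through directly. A minimal
sketch of the two styles, where checkSomething is a hypothetical helper
returning error and the other identifiers are as used in these tests:

	// Before: the predicate returns a bool, so the reason for failure has
	// to be captured in a variable shared with the final Fatalf.
	var predErr error
	err := wait.Predicate(func() bool {
		predErr = checkSomething()
		return predErr == nil
	}, time.Second*15)
	if err != nil {
		t.Fatalf("check failed: %v", predErr)
	}

	// After: the closure returns the error itself, removing the shared
	// variable and the hard-coded timeout.
	err = wait.NoError(func() error {
		return checkSomething()
	}, defaultTimeout)
	require.NoError(t.t, err)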

@@ -3,14 +3,12 @@ package itest
 import (
 	"context"
 	"fmt"
-	"time"

 	"github.com/btcsuite/btcutil"
-	"github.com/davecgh/go-spew/spew"
-	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
+	"github.com/stretchr/testify/require"
 )
 // testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
@@ -47,8 +45,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	carolPubKey := carol.PubKey[:]
 	payHash := makeFakePayHash(t)
 	_, err := alice.RouterClient.SendPaymentV2(
-		ctx,
-		&routerrpc.SendPaymentRequest{
+		ctx, &routerrpc.SendPaymentRequest{
 			Dest:           carolPubKey,
 			Amt:            int64(htlcAmt),
 			PaymentHash:    payHash,
@@ -57,21 +54,15 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 			FeeLimitMsat:   noFeeLimitMsat,
 		},
 	)
-	if err != nil {
-		t.Fatalf("unable to send alice htlc: %v", err)
-	}
+	require.NoError(t.t, err)

 	// Once the HTLC has cleared, all channels in our mini network should
 	// have it locked in.
-	var predErr error
 	nodes := []*lntest.HarnessNode{alice, bob, carol}
-	err = wait.Predicate(func() bool {
-		predErr = assertActiveHtlcs(nodes, payHash)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("htlc mismatch: %v", err)
-	}
+	err = wait.NoError(func() error {
+		return assertActiveHtlcs(nodes, payHash)
+	}, defaultTimeout)
+	require.NoError(t.t, err)

 	// Increase the fee estimate so that the following force close tx will
 	// be cpfp'ed.
@@ -87,105 +78,64 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// At this point, Bob should have a pending force close channel as he
 	// just went to chain.
-	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(ctxt,
-			pendingChansRequest)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending for " +
-				"close chan but doesn't")
-			return false
-		}
-		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-		if forceCloseChan.LimboBalance == 0 {
-			predErr = fmt.Errorf("bob should have nonzero limbo "+
-				"balance instead has: %v",
-				forceCloseChan.LimboBalance)
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if c.LimboBalance == 0 {
+				return fmt.Errorf("bob should have nonzero "+
+					"limbo balance instead has: %v",
+					c.LimboBalance)
+			}
+
+			return nil
+		},
+	)
+	require.NoError(t.t, err)

 	// We'll mine defaultCSV blocks in order to generate the sweep
 	// transaction of Bob's funding output. If there are anchors, mine
 	// Carol's anchor sweep too.
 	if c == commitTypeAnchors {
 		_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-		if err != nil {
-			t.Fatalf("unable to find carol's anchor sweep tx: %v", err)
-		}
+		require.NoError(t.t, err)
 	}

-	if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(defaultCSV)
+	require.NoError(t.t, err)

 	_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's funding output sweep tx: %v", err)
-	}
+	require.NoError(t.t, err)

 	// We'll now mine enough blocks for the HTLC to expire. After this, Bob
 	// should hand off the now expired HTLC output to the utxo nursery.
 	numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1))
-	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(numBlocks)
+	require.NoError(t.t, err)

 	// Bob's pending channel report should show that he has a single HTLC
 	// that's now in stage one.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending force " +
-				"close chan but doesn't")
-			return false
-		}
-		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-		if len(forceCloseChan.PendingHtlcs) != 1 {
-			predErr = fmt.Errorf("bob should have pending htlc " +
-				"but doesn't")
-			return false
-		}
-		if forceCloseChan.PendingHtlcs[0].Stage != 1 {
-			predErr = fmt.Errorf("bob's htlc should have "+
-				"advanced to the first stage: %v", err)
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if len(c.PendingHtlcs) != 1 {
+				return fmt.Errorf("bob should have pending " +
+					"htlc but doesn't")
+			}
+
+			if c.PendingHtlcs[0].Stage != 1 {
+				return fmt.Errorf("bob's htlc should have "+
+					"advanced to the first stage, is in "+
+					"stage %d", c.PendingHtlcs[0].Stage)
+			}
+
+			return nil
+		},
+	)
+	require.NoError(t.t, err)

 	// We should also now find a transaction in the mempool, as Bob should
 	// have broadcast his second layer timeout transaction.
 	timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
-	}
+	require.NoError(t.t, err)
 	// Next, we'll mine an additional block. This should serve to confirm
 	// the second layer timeout transaction.
@@ -195,62 +145,39 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// With the second layer timeout transaction confirmed, Bob should have
 	// canceled backwards the HTLC that was extended to Carol.
 	nodes = []*lntest.HarnessNode{alice}
-	err = wait.Predicate(func() bool {
-		predErr = assertNumActiveHtlcs(nodes, 0)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertNumActiveHtlcs(nodes, 0)
+	}, defaultTimeout)
+	require.NoError(t.t, err)

 	// Additionally, Bob should now show that HTLC as being advanced to the
 	// second stage.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending for " +
-				"close chan but doesn't")
-			return false
-		}
-		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-		if len(forceCloseChan.PendingHtlcs) != 1 {
-			predErr = fmt.Errorf("bob should have pending htlc " +
-				"but doesn't")
-			return false
-		}
-		if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-			predErr = fmt.Errorf("bob's htlc should have "+
-				"advanced to the second stage: %v", err)
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if len(c.PendingHtlcs) != 1 {
+				return fmt.Errorf("bob should have pending " +
+					"htlc but doesn't")
+			}
+
+			if c.PendingHtlcs[0].Stage != 2 {
+				return fmt.Errorf("bob's htlc should have "+
+					"advanced to the second stage, is in "+
+					"stage %d", c.PendingHtlcs[0].Stage)
+			}
+
+			return nil
+		},
+	)
+	require.NoError(t.t, err)

 	// We'll now mine 4 additional blocks. This should be enough for Bob's
 	// CSV timelock to expire and the sweeping transaction of the HTLC to be
 	// broadcast.
-	if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
-		t.Fatalf("unable to mine blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(defaultCSV)
+	require.NoError(t.t, err)

 	sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-	if err != nil {
-		t.Fatalf("unable to find bob's htlc sweep tx: %v", err)
-	}
+	require.NoError(t.t, err)
 	// We'll then mine a final block which should confirm this second layer
 	// sweep transaction.
@@ -259,27 +186,9 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// At this point, Bob should no longer show any channels as pending
 	// close.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob still has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	require.NoError(t.t, err)

 	// Coop close, no anchors.
 	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)

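One idiom the cleanup deliberately keeps: ctxt, _ = context.WithTimeout(...)
discards the CancelFunc, so the timeout's resources are only reclaimed when the
parent ctxb is done. That is harmless in these short-lived tests; where a
function-scoped context fits, the conventional form frees the timer as soon as
the function returns. A sketch (not part of this diff):

	// As written in the tests, the returned cancel func is discarded:
	//     ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	// The function-scoped alternative releases the timer deterministically:
	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
	defer cancel()
	_ = ctxt // used by the RPC calls in the real tests
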
@@ -3,14 +3,12 @@ package itest
 import (
 	"context"
 	"fmt"
-	"time"

 	"github.com/btcsuite/btcutil"
-	"github.com/davecgh/go-spew/spew"
-	"github.com/lightningnetwork/lnd/lnrpc"
 	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lntest/wait"
+	"github.com/stretchr/testify/require"
 )
 // testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a
@@ -48,8 +46,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	carolPubKey := carol.PubKey[:]
 	payHash := makeFakePayHash(t)
 	_, err := alice.RouterClient.SendPaymentV2(
-		ctx,
-		&routerrpc.SendPaymentRequest{
+		ctx, &routerrpc.SendPaymentRequest{
 			Dest:           carolPubKey,
 			Amt:            int64(htlcAmt),
 			PaymentHash:    payHash,
@@ -58,21 +55,15 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 			FeeLimitMsat:   noFeeLimitMsat,
 		},
 	)
-	if err != nil {
-		t.Fatalf("unable to send alice htlc: %v", err)
-	}
+	require.NoError(t.t, err)

 	// Once the HTLC has cleared, all the nodes in our mini network should
 	// show that the HTLC has been locked in.
-	var predErr error
 	nodes := []*lntest.HarnessNode{alice, bob, carol}
-	err = wait.Predicate(func() bool {
-		predErr = assertActiveHtlcs(nodes, payHash)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("htlc mismatch: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertActiveHtlcs(nodes, payHash)
+	}, defaultTimeout)
+	require.NoError(t.t, err)

 	// Increase the fee estimate so that the following force close tx will
 	// be cpfp'ed.
@@ -90,28 +81,9 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// At this point, Bob should have a pending force close channel as
 	// Carol has gone directly to chain.
-	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for "+
-				"pending channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending " +
-				"force close channels but doesn't")
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil)
+	require.NoError(t.t, err)

 	// Bob can sweep his output immediately. If there is an anchor, Bob will
 	// sweep that as well.
@@ -123,55 +95,39 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	_, err = waitForNTxsInMempool(
 		net.Miner.Node, expectedTxes, minerMempoolTimeout,
 	)
-	if err != nil {
-		t.Fatalf("failed to find txes in miner mempool: %v", err)
-	}
+	require.NoError(t.t, err)

 	// Next, we'll mine enough blocks for the HTLC to expire. At this
 	// point, Bob should hand off the output to his internal utxo nursery,
 	// which will broadcast a sweep transaction.
 	numBlocks := padCLTV(finalCltvDelta - 1)
-	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-		t.Fatalf("unable to generate blocks: %v", err)
-	}
+	_, err = net.Miner.Node.Generate(numBlocks)
+	require.NoError(t.t, err)

 	// If we check Bob's pending channel report, it should show that he has
 	// a single HTLC that's now in the second stage, as we skip the initial
 	// first stage since this is a direct HTLC.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-			predErr = fmt.Errorf("bob should have pending for " +
-				"close chan but doesn't")
-			return false
-		}
-		forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-		if len(forceCloseChan.PendingHtlcs) != 1 {
-			predErr = fmt.Errorf("bob should have pending htlc " +
-				"but doesn't")
-			return false
-		}
-		if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-			predErr = fmt.Errorf("bob's htlc should have "+
-				"advanced to the second stage: %v", err)
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(
+		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+			if len(c.PendingHtlcs) != 1 {
+				return fmt.Errorf("bob should have pending " +
+					"htlc but doesn't")
+			}
+
+			if c.PendingHtlcs[0].Stage != 2 {
+				return fmt.Errorf("bob's htlc should have "+
+					"advanced to the second stage, is in "+
+					"stage %d", c.PendingHtlcs[0].Stage)
+			}
+
+			return nil
+		},
+	)
+	require.NoError(t.t, err)
+
+	// We need to generate an additional block to trigger the sweep.
+	_, err = net.Miner.Node.Generate(1)
+	require.NoError(t.t, err)

 	// Bob's sweeping transaction should now be found in the mempool at
 	// this point.
@@ -185,14 +141,10 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 		// we'll fail.
 		// TODO(halseth): can we use waitForChannelPendingForceClose to
 		// avoid this hack?
-		if _, err := net.Miner.Node.Generate(1); err != nil {
-			t.Fatalf("unable to generate block: %v", err)
-		}
+		_, err = net.Miner.Node.Generate(1)
+		require.NoError(t.t, err)

 		sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-		if err != nil {
-			t.Fatalf("unable to find bob's sweeping transaction: "+
-				"%v", err)
-		}
+		require.NoError(t.t, err)
 	}
 	// If we mine an additional block, then this should confirm Bob's
@@ -204,45 +156,23 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
 	// cancel back that HTLC. As a result, Alice should not know of any
 	// active HTLCs.
 	nodes = []*lntest.HarnessNode{alice}
-	err = wait.Predicate(func() bool {
-		predErr = assertNumActiveHtlcs(nodes, 0)
-		return predErr == nil
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-	}
+	err = wait.NoError(func() error {
+		return assertNumActiveHtlcs(nodes, 0)
+	}, defaultTimeout)
+	require.NoError(t.t, err)

 	// Now we'll check Bob's pending channel report. Since this was Carol's
 	// commitment, he doesn't have to wait for any CSV delays. As a result,
 	// he should show no additional pending transactions.
-	err = wait.Predicate(func() bool {
-		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-		pendingChanResp, err := bob.PendingChannels(
-			ctxt, pendingChansRequest,
-		)
-		if err != nil {
-			predErr = fmt.Errorf("unable to query for pending "+
-				"channels: %v", err)
-			return false
-		}
-		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-			predErr = fmt.Errorf("bob still has pending channels "+
-				"but shouldn't: %v", spew.Sdump(pendingChanResp))
-			return false
-		}
-		return true
-	}, time.Second*15)
-	if err != nil {
-		t.Fatalf(predErr.Error())
-	}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+	require.NoError(t.t, err)

 	// We'll close out the test by closing the channel from Alice to Bob,
 	// and then shutting down the new node we created as it's no longer
 	// needed. Coop close, no anchors.
 	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
 	closeChannelAndAssertType(
-		ctxt, t, net, alice, aliceChanPoint, false,
-		false,
+		ctxt, t, net, alice, aliceChanPoint, false, false,
 	)
 }
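
Both timeout tests size their block generation with padCLTV, which pads a
CLTV-derived block count so the HTLC is certain to have expired even if the
broadcast height drifted by a few blocks. A self-contained sketch of the
arithmetic; every constant value here is an assumption for illustration, the
real ones live in the itest harness:

	package main

	import "fmt"

	// Assumed values for illustration only.
	const (
		finalCltvDelta = 40 // HTLC expiry delta used by the tests
		defaultCSV     = 4  // CSV delay on force close outputs
		blockPadding   = 3  // safety margin added by padCLTV
	)

	// padCLTV pads a block count so the HTLC has expired with certainty.
	func padCLTV(blocks uint32) uint32 {
		return blocks + blockPadding
	}

	func main() {
		// Local force close: defaultCSV blocks were already mined for
		// the commitment sweep, so only the remainder is generated.
		local := padCLTV(uint32(finalCltvDelta - defaultCSV - 1))

		// Remote force close: no CSV wait precedes it, so nearly the
		// full delta is generated in one go.
		remote := padCLTV(finalCltvDelta - 1)

		fmt.Println(local, remote) // 38 42 with these assumed values
	}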

@@ -330,37 +330,68 @@ func waitForChannelPendingForceClose(ctx context.Context,
 		Index: fundingChanPoint.OutputIndex,
 	}

-	var predErr error
-	err = wait.Predicate(func() bool {
+	return wait.NoError(func() error {
 		pendingChansRequest := &lnrpc.PendingChannelsRequest{}
 		pendingChanResp, err := node.PendingChannels(
 			ctx, pendingChansRequest,
 		)
 		if err != nil {
-			predErr = fmt.Errorf("unable to get pending "+
-				"channels: %v", err)
-			return false
+			return fmt.Errorf("unable to get pending channels: %v",
+				err)
 		}

 		forceClose, err := findForceClosedChannel(pendingChanResp, &op)
 		if err != nil {
-			predErr = err
-			return false
+			return err
 		}

 		// We must wait until the UTXO nursery has received the channel
 		// and is aware of its maturity height.
 		if forceClose.MaturityHeight == 0 {
-			predErr = fmt.Errorf("channel had maturity height of 0")
-			return false
+			return fmt.Errorf("channel had maturity height of 0")
 		}

-		return true
-	}, time.Second*15)
-	if err != nil {
-		return predErr
-	}
-
-	return nil
+		return nil
+	}, defaultTimeout)
+}
+
+// lnrpcForceCloseChannel is a short type alias for a ridiculously long type
+// name in the lnrpc package.
+type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel
+
+// waitForNumChannelPendingForceClose waits for the node to report a certain
+// number of channels in state pending force close.
+func waitForNumChannelPendingForceClose(ctx context.Context,
+	node *lntest.HarnessNode, expectedNum int,
+	perChanCheck func(channel *lnrpcForceCloseChannel) error) error {
+
+	return wait.NoError(func() error {
+		resp, err := node.PendingChannels(
+			ctx, &lnrpc.PendingChannelsRequest{},
+		)
+		if err != nil {
+			return fmt.Errorf("unable to get pending channels: %v",
+				err)
+		}
+
+		forceCloseChans := resp.PendingForceClosingChannels
+		if len(forceCloseChans) != expectedNum {
+			return fmt.Errorf("expected %d pending force close "+
+				"channels, got %d", expectedNum,
+				len(forceCloseChans))
+		}
+
+		if perChanCheck != nil {
+			for _, forceCloseChan := range forceCloseChans {
+				err := perChanCheck(forceCloseChan)
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		return nil
+	}, defaultTimeout)
 }

 // cleanupForceClose mines a force close commitment found in the mempool and
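
Taken together, the alias and the new helper collapse each of the polling
blocks above into a single call plus an optional per-channel closure. A sketch
of a typical call site, mirroring the stage checks used in the tests (bob,
ctxb, t and the harness identifiers are assumed to be in scope as above):

	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	err := waitForNumChannelPendingForceClose(
		ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
			// Reject until exactly one HTLC is reported and it
			// has reached the second stage.
			if len(c.PendingHtlcs) != 1 {
				return fmt.Errorf("expected 1 pending htlc, "+
					"got %d", len(c.PendingHtlcs))
			}
			if c.PendingHtlcs[0].Stage != 2 {
				return fmt.Errorf("htlc in stage %d, want 2",
					c.PendingHtlcs[0].Stage)
			}
			return nil
		},
	)
	require.NoError(t.t, err)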