itest: cleanup multi-hop tests

In preparation for fixing an issue with the mempool wait, we clean up
the multi-hop itests a bit. We fix the formatting, consistently use the
require library for assertions and simplify some of the wait predicates.
Commonly used code is also extracted into functions.
Oliver Gugger 2020-10-26 14:21:09 +01:00
parent d1b46211d8
commit 34439fbc2a
7 changed files with 367 additions and 854 deletions
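The recurring pattern in this cleanup: a hand-rolled wait.Predicate loop that stashes its failure in a predErr side variable and reports it via t.Fatalf is collapsed into a wait.NoError call checked with require.NoError. A minimal before/after sketch of the pattern, using the assertActiveHtlcs check that appears throughout the diffs below:

    // Before: the predicate communicates its error through a shared
    // variable that the test has to remember to print on timeout.
    var predErr error
    err = wait.Predicate(func() bool {
        predErr = assertActiveHtlcs(nodes, payHash[:])
        return predErr == nil
    }, time.Second*15)
    if err != nil {
        t.Fatalf("htlc mismatch: %v", predErr)
    }

    // After: the closure returns its error directly, wait.NoError
    // surfaces the last one on timeout, and require handles the fatal.
    err = wait.NoError(func() error {
        return assertActiveHtlcs(nodes, payHash[:])
    }, defaultTimeout)
    require.NoError(t.t, err)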

@@ -3,10 +3,8 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
@@ -15,6 +13,7 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
@@ -51,9 +50,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
require.NoError(t.t, err)
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
@@ -62,32 +59,21 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
defer cancel()
_, err = alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
require.NoError(t.t, err)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
@@ -101,8 +87,9 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// At this point, Bob decides that he wants to exit the channel
// immediately, so he force closes his commitment transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
bobForceClose := closeChannelAndAssertType(ctxt, t, net, bob,
aliceChanPoint, c == commitTypeAnchors, true)
bobForceClose := closeChannelAndAssertType(
ctxt, t, net, bob, aliceChanPoint, c == commitTypeAnchors, true,
)
// Alice will sweep her commitment output immediately. If there are
// anchors, Alice will also sweep hers.
@@ -113,16 +100,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("unable to find alice's sweep tx in miner mempool: %v",
err)
}
require.NoError(t.t, err)
// Suspend Bob to force Carol to go to chain.
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
require.NoError(t.t, err)
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
@@ -134,31 +116,24 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
require.NoError(t.t, err)
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry -
lncfg.DefaultIncomingBroadcastDelta))
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// Carol's commitment transaction should now be in the mempool. If there
// is an anchor, Carol will sweep that too.
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
require.NoError(t.t, err)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
require.NoError(t.t, err)
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
@@ -174,16 +149,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// Mine a block that should confirm the commit tx, the anchor if present
// and the coinbase.
block := mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 {
t.Fatalf("expected %v transactions in block, got %v",
expectedTxes+1, len(block.Transactions))
}
require.Len(t.t, block.Transactions, expectedTxes+1)
assertTxInBlock(t, block, &closingTxid)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
err = restartBob()
require.NoError(t.t, err)
// After the force close transaction is mined, Carol should broadcast her
// second level HTLC transaction. Bob will broadcast a sweep tx to sweep
@@ -198,9 +169,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
txes, err := getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
require.NoError(t.t, err)
// Both Carol's second level transaction and Bob's sweep should be
// spending from the commitment transaction.
@@ -209,16 +178,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// At this point we suspend Alice to make sure she'll handle the
// on-chain settle after a restart.
restartAlice, err := net.SuspendNode(alice)
if err != nil {
t.Fatalf("unable to suspend alice: %v", err)
}
require.NoError(t.t, err)
// Mine a block to confirm the two transactions (+ the coinbase).
block = mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 {
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
require.Len(t.t, block.Transactions, expectedTxes+1)
// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
@@ -226,73 +190,48 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a second level tx to claim
// the HTLC in his (already closed) channel with Alice.
bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobSecondLvlTx, err := waitForTxInMempool(
net.Miner.Node, minerMempoolTimeout,
)
require.NoError(t.t, err)
// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
require.NoError(t.t, err)
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose {
t.Fatalf("tx did not spend from bob's force close tx")
}
require.Equal(
t.t, *bobForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
)
// At this point, Bob should have broadcast his second layer success
// transaction, and should have sent it to the nursery for incubation.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels {
if forceCloseChan.Channel.LocalBalance != 0 {
continue
}
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
stage := forceCloseChan.PendingHtlcs[0].Stage
if stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the first stage but was "+
"stage: %v", stage)
return false
}
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if c.Channel.LocalBalance != 0 {
return nil
}
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
if c.PendingHtlcs[0].Stage != 1 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the first stage but was "+
"stage: %v", c.PendingHtlcs[0].Stage)
}
return nil
},
)
require.NoError(t.t, err)
// We'll now mine a block which should confirm Bob's second layer
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
require.Len(t.t, block.Transactions, 2)
assertTxInBlock(t, block, bobSecondLvlTx)
// Keep track of Bob's second level maturity, and decrement our track
@@ -302,21 +241,17 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// Now that the preimage from Bob has hit the chain, restart Alice to
// ensure she'll pick it up.
if err := restartAlice(); err != nil {
t.Fatalf("unable to restart alice: %v", err)
}
err = restartAlice()
require.NoError(t.t, err)
// If we then mine 3 additional blocks, Carol's second level tx should
// mature, and she can pull the funds from it with a sweep tx.
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
require.NoError(t.t, err)
bobSecondLevelCSV -= carolSecondLevelCSV
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
}
require.NoError(t.t, err)
// Mining one additional block, Bob's second level tx is mature, and he
// can sweep the output.
@@ -324,18 +259,14 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
assertTxInBlock(t, block, carolSweep)
bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
require.NoError(t.t, err)
// Make sure it spends from the second level tx.
tx, err = net.Miner.Node.GetRawTransaction(bobSweep)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx {
t.Fatalf("tx did not spend from bob's second level tx")
}
require.NoError(t.t, err)
require.Equal(
t.t, *bobSecondLvlTx, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
)
// When we mine one additional block, that will confirm Bob's sweep.
// Now Bob should have no pending channels anymore, as this just
@@ -343,77 +274,15 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, bobSweep)
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := bob.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
return false
}
if len(chanInfo.Channels) != 0 {
predErr = fmt.Errorf("Bob should have no open "+
"channels, instead he has %v",
len(chanInfo.Channels))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
assertNodeNumChannels(t, bob, 0)
// Also Carol should have no channels left (open nor pending).
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob carol has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := carol.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
return false
}
if len(chanInfo.Channels) != 0 {
predErr = fmt.Errorf("carol should have no open "+
"channels, instead she has %v",
len(chanInfo.Channels))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
require.NoError(t.t, err)
assertNodeNumChannels(t, carol, 0)
// Finally, check that the Alice's payment is correctly marked
// succeeded.
@@ -421,7 +290,5 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
err = checkPaymentStatus(
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())
}
require.NoError(t.t, err)
}
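For context, wait.NoError from lnd's lntest/wait package behaves roughly like the sketch below: it wraps wait.Predicate and returns the last error the closure produced, which is exactly the bookkeeping the deleted predErr variables did by hand. This is an illustrative sketch, not a verbatim copy of the package source:

    package wait

    import "time"

    // NoError polls f until it returns nil or the timeout expires. On
    // timeout, the last error returned by f is surfaced to the caller.
    func NoError(f func() error, timeout time.Duration) error {
        var lastErr error
        pred := func() bool {
            if err := f(); err != nil {
                lastErr = err
                return false
            }
            return true
        }
        // Predicate (same package) only reports that the timeout was
        // hit; the useful detail lives in lastErr.
        if err := Predicate(pred, timeout); err != nil {
            return lastErr
        }
        return nil
    }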

@@ -2,19 +2,18 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
@@ -58,8 +57,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
payHash := makeFakePayHash(t)
_, err := alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(dustHtlcAmt),
PaymentHash: dustPayHash,
@@ -68,13 +66,10 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
require.NoError(t.t, err)
_, err = alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
@@ -83,21 +78,15 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
require.NoError(t.t, err)
// Verify that all nodes in the path now have two HTLC's with the
// proper parameters.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, dustPayHash, payHash)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, dustPayHash, payHash)
}, defaultTimeout)
require.NoError(t.t, err)
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@@ -110,9 +99,8 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(
uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// Bob's force close transaction should now be found in the mempool. If
// there are anchors, we also expect Bob's anchor sweep.
@@ -122,15 +110,11 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
}
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
require.NoError(t.t, err)
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("unable to find closing txid: %v", err)
}
require.NoError(t.t, err)
closeTx := getSpendingTxInMempool(
t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
Hash: *bobFundingTxid,
@@ -146,30 +130,25 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// that we sent earlier. This means Alice should now only have a single
// HTLC on her channel.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash)
}, defaultTimeout)
require.NoError(t.t, err)
// With the closing transaction confirmed, we should expect Bob's HTLC
// timeout transaction to be broadcast due to the expiry being reached.
// If there are anchors, we also expect Carol's anchor sweep now.
txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
}
txes, err := getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// Lookup the timeout transaction that is expected to spend from the
// closing tx. We distinguish it from a possibly anchor sweep by value.
var htlcTimeout *chainhash.Hash
for _, tx := range txes {
prevOp := tx.TxIn[0].PreviousOutPoint
if prevOp.Hash != closeTxid {
t.Fatalf("tx not spending from closing tx")
}
require.Equal(t.t, closeTxid, prevOp.Hash)
// Assume that the timeout tx doesn't spend an output of exactly
// the size of the anchor.
@@ -178,9 +157,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
htlcTimeout = &hash
}
}
if htlcTimeout == nil {
t.Fatalf("htlc timeout tx not found in mempool")
}
require.NotNil(t.t, htlcTimeout)
// We'll mine the remaining blocks in order to generate the sweep
// transaction of Bob's commitment output.
@@ -188,9 +165,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// Check that the sweep spends from the mined commitment.
txes, err = getNTxsFromMempool(net.Miner.Node, 1, minerMempoolTimeout)
if err != nil {
t.Fatalf("sweep not found: %v", err)
}
require.NoError(t.t, err)
assertAllTxesSpendFrom(t, txes, closeTxid)
// Bob's pending channel report should show that he has a commitment
@@ -199,21 +174,12 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
require.NoError(t.t, err)
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("bob should have pending for close chan but doesn't")
}
require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("bob should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
if len(forceCloseChan.PendingHtlcs) == 0 {
t.Fatalf("bob should have pending htlc but doesn't")
}
require.NotZero(t.t, forceCloseChan.LimboBalance)
require.NotZero(t.t, len(forceCloseChan.PendingHtlcs))
// Now we'll mine an additional block, which should confirm Bob's commit
// sweep. This block should also prompt Bob to broadcast their second
@@ -230,60 +196,33 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// Therefore, at this point, there should be no active HTLC's on the
// commitment transaction from Alice -> Bob.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
// At this point, Bob should show that the pending HTLC has advanced to
// the second stage and is to be swept.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
require.NoError(t.t, err)
forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
t.Fatalf("bob's htlc should have advanced to the second stage: %v", err)
}
require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
// Next, we'll mine a final block that should confirm the second-layer
// sweeping transaction.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
// Once this transaction has been confirmed, Bob should detect that he
// no longer has any pending channels.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending "+
"channels but shouldn't: %v",
spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
// Coop close channel, expect no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssertType(
ctxt, t, net, alice, aliceChanPoint, false,
false,
ctxt, t, net, alice, aliceChanPoint, false, false,
)
}

@@ -2,11 +2,9 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
@@ -15,6 +13,7 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
@@ -53,9 +52,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
require.NoError(t.t, err)
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
@@ -64,32 +61,21 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
defer cancel()
_, err = alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
require.NoError(t.t, err)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
@@ -97,9 +83,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
waitForInvoiceAccepted(t, carol, payHash)
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
require.NoError(t.t, err)
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
@@ -111,9 +95,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
require.NoError(t.t, err)
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@@ -125,9 +107,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(uint32(
invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
))
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// At this point, Carol should broadcast her active commitment
// transaction in order to go to the chain and sweep her HTLC. If there
@@ -139,14 +120,10 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
_, err = getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("expected transaction not found in mempool: %v", err)
}
require.NoError(t.t, err)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
require.NoError(t.t, err)
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
@@ -164,9 +141,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
mineBlocks(t, net, 1, expectedTxes)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
err = restartBob()
require.NoError(t.t, err)
// After the force close transaction is mined, Carol should broadcast
// her second level HTLC transaction. Bob will broadcast a sweep tx to
@@ -178,20 +154,18 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
if c == commitTypeAnchors {
expectedTxes = 3
}
txes, err := getNTxsFromMempool(net.Miner.Node,
expectedTxes, minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
txes, err := getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// All transactions should be spending from the commitment transaction.
assertAllTxesSpendFrom(t, txes, closingTxid)
// We'll now mine an additional block which should confirm both the
// second layer transactions.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
time.Sleep(time.Second * 4)
@@ -203,98 +177,52 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
require.NoError(t.t, err)
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("carol should have pending for close chan but doesn't")
}
require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("carol should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
require.NotZero(t.t, forceCloseChan.LimboBalance)
// The pending HTLC carol has should also now be in stage 2.
if len(forceCloseChan.PendingHtlcs) != 1 {
t.Fatalf("carol should have pending htlc but doesn't")
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
t.Fatalf("carol's htlc should have advanced to the second "+
"stage: %v", err)
}
require.Len(t.t, forceCloseChan.PendingHtlcs, 1)
require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
// Once the second-level transaction confirmed, Bob should have
// extracted the preimage from the chain, and sent it back to Alice,
// clearing the HTLC off-chain.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
// If we mine 4 additional blocks, then both outputs should now be
// mature.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(defaultCSV)
require.NoError(t.t, err)
// We should have a new transaction in the mempool.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction: %v", err)
}
require.NoError(t.t, err)
// Finally, if we mine an additional block to confirm these two sweep
// transactions, Carol should not show a pending channel in her report
// afterwards.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to mine block: %v", err)
}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("carol still has pending channels: %v",
spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
require.NoError(t.t, err)
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
if err != nil {
t.Fatalf("unable to retrieve invoices: %v", err)
}
if len(invoicesResp.Invoices) != 1 {
t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
}
require.NoError(t.t, err)
require.Len(t.t, invoicesResp.Invoices, 1)
invoice := invoicesResp.Invoices[0]
if invoice.State != lnrpc.Invoice_SETTLED {
t.Fatalf("expected invoice to be settled on chain")
}
if invoice.AmtPaidSat != invoiceAmt {
t.Fatalf("expected invoice to be settled with %d sat, got "+
"%d sat", invoiceAmt, invoice.AmtPaidSat)
}
require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)
// Finally, check that the Alice's payment is correctly marked
// succeeded.
@@ -302,9 +230,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
err = checkPaymentStatus(
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())
}
require.NoError(t.t, err)
// We'll close out the channel between Alice and Bob, then shutdown
// carol to conclude the test.

@@ -2,11 +2,8 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
@@ -15,6 +12,7 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
@@ -51,9 +49,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
require.NoError(t.t, err)
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
@@ -62,32 +58,21 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
defer cancel()
_, err = alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
require.NoError(t.t, err)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
@@ -102,22 +87,20 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// immediately force close the channel by broadcast her commitment
// transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
aliceForceClose := closeChannelAndAssertType(ctxt, t, net, alice,
aliceChanPoint, c == commitTypeAnchors, true)
aliceForceClose := closeChannelAndAssertType(
ctxt, t, net, alice, aliceChanPoint, c == commitTypeAnchors,
true,
)
// Wait for the channel to be marked pending force close.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint)
if err != nil {
t.Fatalf("channel not pending force close: %v", err)
}
require.NoError(t.t, err)
// Mine enough blocks for Alice to sweep her funds from the force
// closed channel.
_, err = net.Miner.Node.Generate(defaultCSV)
if err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
require.NoError(t.t, err)
// Alice should now sweep her funds. If there are anchors, Alice should
// also sweep hers.
@@ -125,16 +108,14 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if c == commitTypeAnchors {
expectedTxes = 2
}
_, err = waitForNTxsInMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find sweeping tx in mempool: %v", err)
}
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// Suspend bob, so Carol is forced to go on chain.
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
require.NoError(t.t, err)
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
@@ -146,31 +127,25 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
require.NoError(t.t, err)
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry-
lncfg.DefaultIncomingBroadcastDelta) - defaultCSV)
numBlocks := padCLTV(uint32(
invoiceReq.CltvExpiry-lncfg.DefaultIncomingBroadcastDelta,
) - defaultCSV)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// Carol's commitment transaction should now be in the mempool. If there
// are anchors, Carol also sweeps her anchor.
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("unable to find carol's txes: %v", err)
}
require.NoError(t.t, err)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
require.NoError(t.t, err)
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
@@ -186,16 +161,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// Mine a block, which should contain: the commitment, possibly an
// anchor sweep and the coinbase tx.
block := mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 {
t.Fatalf("expected %v transactions in block, got %v",
expectedTxes, len(block.Transactions))
}
require.Len(t.t, block.Transactions, expectedTxes+1)
assertTxInBlock(t, block, &closingTxid)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
err = restartBob()
require.NoError(t.t, err)
// After the force close transaction is mined, Carol should broadcast her
// second level HTLC transaction. Bob will broadcast a sweep tx to sweep
@@ -206,21 +177,17 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if c == commitTypeAnchors {
expectedTxes = 3
}
txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
txes, err := getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// All transactions should be pending from the commitment transaction.
assertAllTxesSpendFrom(t, txes, closingTxid)
// Mine a block to confirm the two transactions (+ coinbase).
block = mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 {
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
require.Len(t.t, block.Transactions, expectedTxes+1)
// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
@@ -228,114 +195,60 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a sweep tx to directly claim
// the HTLC in his (already closed) channel with Alice.
bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobHtlcSweep, err := waitForTxInMempool(
net.Miner.Node, minerMempoolTimeout,
)
require.NoError(t.t, err)
// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose {
t.Fatalf("tx did not spend from alice's force close tx")
}
require.NoError(t.t, err)
require.Equal(
t.t, *aliceForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
)
// We'll now mine a block which should confirm Bob's HTLC sweep
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
require.Len(t.t, block.Transactions, 2)
assertTxInBlock(t, block, bobHtlcSweep)
carolSecondLevelCSV--
// Now that the sweeping transaction has been confirmed, Bob should now
// recognize that all contracts have been fully resolved, and show no
// pending close channels.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
// If we then mine 3 additional blocks, Carol's second level tx will
// mature, and she should pull the funds.
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
require.NoError(t.t, err)
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
}
carolSweep, err := waitForTxInMempool(
net.Miner.Node, minerMempoolTimeout,
)
require.NoError(t.t, err)
// When Carol's sweep gets confirmed, she should have no more pending
// channels.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, carolSweep)
pendingChansRequest = &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("carol still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
require.NoError(t.t, err)
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
if err != nil {
t.Fatalf("unable to retrieve invoices: %v", err)
}
if len(invoicesResp.Invoices) != 1 {
t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
}
require.NoError(t.t, err)
require.Len(t.t, invoicesResp.Invoices, 1)
invoice := invoicesResp.Invoices[0]
if invoice.State != lnrpc.Invoice_SETTLED {
t.Fatalf("expected invoice to be settled on chain")
}
if invoice.AmtPaidSat != invoiceAmt {
t.Fatalf("expected invoice to be settled with %d sat, got "+
"%d sat", invoiceAmt, invoice.AmtPaidSat)
}
require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)
// Finally, check that the Alice's payment is correctly marked
// succeeded.
@@ -343,7 +256,5 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
err = checkPaymentStatus(
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())
}
require.NoError(t.t, err)
}

@@ -3,14 +3,12 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
@@ -47,8 +45,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
carolPubKey := carol.PubKey[:]
payHash := makeFakePayHash(t)
_, err := alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
@@ -57,21 +54,15 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
require.NoError(t.t, err)
// Once the HTLC has cleared, all channels in our mini network should
// have the it locked in.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash)
}, defaultTimeout)
require.NoError(t.t, err)
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@@ -87,105 +78,64 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// At this point, Bob should have a pending force close channel as he
// just went to chain.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(ctxt,
pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
predErr = fmt.Errorf("bob should have nonzero limbo "+
"balance instead has: %v",
forceCloseChan.LimboBalance)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if c.LimboBalance == 0 {
return fmt.Errorf("bob should have nonzero "+
"limbo balance instead has: %v",
c.LimboBalance)
}
return nil
},
)
require.NoError(t.t, err)
// We'll mine defaultCSV blocks in order to generate the sweep
// transaction of Bob's funding output. If there are anchors, mine
// Carol's anchor sweep too.
if c == commitTypeAnchors {
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find carol's anchor sweep tx: %v", err)
}
require.NoError(t.t, err)
}
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(defaultCSV)
require.NoError(t.t, err)
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's funding output sweep tx: %v", err)
}
require.NoError(t.t, err)
// We'll now mine enough blocks for the HTLC to expire. After this, Bob
// should hand off the now expired HTLC output to the utxo nursery.
numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1))
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// Bob's pending channel report should show that he has a single HTLC
// that's now in stage one.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending force " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the first stage: %v", err)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
if c.PendingHtlcs[0].Stage != 1 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the first stage: %v", err)
}
return nil
},
)
require.NoError(t.t, err)
// We should also now find a transaction in the mempool, as Bob should
// have broadcast his second layer timeout transaction.
timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
}
require.NoError(t.t, err)
// Next, we'll mine an additional block. This should serve to confirm
// the second layer timeout transaction.
@@ -195,62 +145,39 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// With the second layer timeout transaction confirmed, Bob should have
// canceled backwards the HTLC that carol sent.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
// Additionally, Bob should now show that HTLC as being advanced to the
// second stage.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
if c.PendingHtlcs[0].Stage != 2 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
}
return nil
},
)
require.NoError(t.t, err)
// We'll now mine 4 additional blocks. This should be enough for Bob's
// CSV timelock to expire and the sweeping transaction of the HTLC to be
// broadcast.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
_, err = net.Miner.Node.Generate(defaultCSV)
require.NoError(t.t, err)
sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc sweep tx: %v", err)
}
require.NoError(t.t, err)
// We'll then mine a final block which should confirm this second layer
// sweep transaction.
@@ -259,27 +186,9 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// At this point, Bob should no longer show any channels as pending
// close.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
// Coop close, no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)

@@ -3,14 +3,12 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a
@@ -48,8 +46,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
carolPubKey := carol.PubKey[:]
payHash := makeFakePayHash(t)
_, err := alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
@@ -58,21 +55,15 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
require.NoError(t.t, err)
// Once the HTLC has cleared, all the nodes in our mini network should
// show that the HTLC has been locked in.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash)
}, defaultTimeout)
require.NoError(t.t, err)
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@@ -90,28 +81,9 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// At this point, Bob should have a pending force close channel as
// Carol has gone directly to chain.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for "+
"pending channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending " +
"force close channels but doesn't")
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil)
require.NoError(t.t, err)
// Bob can sweep his output immediately. If there is an anchor, Bob will
// sweep that as well.
@@ -123,55 +95,39 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("failed to find txes in miner mempool: %v", err)
}
require.NoError(t.t, err)
// Next, we'll mine enough blocks for the HTLC to expire. At this
// point, Bob should hand off the output to his internal utxo nursery,
// which will broadcast a sweep transaction.
numBlocks := padCLTV(finalCltvDelta - 1)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// If we check Bob's pending channel report, it should show that he has
// a single HTLC that's now in the second stage, as we skip the initial
// first stage since this is a direct HTLC.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
if c.PendingHtlcs[0].Stage != 2 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
}
return nil
},
)
require.NoError(t.t, err)
// We need to generate an additional block to trigger the sweep.
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
// Bob's sweeping transaction should now be found in the mempool at
// this point.
@@ -185,14 +141,10 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// we'll fail.
// TODO(halseth): can we use waitForChannelPendingForceClose to
// avoid this hack?
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction: "+
"%v", err)
}
require.NoError(t.t, err)
}
// If we mine an additional block, then this should confirm Bob's
@@ -204,45 +156,23 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// cancel back that HTLC. As a result, Alice should not know of any
// active HTLC's.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
// Now we'll check Bob's pending channel report. Since this was Carol's
// commitment, he doesn't have to wait for any CSV delays. As a result,
// he should show no additional pending transactions.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
// We'll close out the test by closing the channel from Alice to Bob,
// and then shutting down the new node we created as its no longer
// needed. Coop close, no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssertType(
ctxt, t, net, alice, aliceChanPoint, false,
false,
ctxt, t, net, alice, aliceChanPoint, false, false,
)
}

@@ -330,37 +330,68 @@ func waitForChannelPendingForceClose(ctx context.Context,
Index: fundingChanPoint.OutputIndex,
}
var predErr error
err = wait.Predicate(func() bool {
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := node.PendingChannels(
ctx, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to get pending "+
"channels: %v", err)
return false
}
forceClose, err := findForceClosedChannel(pendingChanResp, &op)
if err != nil {
predErr = err
return false
}
// We must wait until the UTXO nursery has received the channel
// and is aware of its maturity height.
if forceClose.MaturityHeight == 0 {
predErr = fmt.Errorf("channel had maturity height of 0")
return false
}
return true
}, time.Second*15)
if err != nil {
return predErr
}
return nil
return wait.NoError(func() error {
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := node.PendingChannels(
ctx, pendingChansRequest,
)
if err != nil {
return fmt.Errorf("unable to get pending channels: %v",
err)
}
forceClose, err := findForceClosedChannel(pendingChanResp, &op)
if err != nil {
return err
}
// We must wait until the UTXO nursery has received the channel
// and is aware of its maturity height.
if forceClose.MaturityHeight == 0 {
return fmt.Errorf("channel had maturity height of 0")
}
return nil
}, defaultTimeout)
}
// lnrpcForceCloseChannel is a short type alias for a ridiculously long type
// name in the lnrpc package.
type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel
// waitForNumChannelPendingForceClose waits for the node to report a certain
// number of channels in state pending force close.
func waitForNumChannelPendingForceClose(ctx context.Context,
node *lntest.HarnessNode, expectedNum int,
perChanCheck func(channel *lnrpcForceCloseChannel) error) error {
return wait.NoError(func() error {
resp, err := node.PendingChannels(
ctx, &lnrpc.PendingChannelsRequest{},
)
if err != nil {
return fmt.Errorf("unable to get pending channels: %v",
err)
}
forceCloseChans := resp.PendingForceClosingChannels
if len(forceCloseChans) != expectedNum {
return fmt.Errorf("bob should have %d pending "+
"force close channels but has %d", expectedNum,
len(forceCloseChans))
}
if perChanCheck != nil {
for _, forceCloseChan := range forceCloseChans {
err := perChanCheck(forceCloseChan)
if err != nil {
return err
}
}
}
return nil
}, defaultTimeout)
}
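A caller passes nil as perChanCheck to assert only the channel count, or supplies a closure to inspect every reported channel. A short usage sketch, mirroring the call sites above:

    // Wait until bob reports exactly one pending force close whose
    // single HTLC has advanced to the second stage; passing nil
    // instead of the closure would check only the count.
    ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
    err = waitForNumChannelPendingForceClose(
        ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
            if len(c.PendingHtlcs) != 1 {
                return fmt.Errorf("expected exactly one pending htlc")
            }
            if c.PendingHtlcs[0].Stage != 2 {
                return fmt.Errorf("htlc not yet in second stage")
            }
            return nil
        },
    )
    require.NoError(t.t, err)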
// cleanupForceClose mines a force close commitment found in the mempool and