package itest

import (
	"context"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/lncfg"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
	"github.com/lightningnetwork/lnd/lntest"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/stretchr/testify/require"
)

// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
// outgoing HTLC is about to time out, then we'll go to chain in order to claim
// it using the HTLC timeout transaction. Any dust HTLC's should be immediately
// canceled backwards. Once the timeout has been reached, we should sweep it
// on-chain, and cancel the HTLC backwards.
func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
	alice, bob *lntest.HarnessNode, c commitType) {

	ctxb := context.Background()

	// First, we'll create a three hop network: Alice -> Bob -> Carol, with
	// Carol refusing to actually settle or directly cancel any HTLC's
	// herself.
	aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
		t, net, alice, bob, true, c,
	)

	// Clean up carol's node when the test finishes.
	defer shutdownAndAssert(net, t, carol)
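
	// Allow the freshly opened channels a moment to settle before we
	// start sending HTLCs over them.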
time.Sleep(time.Second * 1)

	// Now that our channels are set up, we'll send two HTLC's from Alice
	// to Carol. The first HTLC will be universally considered "dust",
	// while the second will be a proper fully valued HTLC.
	const (
		dustHtlcAmt    = btcutil.Amount(100)
		htlcAmt        = btcutil.Amount(300_000)
		finalCltvDelta = 40
	)
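
	// Both payments below are sent as streams over this context; canceling
	// it on exit tears the payment streams down with the test.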
ctx, cancel := context.WithCancel(ctxb)
	defer cancel()

	// We'll create two random payment hashes unknown to carol, then send
	// each of them by manually specifying the HTLC details.
	carolPubKey := carol.PubKey[:]
	dustPayHash := makeFakePayHash(t)
	payHash := makeFakePayHash(t)

	_, err := alice.RouterClient.SendPaymentV2(
		ctx, &routerrpc.SendPaymentRequest{
			Dest:           carolPubKey,
			Amt:            int64(dustHtlcAmt),
			PaymentHash:    dustPayHash,
			FinalCltvDelta: finalCltvDelta,
			TimeoutSeconds: 60,
			FeeLimitMsat:   noFeeLimitMsat,
		},
	)
	require.NoError(t.t, err)

	_, err = alice.RouterClient.SendPaymentV2(
		ctx, &routerrpc.SendPaymentRequest{
			Dest:           carolPubKey,
			Amt:            int64(htlcAmt),
			PaymentHash:    payHash,
			FinalCltvDelta: finalCltvDelta,
			TimeoutSeconds: 60,
			FeeLimitMsat:   noFeeLimitMsat,
		},
	)
	require.NoError(t.t, err)

	// Verify that all nodes in the path now have two HTLC's with the
	// proper parameters.
	nodes := []*lntest.HarnessNode{alice, bob, carol}
	err = wait.NoError(func() error {
		return assertActiveHtlcs(nodes, dustPayHash, payHash)
	}, defaultTimeout)
	require.NoError(t.t, err)

	// Increase the fee estimate so that the following force close tx will
	// be cpfp'ed.
	net.SetFeeEstimate(30000)

	// We'll now mine enough blocks to trigger Bob's broadcast of his
	// commitment transaction due to the fact that the HTLC is about to
	// time out. With the default outgoing broadcast delta of zero, this
	// will be the same height as the HTLC expiry height.
	numBlocks := padCLTV(
		uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
	)
	_, err = net.Miner.Client.Generate(numBlocks)
	require.NoError(t.t, err)

	// Bob's force close transaction should now be found in the mempool. If
	// there are anchors, we also expect Bob's anchor sweep.
	expectedTxes := 1
	if c == commitTypeAnchors {
		expectedTxes = 2
	}
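
	// Fetch the funding txid of the Bob -> Carol channel; Bob's force
	// close spends the funding output, which lets us pick the close
	// transaction out of the mempool.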
	bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)
	require.NoError(t.t, err)
	_, err = waitForNTxsInMempool(
		net.Miner.Client, expectedTxes, minerMempoolTimeout,
	)
	require.NoError(t.t, err)
	closeTx := getSpendingTxInMempool(
		t, net.Miner.Client, minerMempoolTimeout, wire.OutPoint{
			Hash:  *bobFundingTxid,
			Index: bobChanPoint.OutputIndex,
		},
	)
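	// Record the txid of the force close so the sweeps that follow can be
	// tied back to it.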
closeTxid := closeTx.TxHash()

	// Mine a block to confirm the closing transaction.
	mineBlocks(t, net, 1, expectedTxes)

	// At this point, Bob should have canceled backwards the dust HTLC
	// that we sent earlier. This means Alice should now only have a single
	// HTLC on her channel.
	nodes = []*lntest.HarnessNode{alice}
	err = wait.NoError(func() error {
		return assertActiveHtlcs(nodes, payHash)
	}, defaultTimeout)
	require.NoError(t.t, err)

	// With the closing transaction confirmed, we should expect Bob's HTLC
	// timeout transaction to be broadcast due to the expiry being reached.
	// If there are anchors, we also expect Carol's anchor sweep now.
	txes, err := getNTxsFromMempool(
		net.Miner.Client, expectedTxes, minerMempoolTimeout,
	)
	require.NoError(t.t, err)

	// Look up the timeout transaction that is expected to spend from the
	// closing tx. We distinguish it from a possible anchor sweep by value.
	var htlcTimeout *chainhash.Hash
	for _, tx := range txes {
		prevOp := tx.TxIn[0].PreviousOutPoint
		require.Equal(t.t, closeTxid, prevOp.Hash)

		// Assume that the timeout tx doesn't spend an output of
		// exactly the size of the anchor.
		if closeTx.TxOut[prevOp.Index].Value != anchorSize {
			hash := tx.TxHash()
			htlcTimeout = &hash
		}
	}
	require.NotNil(t.t, htlcTimeout)

	// We'll mine the remaining blocks in order to generate the sweep
	// transaction of Bob's commitment output. The commitment was just
	// mined at the current tip, and the sweep will be broadcast so it can
	// be mined at the tip+defaultCSV'th block. We mine one block less than
	// that so we can still make mempool assertions.
	mineBlocks(t, net, defaultCSV-1, expectedTxes)

	// Check that the sweep spends from the mined commitment.
	txes, err = getNTxsFromMempool(net.Miner.Client, 1, minerMempoolTimeout)
	require.NoError(t.t, err)
	assertAllTxesSpendFrom(t, txes, closeTxid)

	// Bob's pending channel report should show that he has a commitment
	// output awaiting sweeping, and also that there's an outgoing HTLC
	// output pending.
	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
	pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest)
	require.NoError(t.t, err)

	require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
	forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
	require.NotZero(t.t, forceCloseChan.LimboBalance)
	require.NotZero(t.t, len(forceCloseChan.PendingHtlcs))

	// Mine a block to confirm Bob's commit sweep tx and assert it was in
	// fact mined.
	block := mineBlocks(t, net, 1, 1)[0]
	commitSweepTxid := txes[0].TxHash()
	assertTxInBlock(t, block, &commitSweepTxid)

	// Mine an additional block to prompt Bob to broadcast his second
	// layer sweep due to the CSV on the HTLC timeout output.
	mineBlocks(t, net, 1, 0)
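	// The HTLC timeout transaction has a single output, so the second
	// layer sweep is expected to spend output index 0.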
	assertSpendingTxInMempool(
		t, net.Miner.Client, minerMempoolTimeout, wire.OutPoint{
			Hash:  *htlcTimeout,
			Index: 0,
		},
	)

	// The block should have confirmed Bob's HTLC timeout transaction.
	// Therefore, at this point, there should be no active HTLC's on the
	// commitment transaction from Alice -> Bob.
	nodes = []*lntest.HarnessNode{alice}
	err = wait.NoError(func() error {
		return assertNumActiveHtlcs(nodes, 0)
	}, defaultTimeout)
	require.NoError(t.t, err)

	// At this point, Bob should show that the pending HTLC has advanced to
	// the second stage and is to be swept.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
	require.NoError(t.t, err)
	forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
	require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)

	// Next, we'll mine a final block that should confirm the second-layer
	// sweeping transaction.
	_, err = net.Miner.Client.Generate(1)
	require.NoError(t.t, err)

	// Once this transaction has been confirmed, Bob should detect that he
	// no longer has any pending channels.
	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
	err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
	require.NoError(t.t, err)

	// Coop close channel, expect no anchors.
	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
	closeChannelAndAssertType(
		ctxt, t, net, alice, aliceChanPoint, false, false,
	)
}