diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go
index 5940a90a..0cf06fd1 100644
--- a/lntest/itest/lnd_test.go
+++ b/lntest/itest/lnd_test.go
@@ -50,6 +50,7 @@ import (
 	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/lightningnetwork/lnd/routing"
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -3512,6 +3513,13 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 		t.Fatalf("all funds should still be in limbo")
 	}
 
+	// Create maps of outpoints to expected resolutions for alice and carol
+	// which we will add reports to as we sweep outputs.
+	var (
+		aliceReports = make(map[string]*lnrpc.Resolution)
+		carolReports = make(map[string]*lnrpc.Resolution)
+	)
+
 	// The several restarts in this test are intended to ensure that when a
 	// channel is force-closed, the UTXO nursery has persisted the state of
 	// the channel in the closure process and will recover the correct state
@@ -3530,13 +3538,36 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 		expectedTxes = 2
 	}
 
-	_, err = waitForNTxsInMempool(
+	sweepTxns, err := waitForNTxsInMempool(
 		net.Miner.Node, expectedTxes, minerMempoolTimeout,
 	)
 	if err != nil {
 		t.Fatalf("failed to find commitment in miner mempool: %v", err)
 	}
 
+	// Find alice's commit sweep and anchor sweep (if present) in the
+	// mempool.
+	aliceCloseTx := waitingClose.Commitments.LocalTxid
+	_, aliceAnchor := findCommitAndAnchor(
+		t, net, sweepTxns, aliceCloseTx,
+	)
+
+	// If we expect anchors, add alice's anchor to our expected set of
+	// reports.
+	if channelType == commitTypeAnchors {
+		aliceReports[aliceAnchor.OutPoint.String()] = &lnrpc.Resolution{
+			ResolutionType: lnrpc.ResolutionType_ANCHOR,
+			Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
+			SweepTxid:      aliceAnchor.SweepTx,
+			Outpoint: &lnrpc.OutPoint{
+				TxidBytes:   aliceAnchor.OutPoint.Hash[:],
+				TxidStr:     aliceAnchor.OutPoint.Hash.String(),
+				OutputIndex: aliceAnchor.OutPoint.Index,
+			},
+			AmountSat: uint64(anchorSize),
+		}
+	}
+
 	if _, err := net.Miner.Node.Generate(1); err != nil {
 		t.Fatalf("unable to generate block: %v", err)
 	}
@@ -3605,7 +3636,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 	// Carol's sweep tx should be in the mempool already, as her output is
 	// not timelocked. If there are anchors, we also expect Carol's anchor
 	// sweep now.
-	_, err = waitForNTxsInMempool(
+	sweepTxns, err = waitForNTxsInMempool(
 		net.Miner.Node, expectedTxes, minerMempoolTimeout,
 	)
 	if err != nil {
@@ -3613,6 +3644,27 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 			err)
 	}
 
+	// We look up the sweep txns we have found in mempool and create
+	// expected resolutions for carol.
+	carolCommit, carolAnchor := findCommitAndAnchor(
+		t, net, sweepTxns, aliceCloseTx,
+	)
+
+	// If we have anchors, add an anchor resolution for carol.
+	if channelType == commitTypeAnchors {
+		carolReports[carolAnchor.OutPoint.String()] = &lnrpc.Resolution{
+			ResolutionType: lnrpc.ResolutionType_ANCHOR,
+			Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
+			SweepTxid:      carolAnchor.SweepTx,
+			AmountSat:      uint64(anchorSize),
+			Outpoint: &lnrpc.OutPoint{
+				TxidBytes:   carolAnchor.OutPoint.Hash[:],
+				TxidStr:     carolAnchor.OutPoint.Hash.String(),
+				OutputIndex: carolAnchor.OutPoint.Index,
+			},
+		}
+	}
+
 	// Currently within the codebase, the default CSV is 4 relative blocks.
 	// For the persistence test, we generate two blocks, then trigger
 	// a restart and then generate the final block that should trigger
 	// the creation of the sweep transaction.
@@ -3630,6 +3682,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 
 	// Alice should see the channel in her set of pending force closed
 	// channels with her funds still in limbo.
+	var aliceBalance int64
 	err = wait.NoError(func() error {
 		ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 		pendingChanResp, err := alice.PendingChannels(
@@ -3652,6 +3705,9 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 			return err
 		}
 
+		// Make a record of the balance we expect for alice.
+		aliceBalance = forceClose.Channel.LocalBalance
+
 		// At this point, the nursery should show that the commitment
 		// output has 2 block left before its CSV delay expires. In
 		// total, we have mined exactly defaultCSV blocks, so the htlc
@@ -3712,6 +3768,32 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 		}
 	}
 
+	// We expect a resolution which spends our commit output.
+	output := sweepTx.MsgTx().TxIn[0].PreviousOutPoint
+	aliceReports[output.String()] = &lnrpc.Resolution{
+		ResolutionType: lnrpc.ResolutionType_COMMIT,
+		Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
+		SweepTxid:      sweepingTXID.String(),
+		Outpoint: &lnrpc.OutPoint{
+			TxidBytes:   output.Hash[:],
+			TxidStr:     output.Hash.String(),
+			OutputIndex: output.Index,
+		},
+		AmountSat: uint64(aliceBalance),
+	}
+
+	carolReports[carolCommit.OutPoint.String()] = &lnrpc.Resolution{
+		ResolutionType: lnrpc.ResolutionType_COMMIT,
+		Outcome:        lnrpc.ResolutionOutcome_CLAIMED,
+		Outpoint: &lnrpc.OutPoint{
+			TxidBytes:   carolCommit.OutPoint.Hash[:],
+			TxidStr:     carolCommit.OutPoint.Hash.String(),
+			OutputIndex: carolCommit.OutPoint.Index,
+		},
+		AmountSat: uint64(pushAmt),
+		SweepTxid: carolCommit.SweepTx,
+	}
+
 	// Check that we can find the commitment sweep in our set of known
 	// sweeps.
 	err = findSweep(ctxb, alice, sweepingTXID)
@@ -3890,6 +3972,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 	// the sweeper check for these timeout transactions because they are
 	// not swept by the sweeper; the nursery broadcasts the pre-signed
 	// transaction.
+	var htlcLessFees uint64
 	for _, htlcTxID := range htlcTxIDs {
 		// Fetch the sweep transaction, all input it's spending should
 		// be from the commitment transaction which was broadcast
@@ -3899,18 +3982,62 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 			t.Fatalf("unable to fetch sweep tx: %v", err)
 		}
 		// Ensure the htlc transaction only has one input.
-		if len(htlcTx.MsgTx().TxIn) != 1 {
+		inputs := htlcTx.MsgTx().TxIn
+		if len(inputs) != 1 {
 			t.Fatalf("htlc transaction should only have one txin, "+
 				"has %d", len(htlcTx.MsgTx().TxIn))
 		}
 		// Ensure the htlc transaction is spending from the commitment
 		// transaction.
-		txIn := htlcTx.MsgTx().TxIn[0]
+		txIn := inputs[0]
 		if !closingTxID.IsEqual(&txIn.PreviousOutPoint.Hash) {
 			t.Fatalf("htlc transaction not spending from commit "+
 				"tx %v, instead spending %v", closingTxID,
 				txIn.PreviousOutPoint)
 		}
+
+		outputs := htlcTx.MsgTx().TxOut
+		if len(outputs) != 1 {
+			t.Fatalf("htlc transaction should only have one "+
+				"txout, has: %v", len(outputs))
+		}
+
+		// For each htlc timeout transaction, we expect a resolver
+		// report recording this on-chain resolution for both alice
+		// and carol.
+		outpoint := txIn.PreviousOutPoint
+		resolutionOutpoint := &lnrpc.OutPoint{
+			TxidBytes:   outpoint.Hash[:],
+			TxidStr:     outpoint.Hash.String(),
+			OutputIndex: outpoint.Index,
+		}
+
+		// We expect alice to have a timeout tx resolution with an
+		// amount equal to the payment amount.
+		aliceReports[outpoint.String()] = &lnrpc.Resolution{
+			ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC,
+			Outcome:        lnrpc.ResolutionOutcome_FIRST_STAGE,
+			SweepTxid:      htlcTx.Hash().String(),
+			Outpoint:       resolutionOutpoint,
+			AmountSat:      uint64(paymentAmt),
+		}
+
+		// We expect carol to have a resolution with an incoming htlc
+		// timeout which reflects the full amount of the htlc. It has
+		// no spend tx, because carol stops monitoring the htlc once
+		// it has timed out.
+		carolReports[outpoint.String()] = &lnrpc.Resolution{
+			ResolutionType: lnrpc.ResolutionType_INCOMING_HTLC,
+			Outcome:        lnrpc.ResolutionOutcome_TIMEOUT,
+			SweepTxid:      "",
+			Outpoint:       resolutionOutpoint,
+			AmountSat:      uint64(paymentAmt),
+		}
+
+		// We record the htlc amount less fees here, so that we know
+		// what value to expect for the second stage of our htlc
+		// resolution.
+		htlcLessFees = uint64(outputs[0].Value)
 	}
 
 	// With the htlc timeout txns still in the mempool, we restart Alice to
@@ -4022,6 +4149,12 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 		t.Fatalf("htlc transaction should have %d txin, "+
 			"has %d", numInvoices, len(htlcSweepTx.MsgTx().TxIn))
 	}
+	outputCount := len(htlcSweepTx.MsgTx().TxOut)
+	if outputCount != 1 {
+		t.Fatalf("htlc sweep transaction should have one output, has: "+
+			"%v", outputCount)
+	}
+
 	// Ensure that each output spends from exactly one htlc timeout txn.
 	for _, txIn := range htlcSweepTx.MsgTx().TxIn {
 		outpoint := txIn.PreviousOutPoint.Hash
@@ -4038,6 +4171,21 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 			t.Fatalf("htlc sweep tx has multiple spends from "+
 				"outpoint %v", outpoint)
 		}
+
+		// Since we have now swept our htlc timeout txns, we expect to
+		// have timeout resolutions for each of our htlcs.
+		output := txIn.PreviousOutPoint
+		aliceReports[output.String()] = &lnrpc.Resolution{
+			ResolutionType: lnrpc.ResolutionType_OUTGOING_HTLC,
+			Outcome:        lnrpc.ResolutionOutcome_TIMEOUT,
+			SweepTxid:      htlcSweepTx.Hash().String(),
+			Outpoint: &lnrpc.OutPoint{
+				TxidBytes:   output.Hash[:],
+				TxidStr:     output.Hash.String(),
+				OutputIndex: output.Index,
+			},
+			AmountSat: htlcLessFees,
+		}
 	}
 
 	// Check that we can find the htlc sweep in our set of sweeps.
@@ -4150,6 +4298,96 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
 			carolExpectedBalance, carolBalResp.ConfirmedBalance)
 	}
+
+	// Finally, we check that alice and carol have the set of resolutions
+	// we expect.
+	assertReports(ctxb, t, alice, op, aliceReports)
+	assertReports(ctxb, t, carol, op, carolReports)
+}
+
+type sweptOutput struct {
+	OutPoint wire.OutPoint
+	SweepTx  string
+}
+
+// findCommitAndAnchor looks for a commitment sweep and anchor sweep in the
+// mempool. The anchor sweep is identified by having multiple inputs, because
+// the wallet must attach an extra input to pay for fees. Note that the
+// anchor sweptOutput returned may be nil if the channel did not have anchors.
+func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness,
+	sweepTxns []*chainhash.Hash, closeTx string) (*sweptOutput, *sweptOutput) {
+
+	var commitSweep, anchorSweep *sweptOutput
+
+	for _, tx := range sweepTxns {
+		sweepTx, err := net.Miner.Node.GetRawTransaction(tx)
+		require.NoError(t.t, err)
+
+		// We expect our commitment sweep to have a single input, and
+		// our anchor sweep to have more inputs (because the wallet
+		// needs to add balance to the anchor amount). We find their
+		// sweep txids here to set up appropriate resolutions. We also
+		// need to find the outpoint for our resolution, which we do
+		// by matching the sweep's inputs against the close transaction.
+		inputs := sweepTx.MsgTx().TxIn
+		if len(inputs) == 1 {
+			commitSweep = &sweptOutput{
+				OutPoint: inputs[0].PreviousOutPoint,
+				SweepTx:  tx.String(),
+			}
+		} else {
+			// Since we have more than one input, we run through
+			// them to find the one that spends from the close
+			// tx. This will be our anchor output.
+			for _, txin := range inputs {
+				txid := txin.PreviousOutPoint.Hash.String()
+				if txid == closeTx {
+					anchorSweep = &sweptOutput{
+						OutPoint: txin.PreviousOutPoint,
+						SweepTx:  tx.String(),
+					}
+				}
+			}
+		}
+	}
+
+	return commitSweep, anchorSweep
+}
+
+// assertReports checks that the resolutions a node reports for the given
+// channel match a set of expected resolutions.
+func assertReports(ctxb context.Context, t *harnessTest,
+	node *lntest.HarnessNode, channelPoint wire.OutPoint,
+	expected map[string]*lnrpc.Resolution) {
+
+	// Get our node's closed channels.
+	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+
+	closed, err := node.ClosedChannels(
+		ctxt, &lnrpc.ClosedChannelsRequest{},
+	)
+	require.NoError(t.t, err)
+
+	var resolutions []*lnrpc.Resolution
+	for _, closedChan := range closed.Channels {
+		if closedChan.ChannelPoint == channelPoint.String() {
+			resolutions = closedChan.Resolutions
+			break
+		}
+	}
+
+	require.NotNil(t.t, resolutions)
+	require.Equal(t.t, len(expected), len(resolutions))
+
+	for _, res := range resolutions {
+		outPointStr := fmt.Sprintf("%v:%v", res.Outpoint.TxidStr,
+			res.Outpoint.OutputIndex)
+
+		expectedRes, ok := expected[outPointStr]
+		require.True(t.t, ok)
+		require.Equal(t.t, expectedRes, res)
+	}
 }
 
 // findSweep looks up a sweep in a nodes list of broadcast sweeps.