diff --git a/contractcourt/chain_arbitrator.go b/contractcourt/chain_arbitrator.go index 4d97b10b..a9dde542 100644 --- a/contractcourt/chain_arbitrator.go +++ b/contractcourt/chain_arbitrator.go @@ -231,6 +231,33 @@ type arbChannel struct { c *ChainArbitrator } +// NewAnchorResolutions returns the anchor resolutions for currently valid +// commitment transactions. +// +// NOTE: Part of the ArbChannel interface. +func (a *arbChannel) NewAnchorResolutions() ([]*lnwallet.AnchorResolution, + error) { + + // Get a fresh copy of the database state to base the anchor resolutions + // on. Unfortunately the channel instance that we have here isn't the + // same instance that is used by the link. + chanPoint := a.channel.FundingOutpoint + + channel, err := a.c.chanSource.FetchChannel(chanPoint) + if err != nil { + return nil, err + } + + chanMachine, err := lnwallet.NewLightningChannel( + a.c.cfg.Signer, channel, nil, + ) + if err != nil { + return nil, err + } + + return chanMachine.NewAnchorResolutions() +} + // ForceCloseChan should force close the contract that this attendant is // watching over. We'll use this when we decide that we need to go to chain. It // should in addition tell the switch to remove the corresponding link, such diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go index 313e8a53..f0cc1f31 100644 --- a/contractcourt/channel_arbitrator.go +++ b/contractcourt/channel_arbitrator.go @@ -13,9 +13,11 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/sweep" ) var ( @@ -74,6 +76,10 @@ type ArbChannel interface { // returned summary contains all items needed to eventually resolve all // outputs on chain. 
ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error) + + // NewAnchorResolutions returns the anchor resolutions for currently + // valid commitment transactions. + NewAnchorResolutions() ([]*lnwallet.AnchorResolution, error) } // ChannelArbitratorConfig contains all the functionality that the @@ -847,9 +853,31 @@ func (c *ChannelArbitrator) stateStep( // to be confirmed. case StateCommitmentBroadcasted: switch trigger { - // We are waiting for a commitment to be confirmed, so any - // other trigger will be ignored. + + // We are waiting for a commitment to be confirmed. case chainTrigger, userTrigger: + // The commitment transaction has been broadcast, but it + // doesn't necessarily need to be the commitment + // transaction version that is going to be confirmed. To + // be sure that any of those versions can be anchored + // down, we now submit all anchor resolutions to the + // sweeper. The sweeper will keep trying to sweep all of + // them. + // + // Note that the sweeper is idempotent. If we ever + // happen to end up at this point in the code again, no + // harm is done by re-offering the anchors to the + // sweeper. + anchors, err := c.cfg.Channel.NewAnchorResolutions() + if err != nil { + return StateError, closeTx, err + } + + err = c.sweepAnchors(anchors, triggerHeight) + if err != nil { + return StateError, closeTx, err + } + nextState = StateCommitmentBroadcasted // If this state advance was triggered by any of the @@ -979,6 +1007,54 @@ func (c *ChannelArbitrator) stateStep( return nextState, closeTx, nil } +// sweepAnchors offers all given anchor resolutions to the sweeper. It requests +// sweeping at the minimum fee rate. This fee rate can be upped manually by the +// user via the BumpFee rpc. +func (c *ChannelArbitrator) sweepAnchors(anchors []*lnwallet.AnchorResolution, + heightHint uint32) error { + + // Use the chan id as the exclusive group. This prevents any of the + // anchors from being batched together. 
+ exclusiveGroup := c.cfg.ShortChanID.ToUint64() + + // Retrieve the current minimum fee rate from the sweeper. + minFeeRate := c.cfg.Sweeper.RelayFeePerKW() + + for _, anchor := range anchors { + log.Debugf("ChannelArbitrator(%v): pre-confirmation sweep of "+ + "anchor of tx %v", c.cfg.ChanPoint, anchor.CommitAnchor) + + // Prepare anchor output for sweeping. + anchorInput := input.MakeBaseInput( + &anchor.CommitAnchor, + input.CommitmentAnchor, + &anchor.AnchorSignDescriptor, + heightHint, + ) + + // Sweep anchor output with the minimum fee rate. This usually + // (up to a min relay fee of 3 sat/b) means that the anchor + // sweep will be economical. Also signal that this is a force + // sweep. If the user decides to bump the fee on the anchor + // sweep, it will be swept even if it isn't economical. + _, err := c.cfg.Sweeper.SweepInput( + &anchorInput, + sweep.Params{ + Fee: sweep.FeePreference{ + FeeRate: minFeeRate, + }, + Force: true, + ExclusiveGroup: &exclusiveGroup, + }, + ) + if err != nil { + return err + } + } + + return nil +} + // launchResolvers updates the activeResolvers list and starts the resolvers. 
func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver) { c.activeResolversLock.Lock() diff --git a/contractcourt/channel_arbitrator_test.go b/contractcourt/channel_arbitrator_test.go index 46316d0c..b78f3c31 100644 --- a/contractcourt/channel_arbitrator_test.go +++ b/contractcourt/channel_arbitrator_test.go @@ -197,6 +197,8 @@ type chanArbTestCtx struct { resolutions chan []ResolutionMsg log ArbitratorLog + + sweeper *mockSweeper } func (c *chanArbTestCtx) CleanUp() { @@ -314,6 +316,7 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog, incubateChan := make(chan struct{}) chainIO := &mockChainIO{} + mockSweeper := newMockSweeper() chainArbCfg := ChainArbitratorConfig{ ChainIO: chainIO, PublishTx: func(*wire.MsgTx) error { @@ -343,7 +346,8 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog, return true }, - Clock: clock.NewDefaultClock(), + Clock: clock.NewDefaultClock(), + Sweeper: mockSweeper, } // We'll use the resolvedChan to synchronize on call to @@ -420,6 +424,7 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog, blockEpochs: blockEpochs, log: log, incubationRequests: incubateChan, + sweeper: mockSweeper, }, nil } @@ -2083,7 +2088,92 @@ func TestRemoteCloseInitiator(t *testing.T) { } } -type mockChannel struct{} +// TestChannelArbitratorAnchors asserts that the commitment tx anchor is swept. +func TestChannelArbitratorAnchors(t *testing.T) { + log := &mockArbitratorLog{ + state: StateDefault, + newStates: make(chan ArbitratorState, 5), + } + + chanArbCtx, err := createTestChannelArbitrator(t, log) + if err != nil { + t.Fatalf("unable to create ChannelArbitrator: %v", err) + } + chanArb := chanArbCtx.chanArb + chanArb.cfg.PreimageDB = newMockWitnessBeacon() + chanArb.cfg.Registry = &mockRegistry{} + + // Setup two pre-confirmation anchor resolutions on the mock channel. 
+ chanArb.cfg.Channel.(*mockChannel).anchorResolutions = + []*lnwallet.AnchorResolution{ + {}, {}, + } + + if err := chanArb.Start(); err != nil { + t.Fatalf("unable to start ChannelArbitrator: %v", err) + } + defer func() { + if err := chanArb.Stop(); err != nil { + t.Fatal(err) + } + }() + + // Create htlcUpdates channel. + htlcUpdates := make(chan *ContractUpdate) + + signals := &ContractSignals{ + HtlcUpdates: htlcUpdates, + ShortChanID: lnwire.ShortChannelID{}, + } + chanArb.UpdateContractSignals(signals) + + errChan := make(chan error, 1) + respChan := make(chan *wire.MsgTx, 1) + + // With the channel found, and the request crafted, we'll send over a + // force close request to the arbitrator that watches this channel. + chanArb.forceCloseReqs <- &forceCloseReq{ + errResp: errChan, + closeTx: respChan, + } + + // The force close request should trigger broadcast of the commitment + // transaction. + chanArbCtx.AssertStateTransitions( + StateBroadcastCommit, + StateCommitmentBroadcasted, + ) + + // With the commitment tx still unconfirmed, we expect sweep attempts + // for both versions of the commitment transaction. 
+ <-chanArbCtx.sweeper.sweptInputs + <-chanArbCtx.sweeper.sweptInputs + + select { + case <-respChan: + case <-time.After(5 * time.Second): + t.Fatalf("no response received") + } + + select { + case err := <-errChan: + if err != nil { + t.Fatalf("error force closing channel: %v", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("no response received") + } +} + +type mockChannel struct { + anchorResolutions []*lnwallet.AnchorResolution +} + +func (m *mockChannel) NewAnchorResolutions() ([]*lnwallet.AnchorResolution, + error) { + + return m.anchorResolutions, nil +} func (m *mockChannel) ForceCloseChan() (*lnwallet.LocalForceCloseSummary, error) { summary := &lnwallet.LocalForceCloseSummary{ diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index cc344372..d84ed114 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -8604,13 +8604,19 @@ func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode, // on chain as he has no funds in the channel. func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, carol *lntest.HarnessNode, carolStartingBalance int64, - dave *lntest.HarnessNode, daveStartingBalance int64) { + dave *lntest.HarnessNode, daveStartingBalance int64, + anchors bool) { // Upon reconnection, the nodes should detect that Dave is out of sync. // Carol should force close the channel using her latest commitment. + expectedTxes := 1 + if anchors { + expectedTxes = 2 + } + ctxb := context.Background() - forceClose, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, + _, err := waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, ) if err != nil { t.Fatalf("unable to find Carol's force close tx in mempool: %v", @@ -8633,12 +8639,13 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, } // Generate a single block, which should confirm the closing tx. 
- block := mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, forceClose) + block := mineBlocks(t, net, 1, expectedTxes)[0] // Dave should sweep his funds immediately, as they are not timelocked. - daveSweep, err := waitForTxInMempool( - net.Miner.Node, minerMempoolTimeout, + // We also expect Dave to sweep his anchor, if present. + + _, err = waitForNTxsInMempool( + net.Miner.Node, expectedTxes, minerMempoolTimeout, ) if err != nil { t.Fatalf("unable to find Dave's sweep tx in mempool: %v", err) @@ -8653,8 +8660,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, assertNumPendingChannels(t, carol, 0, 1) // Mine the sweep tx. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, daveSweep) + block = mineBlocks(t, net, 1, expectedTxes)[0] // Now Dave should consider the channel fully closed. assertNumPendingChannels(t, dave, 0, 0) @@ -8925,6 +8931,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) { // on chain, and both of them properly carry out the DLP protocol. assertDLPExecuted( net, t, carol, carolStartingBalance, dave, daveStartingBalance, + false, ) // As a second part of this test, we will test the scenario where a @@ -14048,9 +14055,14 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, } defer shutdownAndAssert(net, t, carol) - // Now that our new node is created, we'll give him some coins it can - // use to open channels with Carol. + // Now that our new nodes are created, we'll give them some coins for + // channel opening and anchor sweeping. 
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, carol) + if err != nil { + t.Fatalf("unable to send coins to carol: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, dave) if err != nil { t.Fatalf("unable to send coins to dave: %v", err) } @@ -14216,6 +14228,7 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, // end of the protocol. assertDLPExecuted( net, t, carol, carolStartingBalance, dave, daveStartingBalance, + testCase.anchorCommit, )