From f052f18312b0a2a597791db6b7cf195c7d33ed55 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Thu, 12 Apr 2018 18:47:10 -0700 Subject: [PATCH 1/2] test: extend closeChannelAndAssert to also check that channel is no longer pending close In this commit, we extend the closeChannelAndAssert testing utility function to ensure that the channel is no longer marked as "pending close" in the database. With this change, we hope to catch a recently reported issue wherein users report that a co-op close channel has been fully confirmed, yet it still pops up in the `pendingchannels` command. --- lnd_test.go | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/lnd_test.go b/lnd_test.go index dd1c5878..285ccd10 100644 --- a/lnd_test.go +++ b/lnd_test.go @@ -234,7 +234,7 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest, } } - // Finally, generate a single block, wait for the final close status + // We'll now, generate a single block, wait for the final close status // update, then ensure that the closing transaction was included in the // block. block := mineBlocks(t, net, 1)[0] @@ -246,6 +246,32 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest, assertTxInBlock(t, block, closingTxid) + // Finally, the transaction should no longer be in the pending close + // state as we've just mined a block that should include the closing + // transaction. This only applies for co-op close channels though. 
+ if !force { + err = lntest.WaitPredicate(func() bool { + pendingChansRequest := &lnrpc.PendingChannelsRequest{} + pendingChanResp, err := node.PendingChannels( + ctx, pendingChansRequest, + ) + if err != nil { + return false + } + + for _, pendingClose := range pendingChanResp.PendingClosingChannels { + if pendingClose.Channel.ChannelPoint == chanPointStr { + return false + } + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("closing transaction not marked as fully closed") + } + } + return closingTxid } From aaa8fa33b1ec43f5b993c986826d5eb3e1790661 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Thu, 12 Apr 2018 18:53:22 -0700 Subject: [PATCH 2/2] contractcourt: when creating resolveContract closure don't bind to loop variable In this commit, we fix a long-standing bug where at times a co-op channel closure wouldn't be properly marked as fully closed in the database. The culprit was a recurring code flaw we've seen many times in the codebase: a closure variable that closes over a loop iterator variable. Before this instance, I assumed that this could only pop up when goroutines bind to the loop iterator within a closure. However, this instance is the exact same issue, but within a regular closure that has _delayed_ execution. As the closure doesn't execute until long after the loop has finished executing, it may still be holding onto the _last_ item the loop iterator variable was assigned to. The fix for this issue is very simple: re-assign the channel point before creating the closure. Without this fix, we would go to call db.MarkChanFullyClosed on a channel that may not yet actually be in the pending close state, causing all executions to fail. Fixes #1054. Fixes #1056. Fixes #1075. 
--- contractcourt/chain_arbitrator.go | 9 ++++++--- contractcourt/chain_watcher.go | 7 +++++-- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/contractcourt/chain_arbitrator.go b/contractcourt/chain_arbitrator.go index 9acfb4d0..a5c621bc 100644 --- a/contractcourt/chain_arbitrator.go +++ b/contractcourt/chain_arbitrator.go @@ -327,19 +327,22 @@ func (c *ChainArbitrator) Start() error { // For each open channel, we'll configure then launch a corresponding // ChannelArbitrator. for _, channel := range openChannels { + chanPoint := channel.FundingOutpoint + // First, we'll create an active chainWatcher for this channel // to ensure that we detect any relevant on chain events. chainWatcher, err := newChainWatcher( channel, c.cfg.Notifier, c.cfg.PreimageDB, c.cfg.Signer, c.cfg.IsOurAddress, func() error { - return c.resolveContract(channel.FundingOutpoint, nil) + // TODO(roasbeef): also need to pass in log? + return c.resolveContract(chanPoint, nil) }, ) if err != nil { return err } - c.activeWatchers[channel.FundingOutpoint] = chainWatcher + c.activeWatchers[chanPoint] = chainWatcher channelArb, err := newActiveChannelArbitrator( channel, c, chainWatcher.SubscribeChannelEvents(false), ) @@ -347,7 +350,7 @@ func (c *ChainArbitrator) Start() error { return err } - c.activeChannels[channel.FundingOutpoint] = channelArb + c.activeChannels[chanPoint] = channelArb } // In addition to the channels that we know to be open, we'll also diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go index 3f403fb8..d974132a 100644 --- a/contractcourt/chain_watcher.go +++ b/contractcourt/chain_watcher.go @@ -782,11 +782,14 @@ func (c *CooperativeCloseCtx) LogPotentialClose(potentialClose *channeldb.Channe // pending closed in the database, then launch a goroutine to mark the channel // fully closed upon confirmation. 
func (c *CooperativeCloseCtx) Finalize(preferredClose *channeldb.ChannelCloseSummary) error { - log.Infof("Finalizing chan close for ChannelPoint(%v)", - c.watcher.chanState.FundingOutpoint) + chanPoint := c.watcher.chanState.FundingOutpoint + + log.Infof("Finalizing chan close for ChannelPoint(%v)", chanPoint) err := c.watcher.chanState.CloseChannel(preferredClose) if err != nil { + log.Errorf("closeCtx: unable to close ChannelPoint(%v): %v", + chanPoint, err) return err }