diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go index 38ed88d3..c183788f 100644 --- a/contractcourt/chain_watcher.go +++ b/contractcourt/chain_watcher.go @@ -21,6 +21,7 @@ import ( type LocalUnilateralCloseInfo struct { *chainntnfs.SpendDetail *lnwallet.LocalForceCloseSummary + *channeldb.ChannelCloseSummary } // CooperativeCloseInfo encapsulates all the informnation we need to act @@ -560,8 +561,7 @@ func (c *chainWatcher) dispatchLocalForceClose( } // As we've detected that the channel has been closed, immediately - // delete the state from disk, creating a close summary for future - // usage by related sub-systems. + // create a close summary for future usage by related sub-systems. chanSnapshot := forceClose.ChanSnapshot closeSummary := &channeldb.ChannelCloseSummary{ ChanPoint: chanSnapshot.ChannelPoint, @@ -589,14 +589,12 @@ func (c *chainWatcher) dispatchLocalForceClose( htlcValue := btcutil.Amount(htlc.SweepSignDesc.Output.Value) closeSummary.TimeLockedBalance += htlcValue } - err = c.cfg.chanState.CloseChannel(closeSummary) - if err != nil { - return fmt.Errorf("unable to delete channel state: %v", err) - } // With the event processed, we'll now notify all subscribers of the // event. - closeInfo := &LocalUnilateralCloseInfo{commitSpend, forceClose} + closeInfo := &LocalUnilateralCloseInfo{ + commitSpend, forceClose, closeSummary, + } c.Lock() for _, sub := range c.clientSubscriptions { select { @@ -643,22 +641,10 @@ func (c *chainWatcher) dispatchRemoteForceClose( return err } - // As we've detected that the channel has been closed, immediately - // delete the state from disk, creating a close summary for future - // usage by related sub-systems. - err = c.cfg.chanState.CloseChannel(&uniClose.ChannelCloseSummary) - if err != nil { - return fmt.Errorf("unable to delete channel state: %v", err) - } - // With the event processed, we'll now notify all subscribers of the // event. 
c.Lock() for _, sub := range c.clientSubscriptions { - // TODO(roasbeef): send msg before writing to disk - // * need to ensure proper fault tolerance in all cases - // * get ACK from the consumer of the ntfn before writing to disk? - // * no harm in repeated ntfns: at least once semantics select { case sub.RemoteUnilateralClosure <- uniClose: case <-c.quit: @@ -745,6 +731,7 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail // channel as pending force closed. // // TODO(roasbeef): instead mark we got all the monies? + // TODO(halseth): move responsibility to breach arbiter? settledBalance := remoteCommit.LocalBalance.ToSatoshis() closeSummary := channeldb.ChannelCloseSummary{ ChanPoint: c.cfg.chanState.FundingOutpoint, diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go index 4ff38282..9cd2fee2 100644 --- a/contractcourt/channel_arbitrator.go +++ b/contractcourt/channel_arbitrator.go @@ -1438,6 +1438,25 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) { return } + // After the set of resolutions is successfully + // logged, we can safely close the channel. After this + // succeeds we won't be getting chain events anymore, + // so we must make sure we can recover on restart after + // it is marked closed. If the next state transition + // fails, we'll start up in the prior state again, and + // we'll no longer be getting chain events. In this + // case we must manually re-trigger the state + // transition into StateContractClosed based on the + // close status of the channel. + err = c.cfg.MarkChannelClosed( + closeInfo.ChannelCloseSummary, + ) + if err != nil { + log.Errorf("unable to mark "+ + "channel closed: %v", err) + return + } + // We'll now advance our state machine until it reaches // a terminal state. 
_, _, err = c.advanceState( @@ -1482,6 +1501,24 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) { return } + // After the set of resolutions is successfully + // logged, we can safely close the channel. After this + // succeeds we won't be getting chain events anymore, + // so we must make sure we can recover on restart after + // it is marked closed. If the next state transition + // fails, we'll start up in the prior state again, and + // we'll no longer be getting chain events. In this + // case we must manually re-trigger the state + // transition into StateContractClosed based on the + // close status of the channel. + closeSummary := &uniClosure.ChannelCloseSummary + err = c.cfg.MarkChannelClosed(closeSummary) + if err != nil { + log.Errorf("unable to mark channel closed: %v", + err) + return + } + // We'll now advance our state machine until it reaches // a terminal state. _, _, err = c.advanceState( diff --git a/contractcourt/channel_arbitrator_test.go b/contractcourt/channel_arbitrator_test.go index 36d393d0..39dfa3e5 100644 --- a/contractcourt/channel_arbitrator_test.go +++ b/contractcourt/channel_arbitrator_test.go @@ -367,6 +367,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) { CloseTx: &wire.MsgTx{}, HtlcResolutions: &lnwallet.HtlcResolutions{}, }, + &channeldb.ChannelCloseSummary{}, } // It should transition StateContractClosed -> StateFullyResolved.