contractcourt: move responsibility for closing force closes to chanArb

This commit moves the responsibility for closing local and remote force
closes in the database from the chain watcher to the channel arbitrator.

We do this because we previously would close the channel in the
database before sending the close event to the channel arbitrator. This
could lead to a situation where the channel was marked closed in the
database, but the channel arbitrator never received the event before
shutdown. Since we don't listen for chain events for channels that are
already closed, such channels would be stuck in the pending close state
forever, as the channel arbitrator state machine would never progress.

We fix this by letting the ChannelArbitrator close the channel in the
database instead. After the contract resolutions are logged (in the
state callback, before transitioning to StateContractClosed), we mark
the channel closed in the database. This ensures the channel is marked
closed only after the resolutions have been successfully persisted.
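
To make the new ordering concrete, the following is a minimal, self-contained Go sketch of the flow described above. Every type and helper in it (CloseSummary, arbConfig, handleForceClose) is an illustrative stand-in rather than lnd's actual API; only the ordering itself (persist the resolutions, then mark the channel closed, then advance the state machine) mirrors this commit.

package main

import (
	"errors"
	"fmt"
)

// CloseSummary is a stand-in for channeldb.ChannelCloseSummary.
type CloseSummary struct {
	ChanPoint string
}

// arbConfig mirrors the idea behind the arbitrator's config: the database
// operations are injected as callbacks, so the arbitrator (not the chain
// watcher) decides when the channel is marked closed.
type arbConfig struct {
	LogResolutions    func() error
	MarkChannelClosed func(*CloseSummary) error
}

// handleForceClose shows the ordering this commit establishes: contract
// resolutions are persisted first, and only then is the channel marked
// closed. If either step fails we return early, so after a restart the
// arbitrator comes up in its prior state and can retry, instead of the
// channel being marked closed without the arbitrator ever acting on it.
func handleForceClose(cfg arbConfig, summary *CloseSummary) error {
	if err := cfg.LogResolutions(); err != nil {
		return fmt.Errorf("unable to log resolutions: %v", err)
	}
	if err := cfg.MarkChannelClosed(summary); err != nil {
		return fmt.Errorf("unable to mark channel closed: %v", err)
	}

	// Only now is it safe to transition to StateContractClosed and start
	// resolving the individual contracts.
	return nil
}

func main() {
	cfg := arbConfig{
		LogResolutions: func() error { return nil },
		MarkChannelClosed: func(*CloseSummary) error {
			return errors.New("db unavailable")
		},
	}

	err := handleForceClose(cfg, &CloseSummary{ChanPoint: "txid:0"})
	if err != nil {
		fmt.Println("restart will retry from the prior state:", err)
	}
}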
Johan T. Halseth 2018-08-21 12:21:15 +02:00
parent c11e0940c2
commit 75df58c68b
3 changed files with 44 additions and 19 deletions

@@ -21,6 +21,7 @@ import (
type LocalUnilateralCloseInfo struct {
*chainntnfs.SpendDetail
*lnwallet.LocalForceCloseSummary
*channeldb.ChannelCloseSummary
}
// CooperativeCloseInfo encapsulates all the information we need to act
@@ -560,8 +561,7 @@ func (c *chainWatcher) dispatchLocalForceClose(
}
// As we've detected that the channel has been closed, immediately
// delete the state from disk, creating a close summary for future
// usage by related sub-systems.
// creating a close summary for future usage by related sub-systems.
chanSnapshot := forceClose.ChanSnapshot
closeSummary := &channeldb.ChannelCloseSummary{
ChanPoint: chanSnapshot.ChannelPoint,
@@ -589,14 +589,12 @@ func (c *chainWatcher) dispatchLocalForceClose(
htlcValue := btcutil.Amount(htlc.SweepSignDesc.Output.Value)
closeSummary.TimeLockedBalance += htlcValue
}
err = c.cfg.chanState.CloseChannel(closeSummary)
if err != nil {
return fmt.Errorf("unable to delete channel state: %v", err)
}
// With the event processed, we'll now notify all subscribers of the
// event.
closeInfo := &LocalUnilateralCloseInfo{commitSpend, forceClose}
closeInfo := &LocalUnilateralCloseInfo{
commitSpend, forceClose, closeSummary,
}
c.Lock()
for _, sub := range c.clientSubscriptions {
select {
@@ -643,22 +641,10 @@ func (c *chainWatcher) dispatchRemoteForceClose(
return err
}
// As we've detected that the channel has been closed, immediately
// delete the state from disk, creating a close summary for future
// usage by related sub-systems.
err = c.cfg.chanState.CloseChannel(&uniClose.ChannelCloseSummary)
if err != nil {
return fmt.Errorf("unable to delete channel state: %v", err)
}
// With the event processed, we'll now notify all subscribers of the
// event.
c.Lock()
for _, sub := range c.clientSubscriptions {
// TODO(roasbeef): send msg before writing to disk
// * need to ensure proper fault tolerance in all cases
// * get ACK from the consumer of the ntfn before writing to disk?
// * no harm in repeated ntfns: at least once semantics
select {
case sub.RemoteUnilateralClosure <- uniClose:
case <-c.quit:
@@ -745,6 +731,7 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail
// channel as pending force closed.
//
// TODO(roasbeef): instead mark we got all the monies?
// TODO(halseth): move responsibility to breach arbiter?
settledBalance := remoteCommit.LocalBalance.ToSatoshis()
closeSummary := channeldb.ChannelCloseSummary{
ChanPoint: c.cfg.chanState.FundingOutpoint,

@@ -1438,6 +1438,25 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
return
}
// After the set of resolutions is successfully
// logged, we can safely close the channel. After this
// succeeds we won't be getting chain events anymore,
// so we must make sure we can recover on restart after
// it is marked closed. If the next state transition
// fails, we'll start up in the prior state again, and
// we'll no longer be getting chain events. In this
// case we must manually re-trigger the state
// transition into StateContractClosed based on the
// close status of the channel.
err = c.cfg.MarkChannelClosed(
closeInfo.ChannelCloseSummary,
)
if err != nil {
log.Errorf("unable to mark "+
"channel closed: %v", err)
return
}
// We'll now advance our state machine until it reaches
// a terminal state.
_, _, err = c.advanceState(
@@ -1482,6 +1501,24 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
return
}
// After the set of resolutions is successfully
// logged, we can safely close the channel. After this
// succeeds we won't be getting chain events anymore,
// so we must make sure we can recover on restart after
// it is marked closed. If the next state transition
// fails, we'll start up in the prior state again, and
// we'll no longer be getting chain events. In this
// case we must manually re-trigger the state
// transition into StateContractClosed based on the
// close status of the channel.
closeSummary := &uniClosure.ChannelCloseSummary
err = c.cfg.MarkChannelClosed(closeSummary)
if err != nil {
log.Errorf("unable to mark channel closed: %v",
err)
return
}
// We'll now advance our state machine until it reaches
// a terminal state.
_, _, err = c.advanceState(
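
The manual re-trigger mentioned in the comments above happens when the arbitrator starts up again: if the channel is already marked closed in the database but the persisted state never reached StateContractClosed, no further chain events will arrive, so the trigger has to be synthesized locally. Below is a rough, self-contained sketch of that idea; the function and the state/trigger types (startupTrigger, arbState, chainTrigger, and friends) are hypothetical stand-ins, not lnd's actual start-up code.

package main

import "fmt"

// Stand-in state and trigger enums; the names echo the arbitrator's states
// but the definitions here are purely illustrative.
type arbState int

const (
	stateDefault arbState = iota
	stateContractClosed
	stateFullyResolved
)

type trigger int

const (
	noTrigger trigger = iota
	chainTrigger
)

// startupTrigger sketches the recovery path described in the comments above:
// if the channel is already marked closed on disk but the arbitrator's
// persisted state never reached StateContractClosed, no chain event will be
// redelivered, so the close trigger must be recreated manually on restart.
func startupTrigger(isPendingClose bool, state arbState) trigger {
	if isPendingClose && state != stateContractClosed &&
		state != stateFullyResolved {

		return chainTrigger
	}
	return noTrigger
}

func main() {
	// The channel was marked closed, but the process died before the state
	// machine advanced: the trigger is synthesized so the arbitrator can
	// still transition into StateContractClosed.
	fmt.Println(startupTrigger(true, stateDefault) == chainTrigger)
}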

@@ -367,6 +367,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) {
CloseTx: &wire.MsgTx{},
HtlcResolutions: &lnwallet.HtlcResolutions{},
},
&channeldb.ChannelCloseSummary{},
}
// It should transition StateContractClosed -> StateFullyResolved.