contractcourt: write confirmed CommitSet to disk before MarkChannelClosed

In this commit, we make a series of changes to ensure that we'll be able
to properly survive restarts if we crash right after we call
MarkChannelClosed. In order to ensure we can survive restarts, we'll now
log the confirmed CommitSet to disk right before we close the channel.
Upon restart, we'll read these from disk so we can pick up where we left
off.

Additionally, we also will now consult the legacy chain actions if it
turns out that the channel has been closed, but we don't have a
confCommitSet written to disk. This will only be the case for nodes that
had pending close channels before this commitment.
This commit is contained in:
Olaoluwa Osuntokun 2019-05-23 20:27:04 -07:00
parent 364c0dd9f1
commit 2011ccc571
No known key found for this signature in database
GPG Key ID: CE58F7F8E20FD9A2
3 changed files with 96 additions and 35 deletions

@ -463,7 +463,7 @@ func (c *ChainArbitrator) Start() error {
// We can also leave off the set of HTLC's here as since the
// channel is already in the process of being full resolved, no
// new HTLC's we be added.
// new HTLC's will be added.
c.activeChannels[chanPoint] = NewChannelArbitrator(
arbCfg, nil, chanLog,
)

@ -3,6 +3,7 @@ package contractcourt
import (
"bytes"
"errors"
"fmt"
"sync"
"sync/atomic"
@ -370,11 +371,21 @@ func (c *ChannelArbitrator) Start() error {
}
}
// Next we'll fetch our confirmed commitment set. This will only exist
// if the channel has been closed out on chain for modern nodes. For
// older nodes, this won't be found at all, and will rely on the
// existing written chain actions. Additionally, if this channel hasn't
// logged any actions in the log, then this field won't be present.
commitSet, err := c.log.FetchConfirmedCommitSet()
if err != nil && err != errNoCommitSet && err != errScopeBucketNoExist {
return err
}
// We'll now attempt to advance our state forward based on the current
// on-chain state, and our set of active contracts.
startingState := c.state
nextState, _, err := c.advanceState(
triggerHeight, trigger, nil,
triggerHeight, trigger, commitSet,
)
if err != nil {
switch err {
@ -1512,6 +1523,52 @@ func (c *ChannelArbitrator) checkRemoteDiffActions(height uint32,
return actionMap
}
// constructChainActions returns the set of actions that should be taken for
// confirmed HTLCs at the specified height. Our actions will depend on the set
// of HTLCs that were active across all channels at the time of channel
// closure.
func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
	height uint32, trigger transitionTrigger) (ChainActionMap, error) {

	// A nil commit set means this is an older node that had a pending
	// close channel before the CommitSet was introduced. For those nodes
	// the pre-computed ChainActionMap on disk is the source of truth, so
	// we simply hand that back.
	if confCommitSet == nil {
		return c.log.FetchChainActions()
	}

	// Otherwise the full commitment set was written to disk, so we can
	// rebuild the chain actions from the HTLC sets that were live at the
	// time of closure.
	activeSets := confCommitSet.toActiveHTLCSets()
	switch *confCommitSet.ConfCommitKey {

	// Our own commitment transaction confirmed: examine it along with
	// their commitments to derive the set of chain actions.
	case LocalHtlcSet:
		return c.checkLocalChainActions(height, trigger, activeSets, true)

	// The remote commitment confirmed: gather all chain actions for the
	// remote commit, also checking the pending commit for any HTLCs that
	// need immediate handling (dust).
	case RemoteHtlcSet:
		return c.checkRemoteChainActions(height, trigger, activeSets, false)

	// The remote *pending* commitment confirmed: examine the HTLCs on
	// that unrevoked dangling commitment.
	case RemotePendingHtlcSet:
		return c.checkRemoteChainActions(height, trigger, activeSets, true)

	default:
		return nil, fmt.Errorf("unable to locate chain actions")
	}
}
// prepContractResolutions is called either in the case that we decide we need
// to go to chain, or the remote party goes to chain. Given a set of actions we
// need to take for each HTLC, this method will return a set of contract
@ -1523,39 +1580,12 @@ func (c *ChannelArbitrator) prepContractResolutions(
trigger transitionTrigger,
confCommitSet *CommitSet) ([]ContractResolver, []ResolutionMsg, error) {
htlcSets := confCommitSet.toActiveHTLCSets()
// First, we'll reconstruct a fresh set of chain actions as the set of
// actions we need to act on may differ based on if it was our
// commitment, or they're commitment that hit the chain.
var (
htlcActions ChainActionMap
err error
htlcActions, err := c.constructChainActions(
confCommitSet, height, trigger,
)
switch *confCommitSet.ConfCommitKey {
// If the local commitment transaction confirmed, then we'll examine
// that as well as their commitments to the set of chain actions.
case LocalHtlcSet:
htlcActions, err = c.checkLocalChainActions(
height, trigger, htlcSets, true,
)
// If the remote commitment confirmed, then we'll grab all the chain
// actions for the remote commit, and check the pending commit for any
// HTLCS we need to handle immediately (dust).
case RemoteHtlcSet:
htlcActions, err = c.checkRemoteChainActions(
height, trigger, htlcSets, false,
)
// Otherwise, the remote pending commitment confirmed, so we'll examine
// the HTLCs on that unrevoked dangling commitment.
case RemotePendingHtlcSet:
htlcActions, err = c.checkRemoteChainActions(
height, trigger, htlcSets, true,
)
}
if err != nil {
return nil, nil, err
}
@ -2047,13 +2077,22 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// When processing a unilateral close event, we'll
// transition to the ContractClosed state. We'll log
// out the set of resolutions such that they are
// available to fetch in that state.
// available to fetch in that state, we'll also write
// the commit set so we can reconstruct our chain
// actions on restart.
err := c.log.LogContractResolutions(contractRes)
if err != nil {
log.Errorf("unable to write resolutions: %v",
err)
return
}
err = c.log.InsertConfirmedCommitSet(
&closeInfo.CommitSet,
)
if err != nil {
log.Errorf("unable to write commit set: %v", err)
return
}
// After the set of resolutions are successfully
// logged, we can safely close the channel. After this
@ -2103,13 +2142,22 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
// When processing a unilateral close event, we'll
// transition to the ContractClosed state. We'll log
// out the set of resolutions such that they are
// available to fetch in that state.
// available to fetch in that state, we'll also write
// the commit set so we can reconstruct our chain
// actions on restart.
err := c.log.LogContractResolutions(contractRes)
if err != nil {
log.Errorf("unable to write resolutions: %v",
err)
return
}
err = c.log.InsertConfirmedCommitSet(
&uniClosure.CommitSet,
)
if err != nil {
log.Errorf("unable to write commit set: %v", err)
return
}
// After the set of resolutions are successfully
// logged, we can safely close the channel. After this

@ -26,6 +26,8 @@ type mockArbitratorLog struct {
chainActions ChainActionMap
resolvers map[ContractResolver]struct{}
commitSet *CommitSet
sync.Mutex
}
@ -108,6 +110,19 @@ func (b *mockArbitratorLog) FetchContractResolutions() (*ContractResolutions, er
return b.resolutions, nil
}
// FetchChainActions is a stub for the mock log: no chain actions are
// persisted, so a nil action map is always handed back.
func (b *mockArbitratorLog) FetchChainActions() (ChainActionMap, error) {
	var actions ChainActionMap
	return actions, nil
}
// InsertConfirmedCommitSet records the confirmed commit set on the mock log
// so a later FetchConfirmedCommitSet can return it.
//
// The mock embeds a sync.Mutex; we hold it here so tests that exercise the
// arbitrator from multiple goroutines don't race on the commitSet field
// under -race.
func (b *mockArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error {
	b.Lock()
	defer b.Unlock()

	b.commitSet = c
	return nil
}
// FetchConfirmedCommitSet returns the commit set previously stored via
// InsertConfirmedCommitSet, or nil if none was recorded.
//
// The embedded mutex is held so concurrent readers don't race against
// InsertConfirmedCommitSet under -race.
func (b *mockArbitratorLog) FetchConfirmedCommitSet() (*CommitSet, error) {
	b.Lock()
	defer b.Unlock()

	return b.commitSet, nil
}
// WipeHistory is a no-op for the mock log: nothing is persisted to disk, so
// there is no on-disk history to remove.
func (b *mockArbitratorLog) WipeHistory() error {
return nil
}
@ -1290,8 +1305,6 @@ func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
}
}
fmt.Println(len(testCases))
for _, testCase := range testCases {
testCase := testCase
testName := fmt.Sprintf("testCase: htlcExpired=%v,"+