contractcourt: ignore all other dispatch cases in closeObserver when recovered chan

In this commit, we modify the `closeObserver` to fast path the DLP
dispatch case if we detect that the channel has been restored. We do
this as otherwise, we may inadvertently enter one of the other cases
erroneously, causing us to not properly look up their DLP commitment
point.
Olaoluwa Osuntokun 2019-03-10 17:47:06 -07:00
parent c722f2c064
commit 26f6fd7db2
2 changed files with 22 additions and 21 deletions
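
As a rough illustration of the dispatch logic this change introduces (not lnd's actual code), the standalone Go sketch below mirrors the three cases touched by the diff. The function name, the returned strings, and the haveRemoteChainTip boolean are hypothetical stand-ins for the state the real closeObserver inspects.

package main

import "fmt"

// dispatchAction is a simplified, hypothetical model of the closeObserver
// switch after this change: once a channel is known to have been restored
// from a backup (isRecoveredChan), every commitment spend is routed to the
// DLP/recovery path, regardless of the broadcast state number.
func dispatchAction(broadcastStateNum, remoteStateNum uint64,
	haveRemoteChainTip, isRecoveredChan bool) string {

	switch {
	// Remote party broadcast their latest commitment: a normal remote
	// unilateral close, but only if we trust our own state, i.e. the
	// channel was not restored from a backup.
	case broadcastStateNum == remoteStateNum && !isRecoveredChan:
		return "remote force close (current state)"

	// Remote party broadcast the pending commitment one state above
	// ours; again only valid for a non-recovered channel.
	case broadcastStateNum == remoteStateNum+1 &&
		haveRemoteChainTip && !isRecoveredChan:
		return "remote force close (pending state)"

	// Either we have genuinely lost data, or we restored the channel
	// from scratch and cannot trust our state numbers at all: enter the
	// DLP recovery path.
	case broadcastStateNum > remoteStateNum || isRecoveredChan:
		return "data loss protection / recovery"

	default:
		return "possible breach (handled elsewhere)"
	}
}

func main() {
	// Even though the broadcast state is behind ours, the recovered flag
	// forces the DLP/recovery path.
	fmt.Println(dispatchAction(5, 7, false, true))
}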


@@ -386,12 +386,12 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
}
switch {
// If state number spending transaction matches the
// current latest state, then they've initiated a
// unilateral close. So we'll trigger the unilateral
// close signal so subscribers can clean up the state
// as necessary.
case broadcastStateNum == remoteStateNum:
// If state number spending transaction matches the current
// latest state, then they've initiated a unilateral close. So
// we'll trigger the unilateral close signal so subscribers can
// clean up the state as necessary.
case broadcastStateNum == remoteStateNum && !isRecoveredChan:
err := c.dispatchRemoteForceClose(
commitSpend, *remoteCommit,
c.cfg.chanState.RemoteCurrentRevocation,
@@ -402,14 +402,13 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
c.cfg.chanState.FundingOutpoint, err)
}
// We'll also handle the case of the remote party
// broadcasting their commitment transaction which is
// one height above ours. This case can arise when we
// initiate a state transition, but the remote party
// has a fail crash _after_ accepting the new state,
// but _before_ sending their signature to us.
// We'll also handle the case of the remote party broadcasting
// their commitment transaction which is one height above ours.
// This case can arise when we initiate a state transition, but
// the remote party has a fail crash _after_ accepting the new
// state, but _before_ sending their signature to us.
case broadcastStateNum == remoteStateNum+1 &&
remoteChainTip != nil:
remoteChainTip != nil && !isRecoveredChan:
err := c.dispatchRemoteForceClose(
commitSpend, remoteChainTip.Commitment,
@@ -425,8 +424,12 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
// known state for them, and they don't have a pending
// commitment (we write them to disk before sending out), then
// this means that we've lost data. In this case, we'll enter
// the DLP protocol.
case broadcastStateNum > remoteStateNum:
// the DLP protocol. Otherwise, if we've recovered our channel
// state from scratch, then we don't know what the precise
// current state is, so we assume either the remote party
// forced closed or we've been breached. In the latter case,
// our tower will take care of us.
case broadcastStateNum > remoteStateNum || isRecoveredChan:
log.Warnf("Remote node broadcast state #%v, "+
"which is more than 1 beyond best known "+
"state #%v!!! Attempting recovery...",

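For context, the isRecoveredChan flag used in the cases above comes from the channel's persisted status. The sketch below shows one way a caller could derive it, assuming channeldb's OpenChannel.HasChanStatus method and the ChanStatusRestored bit as they existed in lnd around the time of this commit; the isRecovered helper itself is hypothetical.

package example

import "github.com/lightningnetwork/lnd/channeldb"

// isRecovered reports whether the channel was restored from a static channel
// backup (SCB). Sketch only: it assumes channeldb.OpenChannel exposes
// HasChanStatus and the ChanStatusRestored status bit.
func isRecovered(chanState *channeldb.OpenChannel) bool {
	// A restored channel carries the ChanStatusRestored flag; for such
	// channels we cannot trust our local view of the commitment state.
	return chanState.HasChanStatus(channeldb.ChanStatusRestored)
}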

@@ -2436,8 +2436,6 @@ func (r *rpcServer) ListChannels(ctx context.Context,
resp := &lnrpc.ListChannelsResponse{}
// TODO(roasbeef): expose chan status flags as well
graph := r.server.chanDB.ChannelGraph()
dbChannels, err := r.server.chanDB.FetchAllOpenChannels()
@@ -4686,17 +4684,17 @@ func (r *rpcServer) ForwardingHistory(ctx context.Context,
// once lnd is running, or via the InitWallet and UnlockWallet methods from the
// WalletUnlocker service.
func (r *rpcServer) ExportChannelBackup(ctx context.Context,
in *lnrpc.ChannelPoint) (*lnrpc.ChannelBackup, error) {
in *lnrpc.ExportChannelBackupRequest) (*lnrpc.ChannelBackup, error) {
// First, we'll convert the lnrpc channel point into a wire.OutPoint
// that we can manipulate.
txid, err := getChanPointFundingTxid(in)
txid, err := getChanPointFundingTxid(in.ChanPoint)
if err != nil {
return nil, err
}
chanPoint := wire.OutPoint{
Hash: *txid,
Index: in.OutputIndex,
Index: in.ChanPoint.OutputIndex,
}
// Next, we'll attempt to fetch a channel backup for this channel from
@@ -4730,7 +4728,7 @@ func (r *rpcServer) ExportChannelBackup(ctx context.Context,
}
return &lnrpc.ChannelBackup{
ChanPoint: in,
ChanPoint: in.ChanPoint,
ChanBackup: packedBackup,
}, nil
}
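
The rpcserver change above switches ExportChannelBackup from taking a bare lnrpc.ChannelPoint to the dedicated lnrpc.ExportChannelBackupRequest that wraps it. The sketch below shows the new request shape from a client's point of view; the exportBackup helper and the already-connected lnrpc.LightningClient are assumptions for illustration, not part of this commit.

package example

import (
	"context"

	"github.com/lightningnetwork/lnd/lnrpc"
)

// exportBackup is a hypothetical helper showing the updated request shape:
// the channel point is now nested inside ExportChannelBackupRequest rather
// than being the request message itself. The caller supplies an
// already-connected, authenticated lnrpc.LightningClient.
func exportBackup(ctx context.Context, client lnrpc.LightningClient,
	fundingTxid string, outputIndex uint32) (*lnrpc.ChannelBackup, error) {

	return client.ExportChannelBackup(ctx, &lnrpc.ExportChannelBackupRequest{
		ChanPoint: &lnrpc.ChannelPoint{
			FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{
				FundingTxidStr: fundingTxid,
			},
			OutputIndex: outputIndex,
		},
	})
}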