From 18f79e20d51a79ac7e7d7bfd748a1afc0aad258b Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 18 Nov 2020 22:45:35 +0100
Subject: [PATCH 1/8] lnwallet/test_utils: set up asymmetric commits

The remote and local commitments are not symmetric, so we set each of
them up to point to its own commitment tx.
---
 lnwallet/test_utils.go | 30 ++++++++++++++++++++++++------
 1 file changed, 24 insertions(+), 6 deletions(-)

diff --git a/lnwallet/test_utils.go b/lnwallet/test_utils.go
index 55d8a342..bd048b2c 100644
--- a/lnwallet/test_utils.go
+++ b/lnwallet/test_utils.go
@@ -262,7 +262,7 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 	)
 	bobBalance := lnwire.NewMSatFromSatoshis(channelBal)
 
-	aliceCommit := channeldb.ChannelCommitment{
+	aliceLocalCommit := channeldb.ChannelCommitment{
 		CommitHeight:  0,
 		LocalBalance:  aliceBalance,
 		RemoteBalance: bobBalance,
@@ -271,7 +271,16 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 		CommitTx:      aliceCommitTx,
 		CommitSig:     testSigBytes,
 	}
-	bobCommit := channeldb.ChannelCommitment{
+	aliceRemoteCommit := channeldb.ChannelCommitment{
+		CommitHeight:  0,
+		LocalBalance:  aliceBalance,
+		RemoteBalance: bobBalance,
+		CommitFee:     commitFee,
+		FeePerKw:      btcutil.Amount(feePerKw),
+		CommitTx:      bobCommitTx,
+		CommitSig:     testSigBytes,
+	}
+	bobLocalCommit := channeldb.ChannelCommitment{
 		CommitHeight:  0,
 		LocalBalance:  bobBalance,
 		RemoteBalance: aliceBalance,
@@ -280,6 +289,15 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 		CommitTx:      bobCommitTx,
 		CommitSig:     testSigBytes,
 	}
+	bobRemoteCommit := channeldb.ChannelCommitment{
+		CommitHeight:  0,
+		LocalBalance:  bobBalance,
+		RemoteBalance: aliceBalance,
+		CommitFee:     commitFee,
+		FeePerKw:      btcutil.Amount(feePerKw),
+		CommitTx:      aliceCommitTx,
+		CommitSig:     testSigBytes,
+	}
 
 	var chanIDBytes [8]byte
 	if _, err := io.ReadFull(rand.Reader, chanIDBytes[:]); err != nil {
@@ -302,8 +320,8 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 		RemoteCurrentRevocation: bobCommitPoint,
 		RevocationProducer:      alicePreimageProducer,
 		RevocationStore:         shachain.NewRevocationStore(),
-		LocalCommitment:         aliceCommit,
-		RemoteCommitment:        aliceCommit,
+		LocalCommitment:         aliceLocalCommit,
+		RemoteCommitment:        aliceRemoteCommit,
 		Db:                      dbAlice,
 		Packager:                channeldb.NewChannelPackager(shortChanID),
 		FundingTxn:              testTx,
@@ -320,8 +338,8 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 		RemoteCurrentRevocation: aliceCommitPoint,
 		RevocationProducer:      bobPreimageProducer,
 		RevocationStore:         shachain.NewRevocationStore(),
-		LocalCommitment:         bobCommit,
-		RemoteCommitment:        bobCommit,
+		LocalCommitment:         bobLocalCommit,
+		RemoteCommitment:        bobRemoteCommit,
 		Db:                      dbBob,
 		Packager:                channeldb.NewChannelPackager(shortChanID),
 	}

From 450da3d2f4f346090c4b1be0110fc47a0c9b21c8 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 18 Nov 2020 22:45:35 +0100
Subject: [PATCH 2/8] contractcourt/chain_watcher test: do proper state rollback

The tests didn't actually roll back the channel state; they relied only
on the state number to determine whether we had lost state. Now we
properly roll back the channel to a previous state, in preparation for
upcoming changes.
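To illustrate the intended flow, here is a minimal sketch of how a test
can use the helpers introduced below (executeStateTransitions and the
state copies it returns); the index stateIndex is a placeholder chosen
by the test:

    // Execute a number of state transitions, making a DB-backed copy
    // of Alice's channel state before each transition (including the
    // initial state).
    states, cleanStates, err := executeStateTransitions(
            t, htlcAmt, aliceChannel, bobChannel, numUpdates,
    )
    if err != nil {
            t.Fatalf("unable to trigger state transition: %v", err)
    }
    defer cleanStates()

    // Hand the chain watcher an earlier copy of the state, properly
    // rolling back the channel rather than just faking a state number.
    aliceChanState := states[stateIndex]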
---
 contractcourt/chain_watcher_test.go | 108 +++++++++++++++++++---------
 contractcourt/utils_test.go         |  72 +++++++++++++++++++
 2 files changed, 147 insertions(+), 33 deletions(-)

diff --git a/contractcourt/chain_watcher_test.go b/contractcourt/chain_watcher_test.go
index 50e73ec5..b6e5d8e1 100644
--- a/contractcourt/chain_watcher_test.go
+++ b/contractcourt/chain_watcher_test.go
@@ -204,9 +204,32 @@ type dlpTestCase struct {
 	NumUpdates uint8
 }
 
+// executeStateTransitions executes the given number of state transitions.
+// Copies of Alice's channel state before each transition (including the
+// initial state) are returned.
 func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliSatoshi,
 	aliceChannel, bobChannel *lnwallet.LightningChannel,
-	numUpdates uint8) error {
+	numUpdates uint8) ([]*channeldb.OpenChannel, func(), error) {
+
+	// We'll make a copy of the channel state before each transition.
+	var (
+		chanStates   []*channeldb.OpenChannel
+		cleanupFuncs []func()
+	)
+
+	cleanAll := func() {
+		for _, f := range cleanupFuncs {
+			f()
+		}
+	}
+
+	state, f, err := copyChannelState(aliceChannel.State())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	chanStates = append(chanStates, state)
+	cleanupFuncs = append(cleanupFuncs, f)
 
 	for i := 0; i < int(numUpdates); i++ {
 		addFakeHTLC(
@@ -215,11 +238,21 @@ func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliSatoshi,
 
 		err := lnwallet.ForceStateTransition(aliceChannel, bobChannel)
 		if err != nil {
-			return err
+			cleanAll()
+			return nil, nil, err
 		}
+
+		state, f, err := copyChannelState(aliceChannel.State())
+		if err != nil {
+			cleanAll()
+			return nil, nil, err
+		}
+
+		chanStates = append(chanStates, state)
+		cleanupFuncs = append(cleanupFuncs, f)
 	}
 
-	return nil
+	return chanStates, cleanAll, nil
 }
 
 // TestChainWatcherDataLossProtect tests that if we've lost data (and are
@@ -250,6 +283,24 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
 		}
 		defer cleanUp()
 
+		// Based on the number of random updates for this state, make a
+		// new HTLC to add to the commitment, and then lock in a state
+		// transition.
+		const htlcAmt = 1000
+		states, cleanStates, err := executeStateTransitions(
+			t, htlcAmt, aliceChannel, bobChannel,
+			testCase.BroadcastStateNum,
+		)
+		if err != nil {
+			t.Errorf("unable to trigger state "+
+				"transition: %v", err)
+			return false
+		}
+		defer cleanStates()
+
+		// We'll use the state this test case wants Alice to start at.
+		aliceChanState := states[testCase.NumUpdates]
+
 		// With the channels created, we'll now create a chain watcher
 		// instance which will be watching for any closes of Alice's
 		// channel.
@@ -259,7 +310,7 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
 			ConfChan:  make(chan *chainntnfs.TxConfirmation),
 		}
 		aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
-			chanState: aliceChannel.State(),
+			chanState: aliceChanState,
 			notifier:  aliceNotifier,
 			signer:    aliceChannel.Signer,
 			extractStateNumHint: func(*wire.MsgTx,
@@ -279,19 +330,6 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
 		}
 		defer aliceChainWatcher.Stop()
 
-		// Based on the number of random updates for this state, make a
-		// new HTLC to add to the commitment, and then lock in a state
-		// transition.
-		const htlcAmt = 1000
-		err = executeStateTransitions(
-			t, htlcAmt, aliceChannel, bobChannel, testCase.NumUpdates,
-		)
-		if err != nil {
-			t.Errorf("unable to trigger state "+
-				"transition: %v", err)
-			return false
-		}
-
 		// We'll request a new channel event subscription from Alice's
 		// chain watcher so we can be notified of our fake close below.
 		chanEvents := aliceChainWatcher.SubscribeChannelEvents()
 
 		// Otherwise, we'll feed in this new state number as a response
 		// to the query, and insert the expected DLP commit point.
 		dlpPoint := aliceChannel.State().RemoteCurrentRevocation
-		err = aliceChannel.State().MarkDataLoss(dlpPoint)
+		err = aliceChanState.MarkDataLoss(dlpPoint)
 		if err != nil {
 			t.Errorf("unable to insert dlp point: %v", err)
 			return false
@@ -421,6 +459,24 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 	}
 	defer cleanUp()
 
+	// We'll execute a number of state transitions based on the
+	// randomly selected number from testing/quick. We do this to
+	// get more coverage of various state hint encodings beyond 0
+	// and 1.
+	const htlcAmt = 1000
+	states, cleanStates, err := executeStateTransitions(
+		t, htlcAmt, aliceChannel, bobChannel, numUpdates,
+	)
+	if err != nil {
+		t.Errorf("unable to trigger state "+
+			"transition: %v", err)
+		return false
+	}
+	defer cleanStates()
+
+	// We'll use the state this test case wants Alice to start at.
+	aliceChanState := states[numUpdates]
+
 	// With the channels created, we'll now create a chain watcher
 	// instance which will be watching for any closes of Alice's
 	// channel.
@@ -430,7 +486,7 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 		ConfChan:  make(chan *chainntnfs.TxConfirmation),
 	}
 	aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
-		chanState: aliceChannel.State(),
+		chanState: aliceChanState,
 		notifier:  aliceNotifier,
 		signer:    aliceChannel.Signer,
 		extractStateNumHint: lnwallet.GetStateNumHint,
@@ -443,20 +499,6 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 	}
 	defer aliceChainWatcher.Stop()
 
-	// We'll execute a number of state transitions based on the
-	// randomly selected number from testing/quick. We do this to
-	// get more coverage of various state hint encodings beyond 0
-	// and 1.
-	const htlcAmt = 1000
-	err = executeStateTransitions(
-		t, htlcAmt, aliceChannel, bobChannel, numUpdates,
-	)
-	if err != nil {
-		t.Errorf("unable to trigger state "+
-			"transition: %v", err)
-		return false
-	}
-
 	// We'll request a new channel event subscription from Alice's
 	// chain watcher so we can be notified of our fake close below.
 	chanEvents := aliceChainWatcher.SubscribeChannelEvents()
diff --git a/contractcourt/utils_test.go b/contractcourt/utils_test.go
index 2bf81b41..11f23d8c 100644
--- a/contractcourt/utils_test.go
+++ b/contractcourt/utils_test.go
@@ -1,10 +1,16 @@
 package contractcourt
 
 import (
+	"fmt"
+	"io"
+	"io/ioutil"
 	"os"
+	"path/filepath"
 	"runtime/pprof"
 	"testing"
 	"time"
+
+	"github.com/lightningnetwork/lnd/channeldb"
 )
 
 // timeout implements a test level timeout.
@@ -24,3 +30,69 @@ func timeout(t *testing.T) func() {
 		close(done)
 	}
 }
+
+func copyFile(dest, src string) error {
+	s, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer s.Close()
+
+	d, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+
+	if _, err := io.Copy(d, s); err != nil {
+		d.Close()
+		return err
+	}
+
+	return d.Close()
+}
+
+// copyChannelState copies the OpenChannel state by copying the database and
+// creating a new struct from it. The copied state and a cleanup function are
+// returned.
+func copyChannelState(state *channeldb.OpenChannel) (
+	*channeldb.OpenChannel, func(), error) {
+
+	// Make a copy of the DB.
+	dbFile := filepath.Join(state.Db.Path(), "channel.db")
+	tempDbPath, err := ioutil.TempDir("", "past-state")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cleanup := func() {
+		os.RemoveAll(tempDbPath)
+	}
+
+	tempDbFile := filepath.Join(tempDbPath, "channel.db")
+	err = copyFile(tempDbFile, dbFile)
+	if err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	newDb, err := channeldb.Open(tempDbPath)
+	if err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	chans, err := newDb.FetchAllChannels()
+	if err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	// We only support DBs with a single channel, for now.
+	if len(chans) != 1 {
+		cleanup()
+		return nil, nil, fmt.Errorf("found %d chans in the db",
+			len(chans))
+	}
+
+	return chans[0], cleanup, nil
+}

From acc45934f89d4c0de863f6a7590a98be55eb33e4 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 18 Nov 2020 22:45:35 +0100
Subject: [PATCH 3/8] contractcourt/chain_watcher: define handleKnownLocalState

We can only rely on the commit set and height being correct for the
current commit, so check that first.
---
 contractcourt/chain_watcher.go | 45 ++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index ee62c0ce..aacac7d5 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -533,6 +533,19 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 		commitTxBroadcast, obfuscator,
 	)
 
+	// We'll go on to check whether it could be our own commitment
+	// that was published and now is confirmed.
+	ok, err = c.handleKnownLocalState(commitSpend, chainSet)
+	if err != nil {
+		log.Errorf("Unable to handle known local state: %v",
+			err)
+		return
+	}
+
+	if ok {
+		return
+	}
+
 	// Based on the output scripts within this commitment, we'll
 	// determine if this is our commitment transaction or not (a
 	// self force close).
@@ -724,6 +737,38 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 	}
 }
 
+// handleKnownLocalState checks whether the passed spend is a local state that
+// is known to us (the current state). If so we will act on this state using
+// the passed chainSet. If this is not a known local state, false is returned.
+func (c *chainWatcher) handleKnownLocalState(
+	commitSpend *chainntnfs.SpendDetail, chainSet *chainSet) (bool, error) {
+
+	// If the channel is recovered, we won't have a local commit to check
+	// against, so immediately return.
+	if c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored) {
+		return false, nil
+	}
+
+	commitTxBroadcast := commitSpend.SpendingTx
+	commitHash := commitTxBroadcast.TxHash()
+
+	// Check whether our latest local state hit the chain.
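+	// Note that we match on the txid here rather than on the extracted
+	// state number, since the state number can only be trusted for our
+	// own current commitment.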
+	if chainSet.localCommit.CommitTx.TxHash() != commitHash {
+		return false, nil
+	}
+
+	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
+	if err := c.dispatchLocalForceClose(
+		commitSpend, chainSet.localCommit, chainSet.commitSet,
+	); err != nil {
+		return false, fmt.Errorf("unable to handle local "+
+			"close for chan_point=%v: %v",
+			c.cfg.chanState.FundingOutpoint, err)
+	}
+
+	return true, nil
+}
+
 // toSelfAmount takes a transaction and returns the sum of all outputs that pay
 // to a script that the wallet controls. If no outputs pay to us, then we
 // return zero. This is possible as our output may have been trimmed due to

From 743ea7be74bf89a772b51c95b69b15ba5c2c84a0 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 18 Nov 2020 22:45:35 +0100
Subject: [PATCH 4/8] contractcourt/chain_watcher: handleKnownRemoteState

Similar to what we did for the local state handling, we extract the
handling of all known remote states we can act on (breach, current,
pending state) into its own method.

Since we want to handle the case where we lost state (both in case of
local and remote close) last, we don't rely on the remote state number
to check which commit we are looking at, but match on TXIDs directly.
---
 channeldb/channel.go           |   6 +-
 channeldb/channel_test.go      |   2 +-
 contractcourt/chain_watcher.go | 200 +++++++++++++++++++++------------
 3 files changed, 131 insertions(+), 77 deletions(-)

diff --git a/channeldb/channel.go b/channeldb/channel.go
index ec6654aa..48a52648 100644
--- a/channeldb/channel.go
+++ b/channeldb/channel.go
@@ -163,9 +163,9 @@ var (
 	// channel.
 	ErrChanBorked = fmt.Errorf("cannot mutate borked channel")
 
-	// errLogEntryNotFound is returned when we cannot find a log entry at
+	// ErrLogEntryNotFound is returned when we cannot find a log entry at
 	// the height requested in the revocation log.
-	errLogEntryNotFound = fmt.Errorf("log entry not found")
+	ErrLogEntryNotFound = fmt.Errorf("log entry not found")
 
 	// errHeightNotFound is returned when a query for channel balances at
 	// a height that we have not reached yet is made.
@@ -3469,7 +3469,7 @@ func fetchChannelLogEntry(log kvdb.RBucket,
 	logEntrykey := makeLogKey(updateNum)
 	commitBytes := log.Get(logEntrykey[:])
 	if commitBytes == nil {
-		return ChannelCommitment{}, errLogEntryNotFound
+		return ChannelCommitment{}, ErrLogEntryNotFound
 	}
 
 	commitReader := bytes.NewReader(commitBytes)
diff --git a/channeldb/channel_test.go b/channeldb/channel_test.go
index 50ffb3bf..cc64597d 100644
--- a/channeldb/channel_test.go
+++ b/channeldb/channel_test.go
@@ -1485,7 +1485,7 @@ func TestBalanceAtHeight(t *testing.T) {
 			targetHeight:          unknownHeight,
 			expectedLocalBalance:  0,
 			expectedRemoteBalance: 0,
-			expectedError:         errLogEntryNotFound,
+			expectedError:         ErrLogEntryNotFound,
 		},
 		{
 			name: "height not reached",
diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index aacac7d5..4364dc39 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -546,6 +546,22 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 		return
 	}
 
+	// Now that we know it is neither a non-cooperative closure nor
+	// a local close with the latest state, we check if it is the
+	// remote that closed with any prior or current state.
+	ok, err = c.handleKnownRemoteState(
+		commitSpend, broadcastStateNum, chainSet,
+	)
+	if err != nil {
+		log.Errorf("Unable to handle known remote state: %v",
+			err)
+		return
+	}
+
+	if ok {
+		return
+	}
+
 	// Based on the output scripts within this commitment, we'll
 	// determine if this is our commitment transaction or not (a
 	// self force close).
@@ -607,51 +623,6 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 	)
 
 	switch {
-	// If state number spending transaction matches the current
-	// latest state, then they've initiated a unilateral close. So
-	// we'll trigger the unilateral close signal so subscribers can
-	// clean up the state as necessary.
-	case broadcastStateNum == chainSet.remoteStateNum &&
-		!isRecoveredChan:
-
-		log.Infof("Remote party broadcast base set, "+
-			"commit_num=%v", chainSet.remoteStateNum)
-
-		chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
-		err := c.dispatchRemoteForceClose(
-			commitSpend, chainSet.remoteCommit,
-			chainSet.commitSet,
-			c.cfg.chanState.RemoteCurrentRevocation,
-		)
-		if err != nil {
-			log.Errorf("unable to handle remote "+
-				"close for chan_point=%v: %v",
-				c.cfg.chanState.FundingOutpoint, err)
-		}
-
-	// We'll also handle the case of the remote party broadcasting
-	// their commitment transaction which is one height above ours.
-	// This case can arise when we initiate a state transition, but
-	// the remote party has a fail crash _after_ accepting the new
-	// state, but _before_ sending their signature to us.
-	case broadcastStateNum == chainSet.remoteStateNum+1 &&
-		chainSet.remotePendingCommit != nil && !isRecoveredChan:
-
-		log.Infof("Remote party broadcast pending set, "+
-			"commit_num=%v", chainSet.remoteStateNum+1)
-
-		chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet
-		err := c.dispatchRemoteForceClose(
-			commitSpend, *chainSet.remotePendingCommit,
-			chainSet.commitSet,
-			c.cfg.chanState.RemoteNextRevocation,
-		)
-		if err != nil {
-			log.Errorf("unable to handle remote "+
-				"close for chan_point=%v: %v",
-				c.cfg.chanState.FundingOutpoint, err)
-		}
-
 	// If the remote party has broadcasted a state beyond our best
 	// known state for them, and they don't have a pending
 	// commitment (we write them to disk before sending out), then
@@ -710,21 +681,6 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 				c.cfg.chanState.FundingOutpoint, err)
 		}
 
-	// If the state number broadcast is lower than the remote
-	// node's current un-revoked height, then THEY'RE ATTEMPTING TO
-	// VIOLATE THE CONTRACT LAID OUT WITHIN THE PAYMENT CHANNEL.
-	// Therefore we close the signal indicating a revoked broadcast
-	// to allow subscribers to swiftly dispatch justice!!!
-	case broadcastStateNum < chainSet.remoteStateNum:
-		err := c.dispatchContractBreach(
-			commitSpend, &chainSet.remoteCommit,
-			broadcastStateNum,
-		)
-		if err != nil {
-			log.Errorf("unable to handle channel "+
-				"breach for chan_point=%v: %v",
-				c.cfg.chanState.FundingOutpoint, err)
-		}
 	}
 
 	// Now that a spend has been detected, we've done our job, so
@@ -769,6 +725,115 @@ func (c *chainWatcher) handleKnownLocalState(
 	return true, nil
 }
 
+// handleKnownRemoteState checks whether the passed spend is a remote state
+// that is known to us (a revoked, current or pending state). If so we will act
+// on this state using the passed chainSet. If this is not a known remote
+// state, false is returned.
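+//
+// Since the state number extracted from a broadcast commitment can only be
+// trusted for the current state, the current and pending commitments are
+// matched on txid, while revoked states are detected by reconstructing the
+// breach retribution at the broadcast state number and comparing txids.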
+func (c *chainWatcher) handleKnownRemoteState(
+	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
+	chainSet *chainSet) (bool, error) {
+
+	// If the channel is recovered, we won't have any remote commit to
+	// check against, so immediately return.
+	if c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored) {
+		return false, nil
+	}
+
+	commitTxBroadcast := commitSpend.SpendingTx
+	commitHash := commitTxBroadcast.TxHash()
+	spendHeight := uint32(commitSpend.SpendingHeight)
+
+	switch {
+	// If the spending transaction matches the current latest state, then
+	// they've initiated a unilateral close. So we'll trigger the
+	// unilateral close signal so subscribers can clean up the state as
+	// necessary.
+	case chainSet.remoteCommit.CommitTx.TxHash() == commitHash:
+		log.Infof("Remote party broadcast base set, "+
+			"commit_num=%v", chainSet.remoteStateNum)
+
+		chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
+		err := c.dispatchRemoteForceClose(
+			commitSpend, chainSet.remoteCommit,
+			chainSet.commitSet,
+			c.cfg.chanState.RemoteCurrentRevocation,
+		)
+		if err != nil {
+			return false, fmt.Errorf("unable to handle remote "+
+				"close for chan_point=%v: %v",
+				c.cfg.chanState.FundingOutpoint, err)
+		}
+
+		return true, nil
+
+	// We'll also handle the case of the remote party broadcasting
+	// their commitment transaction which is one height above ours.
+	// This case can arise when we initiate a state transition, but
+	// the remote party crashes _after_ accepting the new state,
+	// but _before_ sending their signature to us.
+	case chainSet.remotePendingCommit != nil &&
+		chainSet.remotePendingCommit.CommitTx.TxHash() == commitHash:
+
+		log.Infof("Remote party broadcast pending set, "+
+			"commit_num=%v", chainSet.remoteStateNum+1)
+
+		chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet
+		err := c.dispatchRemoteForceClose(
+			commitSpend, *chainSet.remotePendingCommit,
+			chainSet.commitSet,
+			c.cfg.chanState.RemoteNextRevocation,
+		)
+		if err != nil {
+			return false, fmt.Errorf("unable to handle remote "+
+				"close for chan_point=%v: %v",
+				c.cfg.chanState.FundingOutpoint, err)
+		}
+
+		return true, nil
+	}
+
+	// We check if we have a revoked state at this state num that matches
+	// the spend transaction.
+	retribution, err := lnwallet.NewBreachRetribution(
+		c.cfg.chanState, broadcastStateNum, spendHeight,
+	)
+
+	switch {
+
+	// If we had no log entry at this height, this was not a revoked state.
+	case err == channeldb.ErrLogEntryNotFound:
+		return false, nil
+	case err == channeldb.ErrNoPastDeltas:
+		return false, nil
+
+	case err != nil:
+		return false, fmt.Errorf("unable to create breach "+
+			"retribution: %v", err)
+	}
+
+	// We found a revoked state at this height, but it could still be our
+	// own broadcasted state we are looking at. Therefore check that the
+	// commit matches before assuming it was a breach.
+	if retribution.BreachTransaction.TxHash() != commitHash {
+		return false, nil
+	}
+
+	// THEY'RE ATTEMPTING TO VIOLATE THE CONTRACT LAID OUT WITHIN THE
+	// PAYMENT CHANNEL. Therefore we close the signal indicating a revoked
+	// broadcast to allow subscribers to swiftly dispatch justice!!!
+	err = c.dispatchContractBreach(
+		commitSpend, &chainSet.remoteCommit,
+		broadcastStateNum, retribution,
+	)
+	if err != nil {
+		return false, fmt.Errorf("unable to handle channel "+
+			"breach for chan_point=%v: %v",
+			c.cfg.chanState.FundingOutpoint, err)
+	}
+
+	return true, nil
+}
+
 // toSelfAmount takes a transaction and returns the sum of all outputs that pay
 // to a script that the wallet controls. If no outputs pay to us, then we
 // return zero. This is possible as our output may have been trimmed due to
@@ -993,8 +1058,8 @@ func (c *chainWatcher) dispatchRemoteForceClose(
 // materials required to bring the cheater to justice, then notify all
 // registered subscribers of this event.
 func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail,
-	remoteCommit *channeldb.ChannelCommitment,
-	broadcastStateNum uint64) error {
+	remoteCommit *channeldb.ChannelCommitment, broadcastStateNum uint64,
+	retribution *lnwallet.BreachRetribution) error {
 
 	log.Warnf("Remote peer has breached the channel contract for "+
 		"ChannelPoint(%v). Revoked state #%v was broadcast!!!",
@@ -1006,17 +1071,6 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail
 
 	spendHeight := uint32(spendEvent.SpendingHeight)
 
-	// Create a new reach retribution struct which contains all the data
-	// needed to swiftly bring the cheating peer to justice.
-	//
-	// TODO(roasbeef): move to same package
-	retribution, err := lnwallet.NewBreachRetribution(
-		c.cfg.chanState, broadcastStateNum, spendHeight,
-	)
-	if err != nil {
-		return fmt.Errorf("unable to create breach retribution: %v", err)
-	}
-
 	// Nil the curve before printing.
 	if retribution.RemoteOutputSignDesc != nil &&
 		retribution.RemoteOutputSignDesc.DoubleTweak != nil {

From 93d917d82a51ff91795310509d990fbc4ed71b46 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 18 Nov 2020 22:45:35 +0100
Subject: [PATCH 5/8] contractcourt/chain_watcher: handleUnknownRemoteState

This commit extracts the data loss protect recovery procedure into its
own method.
---
 contractcourt/chain_watcher.go | 143 +++++++++++++++++----------------
 1 file changed, 74 insertions(+), 69 deletions(-)

diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index 4364dc39..2b2b0492 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -613,78 +613,25 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 	log.Warnf("Unprompted commitment broadcast for "+
 		"ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)
 
-	// If this channel has been recovered, then we'll modify our
-	// behavior as it isn't possible for us to close out the
-	// channel off-chain ourselves. It can only be the remote party
-	// force closing, or a cooperative closure we signed off on
-	// before losing data getting confirmed in the chain.
-	isRecoveredChan := c.cfg.chanState.HasChanStatus(
-		channeldb.ChanStatusRestored,
+	// Since it was neither a known remote state, nor a local state
+	// that was published, it most likely means we lost state and
+	// the remote node closed. In this case, we must start the DLP
+	// protocol in the hope of getting our money back.
+	ok, err = c.handleUnknownRemoteState(
+		commitSpend, broadcastStateNum, chainSet,
 	)
-
-	switch {
-	// If the remote party has broadcasted a state beyond our best
-	// known state for them, and they don't have a pending
-	// commitment (we write them to disk before sending out), then
-	// this means that we've lost data. In this case, we'll enter
-	// the DLP protocol. Otherwise, if we've recovered our channel
-	// state from scratch, then we don't know what the precise
-	// current state is, so we assume either the remote party
-	// forced closed or we've been breached. In the latter case,
-	// our tower will take care of us.
-	case broadcastStateNum > chainSet.remoteStateNum || isRecoveredChan:
-		log.Warnf("Remote node broadcast state #%v, "+
-			"which is more than 1 beyond best known "+
-			"state #%v!!! Attempting recovery...",
-			broadcastStateNum, chainSet.remoteStateNum)
-
-		// If this isn't a tweakless commitment, then we'll
-		// need to wait for the remote party's latest unrevoked
-		// commitment point to be presented to us as we need
-		// this to sweep. Otherwise, we can dispatch the remote
-		// close and sweep immediately using a fake commitPoint
-		// as it isn't actually needed for recovery anymore.
-		commitPoint := c.cfg.chanState.RemoteCurrentRevocation
-		tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless()
-		if !tweaklessCommit {
-			commitPoint = c.waitForCommitmentPoint()
-			if commitPoint == nil {
-				return
-			}
-
-			log.Infof("Recovered commit point(%x) for "+
-				"channel(%v)! Now attempting to use it to "+
-				"sweep our funds...",
-				commitPoint.SerializeCompressed(),
-				c.cfg.chanState.FundingOutpoint)
-
-		} else {
-			log.Infof("ChannelPoint(%v) is tweakless, "+
-				"moving to sweep directly on chain",
-				c.cfg.chanState.FundingOutpoint)
-		}
-
-		// Since we don't have the commitment stored for this
-		// state, we'll just pass an empty commitment within
-		// the commitment set. Note that this means we won't be
-		// able to recover any HTLC funds.
-		//
-		// TODO(halseth): can we try to recover some HTLCs?
-		chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
-		err = c.dispatchRemoteForceClose(
-			commitSpend, channeldb.ChannelCommitment{},
-			chainSet.commitSet, commitPoint,
-		)
-		if err != nil {
-			log.Errorf("unable to handle remote "+
-				"close for chan_point=%v: %v",
-				c.cfg.chanState.FundingOutpoint, err)
-		}
-
+	if err != nil {
+		log.Errorf("Unable to handle unknown remote state: %v",
+			err)
+		return
 	}
 
-	// Now that a spend has been detected, we've done our job, so
-	// we'll exit immediately.
+	if ok {
+		return
+	}
+
+	log.Warnf("Unable to handle spending tx %v of channel point %v",
+		commitTxBroadcast.TxHash(), c.cfg.chanState.FundingOutpoint)
 	return
 
 // The chainWatcher has been signalled to exit, so we'll do so now.
@@ -834,6 +781,64 @@ func (c *chainWatcher) handleKnownRemoteState(
 	return true, nil
 }
 
+// handleUnknownRemoteState is the last attempt we make at reclaiming funds
+// from the closed channel, by checking whether the passed spend _could_ be a
+// remote spend that is unknown to us (we lost state). We will try to initiate
+// Data Loss Protection in order to restore our commit point and reclaim our
+// funds from the channel. If we are not able to act on it, false is returned.
+func (c *chainWatcher) handleUnknownRemoteState(
+	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
+	chainSet *chainSet) (bool, error) {
+
+	log.Warnf("Remote node broadcast state #%v, "+
+		"which is more than 1 beyond best known "+
+		"state #%v!!! Attempting recovery...",
+		broadcastStateNum, chainSet.remoteStateNum)
+
+	// If this isn't a tweakless commitment, then we'll need to wait for
+	// the remote party's latest unrevoked commitment point to be presented
+	// to us as we need this to sweep.
+	// Otherwise, we can dispatch the remote close and sweep immediately
+	// using a fake commitPoint as it isn't actually needed for recovery
+	// anymore.
+	commitPoint := c.cfg.chanState.RemoteCurrentRevocation
+	tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless()
+	if !tweaklessCommit {
+		commitPoint = c.waitForCommitmentPoint()
+		if commitPoint == nil {
+			return false, fmt.Errorf("unable to get commit point")
+		}
+
+		log.Infof("Recovered commit point(%x) for "+
+			"channel(%v)! Now attempting to use it to "+
+			"sweep our funds...",
+			commitPoint.SerializeCompressed(),
+			c.cfg.chanState.FundingOutpoint)
+
+	} else {
+		log.Infof("ChannelPoint(%v) is tweakless, "+
+			"moving to sweep directly on chain",
+			c.cfg.chanState.FundingOutpoint)
+	}
+
+	// Since we don't have the commitment stored for this state, we'll just
+	// pass an empty commitment within the commitment set. Note that this
+	// means we won't be able to recover any HTLC funds.
+	//
+	// TODO(halseth): can we try to recover some HTLCs?
+	chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
+	err := c.dispatchRemoteForceClose(
+		commitSpend, channeldb.ChannelCommitment{},
+		chainSet.commitSet, commitPoint,
+	)
+	if err != nil {
+		return false, fmt.Errorf("unable to handle remote "+
+			"close for chan_point=%v: %v",
+			c.cfg.chanState.FundingOutpoint, err)
+	}
+
+	return true, nil
+}
+
 // toSelfAmount takes a transaction and returns the sum of all outputs that pay
 // to a script that the wallet controls. If no outputs pay to us, then we
 // return zero. This is possible as our output may have been trimmed due to

From 5bb89961625dcdec0790f28b3e292c9fd31bab69 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 18 Nov 2020 22:45:35 +0100
Subject: [PATCH 6/8] contractcourt/chain_watcher: handleUnknownLocalState

Similar to what we did for the other states, we extract the handling of
a future local state into its own method.
---
 contractcourt/chain_watcher.go | 117 ++++++++++++++++++---------------
 1 file changed, 65 insertions(+), 52 deletions(-)

diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index 2b2b0492..51d9c6f2 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -17,7 +17,6 @@ import (
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/input"
 	"github.com/lightningnetwork/lnd/lnwallet"
-	"github.com/lightningnetwork/lnd/shachain"
 )
 
 const (
@@ -324,18 +323,27 @@ func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription {
 	return sub
 }
 
-// isOurCommitment returns true if the passed commitSpend is a spend of the
-// funding transaction using our commitment transaction (a local force close).
-// In order to do this in a state agnostic manner, we'll make our decisions
-// based off of only the set of outputs included.
-func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
+// handleUnknownLocalState checks whether the passed spend _could_ be a local
+// state that for some reason is unknown to us. This could be a state published
+// by us before we lost state, which we will try to sweep. Or it could be one
+// of our revoked states that somehow made it to the chain. If that's the case
+// we cannot really hope that we'll be able to get our money back, but we'll
+// try to sweep it anyway. If this is not an unknown local state, false is
+// returned.
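+//
+// Note that the commitment scripts are re-derived at the broadcast state
+// number rather than taken from our stored local commitment, since the
+// confirmed state may not be the one we have on disk.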
+func (c *chainWatcher) handleUnknownLocalState(
 	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
-	revocationProducer shachain.Producer,
-	chanType channeldb.ChannelType) (bool, error) {
+	chainSet *chainSet) (bool, error) {
+
+	// If the spend was a local commitment, at this point it must either be
+	// a past state (we breached!) or a future state (we lost state!). In
+	// either case, the only thing we can do is to attempt to sweep what is
+	// there.
 
 	// First, we'll re-derive our commitment point for this state since
 	// this is what we use to randomize each of the keys for this state.
-	commitSecret, err := revocationProducer.AtIndex(broadcastStateNum)
+	commitSecret, err := c.cfg.chanState.RevocationProducer.AtIndex(
+		broadcastStateNum,
+	)
 	if err != nil {
 		return false, err
 	}
@@ -345,13 +353,14 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
 	// and remote keys for this state. We use our point as only we can
 	// revoke our own commitment.
 	commitKeyRing := lnwallet.DeriveCommitmentKeys(
-		commitPoint, true, chanType, &localChanCfg, &remoteChanCfg,
+		commitPoint, true, c.cfg.chanState.ChanType,
+		&c.cfg.chanState.LocalChanCfg, &c.cfg.chanState.RemoteChanCfg,
 	)
 
 	// With the keys derived, we'll construct the remote script that'll be
 	// present if they have a non-dust balance on the commitment.
 	remoteScript, _, err := lnwallet.CommitScriptToRemote(
-		chanType, commitKeyRing.ToRemoteKey,
+		c.cfg.chanState.ChanType, commitKeyRing.ToRemoteKey,
 	)
 	if err != nil {
 		return false, err
@@ -361,12 +370,13 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
 	// the remote party allowing them to claim this output before the CSV
 	// delay if we breach.
 	localScript, err := input.CommitScriptToSelf(
-		uint32(localChanCfg.CsvDelay), commitKeyRing.ToLocalKey,
-		commitKeyRing.RevocationKey,
+		uint32(c.cfg.chanState.LocalChanCfg.CsvDelay),
+		commitKeyRing.ToLocalKey, commitKeyRing.RevocationKey,
 	)
 	if err != nil {
 		return false, err
 	}
+
 	localPkScript, err := input.WitnessScriptHash(localScript)
 	if err != nil {
 		return false, err
@@ -375,21 +385,40 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
 	// With all our scripts assembled, we'll examine the outputs of the
 	// commitment transaction to determine if this is a local force close
 	// or not.
+	ourCommit := false
 	for _, output := range commitSpend.SpendingTx.TxOut {
 		pkScript := output.PkScript
 
 		switch {
 		case bytes.Equal(localPkScript, pkScript):
-			return true, nil
+			ourCommit = true
 
 		case bytes.Equal(remoteScript.PkScript, pkScript):
-			return true, nil
+			ourCommit = true
 		}
 	}
 
-	// If neither of these scripts are present, then it isn't a local force
-	// close.
-	return false, nil
+	// If the script is not present, this cannot be our commit.
+	if !ourCommit {
+		return false, nil
+	}
+
+	log.Warnf("Detected local unilateral close of unknown state %v "+
+		"(our state=%v)", broadcastStateNum,
+		chainSet.localCommit.CommitHeight)
+
+	// If this is our commitment transaction, then we try to act even
+	// though we won't be able to sweep HTLCs.
+	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
+	if err := c.dispatchLocalForceClose(
+		commitSpend, chainSet.localCommit, chainSet.commitSet,
+	); err != nil {
+		return false, fmt.Errorf("unable to handle local "+
+			"close for chan_point=%v: %v",
+			c.cfg.chanState.FundingOutpoint, err)
+	}
+
+	return true, nil
+}
 
 // chainSet includes all the information we need to dispatch a channel close
@@ -562,39 +591,6 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 		return
 	}
 
-	// Based on the output scripts within this commitment, we'll
-	// determine if this is our commitment transaction or not (a
-	// self force close).
-	isOurCommit, err := isOurCommitment(
-		c.cfg.chanState.LocalChanCfg,
-		c.cfg.chanState.RemoteChanCfg, commitSpend,
-		broadcastStateNum, c.cfg.chanState.RevocationProducer,
-		c.cfg.chanState.ChanType,
-	)
-	if err != nil {
-		log.Errorf("unable to determine self commit for "+
-			"chan_point=%v: %v",
-			c.cfg.chanState.FundingOutpoint, err)
-		return
-	}
-
-	// If this is our commitment transaction, then we can exit here
-	// as we don't have any further processing we need to do (we
-	// can't cheat ourselves :p).
-	if isOurCommit {
-		chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
-
-		if err := c.dispatchLocalForceClose(
-			commitSpend, chainSet.localCommit,
-			chainSet.commitSet,
-		); err != nil {
-			log.Errorf("unable to handle local"+
-				"close for chan_point=%v: %v",
-				c.cfg.chanState.FundingOutpoint, err)
-		}
-		return
-	}
-
 	// Next, we'll check to see if this is a cooperative channel
 	// closure or not. This is characterized by having an input
 	// sequence number that's finalized. This won't happen with
@@ -610,9 +606,26 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 		return
 	}
 
-	log.Warnf("Unprompted commitment broadcast for "+
+	log.Warnf("Unknown commitment broadcast for "+
 		"ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)
 
+	// We'll try to recover as best as possible from losing state.
+	// We first check if this was a local unknown state. This could
+	// happen if we force close, then lose state or attempt
+	// recovery before the commitment confirms.
+	ok, err = c.handleUnknownLocalState(
+		commitSpend, broadcastStateNum, chainSet,
+	)
+	if err != nil {
+		log.Errorf("Unable to handle unknown local state: %v",
+			err)
+		return
+	}
+
+	if ok {
+		return
+	}
+
 	// Since it was neither a known remote state, nor a local state
 	// that was published, it most likely means we lost state and
 	// the remote node closed. In this case, we must start the DLP

From 2a7a34ae10e318a198a33a66b2b8249164140391 Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 18 Nov 2020 22:45:35 +0100
Subject: [PATCH 7/8] contractcourt+lnwallet: use state num instead of commit
 height for outdated local state

This commit fixes a bug that would cause us to not sweep our local
output in case we force closed, then lost state or attempted recovery.
The reason is that we would use our local commit height when deriving
our scripts, which would be incorrect. Instead we use the extracted
state number to derive the correct scripts, allowing us to sweep the
output.

Although this is an unlikely scenario, we would leave money on chain in
this case without any warning (since we would just end up with an empty
delay script) and forget about the spend.
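The essence of the fix is deriving the scripts at the extracted state
number instead of at our stored commit height. A simplified sketch (it
assumes the existing input.ComputeCommitmentPoint helper; the exact
change is in the diff below):

    // Derive our commitment point at the *broadcast* state number, not
    // at our possibly stale local commit height.
    revocation, err := chanState.RevocationProducer.AtIndex(stateNum)
    if err != nil {
            return nil, err
    }
    commitPoint := input.ComputeCommitmentPoint(revocation[:])

    // Scripts derived from this point now match the confirmed
    // commitment, so the to_local delay output can be found and swept.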
---
 contractcourt/chain_watcher.go | 15 +++++++++------
 lnwallet/channel.go            | 33 +++++++++++++++++++--------------
 2 files changed, 28 insertions(+), 20 deletions(-)

diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go
index 51d9c6f2..97aa7852 100644
--- a/contractcourt/chain_watcher.go
+++ b/contractcourt/chain_watcher.go
@@ -411,7 +411,7 @@ func (c *chainWatcher) handleUnknownLocalState(
 	// though we won't be able to sweep HTLCs.
 	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
 	if err := c.dispatchLocalForceClose(
-		commitSpend, chainSet.localCommit, chainSet.commitSet,
+		commitSpend, broadcastStateNum, chainSet.commitSet,
 	); err != nil {
 		return false, fmt.Errorf("unable to handle local "+
 			"close for chan_point=%v: %v",
@@ -564,7 +564,9 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 
 	// We'll go on to check whether it could be our own commitment
 	// that was published and now is confirmed.
-	ok, err = c.handleKnownLocalState(commitSpend, chainSet)
+	ok, err = c.handleKnownLocalState(
+		commitSpend, broadcastStateNum, chainSet,
+	)
 	if err != nil {
 		log.Errorf("Unable to handle known local state: %v",
 			err)
@@ -657,7 +659,8 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 // is known to us (the current state). If so we will act on this state using
 // the passed chainSet. If this is not a known local state, false is returned.
 func (c *chainWatcher) handleKnownLocalState(
-	commitSpend *chainntnfs.SpendDetail, chainSet *chainSet) (bool, error) {
+	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
+	chainSet *chainSet) (bool, error) {
 
 	// If the channel is recovered, we won't have a local commit to check
 	// against, so immediately return.
@@ -675,7 +678,7 @@ func (c *chainWatcher) handleKnownLocalState(
 
 	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
 	if err := c.dispatchLocalForceClose(
-		commitSpend, chainSet.localCommit, chainSet.commitSet,
+		commitSpend, broadcastStateNum, chainSet.commitSet,
 	); err != nil {
 		return false, fmt.Errorf("unable to handle local "+
 			"close for chan_point=%v: %v",
@@ -945,14 +948,14 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet
 
 // dispatchLocalForceClose processes a unilateral close by us being confirmed.
 func (c *chainWatcher) dispatchLocalForceClose(
 	commitSpend *chainntnfs.SpendDetail,
-	localCommit channeldb.ChannelCommitment, commitSet CommitSet) error {
+	stateNum uint64, commitSet CommitSet) error {
 
 	log.Infof("Local unilateral close of ChannelPoint(%v) "+
 		"detected", c.cfg.chanState.FundingOutpoint)
 
 	forceClose, err := lnwallet.NewLocalForceCloseSummary(
 		c.cfg.chanState, c.cfg.signer,
-		commitSpend.SpendingTx, localCommit,
+		commitSpend.SpendingTx, stateNum,
 	)
 	if err != nil {
 		return err
diff --git a/lnwallet/channel.go b/lnwallet/channel.go
index 090f8f00..4ab00e28 100644
--- a/lnwallet/channel.go
+++ b/lnwallet/channel.go
@@ -6038,7 +6038,7 @@ func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) {
 	localCommitment := lc.channelState.LocalCommitment
 	summary, err := NewLocalForceCloseSummary(
 		lc.channelState, lc.Signer, commitTx,
-		localCommitment,
+		localCommitment.CommitHeight,
 	)
 	if err != nil {
 		return nil, err
@@ -6054,8 +6054,8 @@ func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) {
 
 // NewLocalForceCloseSummary generates a LocalForceCloseSummary from the given
 // channel state. The passed commitTx must be a fully signed commitment
-// transaction corresponding to localCommit.
+// transaction corresponding to the passed state number.
-func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Signer,
-	commitTx *wire.MsgTx, localCommit channeldb.ChannelCommitment) (
+func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
+	signer input.Signer, commitTx *wire.MsgTx, stateNum uint64) (
 	*LocalForceCloseSummary, error) {
 
 	// Re-derive the original pkScript for to-self output within the
@@ -6063,9 +6063,11 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
 	// output in the commitment transaction and potentially for creating
 	// the sign descriptor.
 	csvTimeout := uint32(chanState.LocalChanCfg.CsvDelay)
-	revocation, err := chanState.RevocationProducer.AtIndex(
-		localCommit.CommitHeight,
-	)
+
+	// We use the passed state num to derive our scripts, since in case
+	// this is after recovery, our latest channel state might not be up to
+	// date.
+	revocation, err := chanState.RevocationProducer.AtIndex(stateNum)
 	if err != nil {
 		return nil, err
 	}
@@ -6090,8 +6092,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
 	// We'll return the details of this output to the caller so they can
 	// sweep it once it's mature.
 	var (
-		delayIndex  uint32
-		delayScript []byte
+		delayIndex uint32
+		delayOut   *wire.TxOut
 	)
 	for i, txOut := range commitTx.TxOut {
 		if !bytes.Equal(payToUsScriptHash, txOut.PkScript) {
@@ -6099,7 +6101,7 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
 		}
 
 		delayIndex = uint32(i)
-		delayScript = txOut.PkScript
+		delayOut = txOut
 		break
 	}
@@ -6110,8 +6112,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
 	// If the output is non-existent (dust), have the sign descriptor be
 	// nil.
 	var commitResolution *CommitOutputResolution
-	if len(delayScript) != 0 {
-		localBalance := localCommit.LocalBalance
+	if delayOut != nil {
+		localBalance := delayOut.Value
 		commitResolution = &CommitOutputResolution{
 			SelfOutPoint: wire.OutPoint{
 				Hash:  commitTx.TxHash(),
@@ -6122,8 +6124,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
 				SingleTweak:   keyRing.LocalCommitKeyTweak,
 				WitnessScript: selfScript,
 				Output: &wire.TxOut{
-					PkScript: delayScript,
-					Value:    int64(localBalance.ToSatoshis()),
+					PkScript: delayOut.PkScript,
+					Value:    localBalance,
 				},
 				HashType: txscript.SigHashAll,
 			},
@@ -6133,8 +6135,11 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
 	// Once the delay output has been found (if it exists), then we'll also
 	// need to create a series of sign descriptors for any lingering
-	// outgoing HTLC's that we'll need to claim as well.
+	// outgoing HTLC's that we'll need to claim as well. If this is after
+	// recovery, there is not much we can do with HTLCs, so we'll always
+	// use what we have in our latest state when extracting resolutions.
 	txHash := commitTx.TxHash()
+	localCommit := chanState.LocalCommitment
 	htlcResolutions, err := extractHtlcResolutions(
 		chainfee.SatPerKWeight(localCommit.FeePerKw), true, signer,
 		localCommit.Htlcs, keyRing, &chanState.LocalChanCfg,

From ef426be351a9967266314a11c84cea2e0572ddab Mon Sep 17 00:00:00 2001
From: "Johan T. Halseth"
Date: Wed, 18 Nov 2020 22:45:36 +0100
Subject: [PATCH 8/8] contractcourt/chainwatcher test: add test cases for
 future force close

This adds a scenario to the local force close test cases where a node
force closes one of its channels, then loses state (or does recovery)
before the commitment is confirmed.
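Concretely, each test case now pins the state Alice's chain watcher
starts from separately from the number of updates executed, e.g.
(mirroring the table in the diff below):

    {
            numUpdates: 20,
            localState: 5,
            remoteOutputOnly: true,
    },

so the state-20 commitment confirms while Alice's copied channel state
is still at state 5.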
Without the previous commit, this would go undetected.
---
 contractcourt/chain_watcher_test.go | 43 ++++++++++++++++++++++++++---
 1 file changed, 39 insertions(+), 4 deletions(-)

diff --git a/contractcourt/chain_watcher_test.go b/contractcourt/chain_watcher_test.go
index b6e5d8e1..4078f8a5 100644
--- a/contractcourt/chain_watcher_test.go
+++ b/contractcourt/chain_watcher_test.go
@@ -446,7 +446,7 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 	// table driven tests. We'll assert that for any number of state
 	// updates, and if the commitment transaction has our output or not,
 	// we're able to properly detect a local force close.
-	localForceCloseScenario := func(t *testing.T, numUpdates uint8,
+	localForceCloseScenario := func(t *testing.T, numUpdates, localState uint8,
 		remoteOutputOnly, localOutputOnly bool) bool {
 
 		// First, we'll create two channels which already have
@@ -475,7 +475,7 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 		defer cleanStates()
 
 		// We'll use the state this test case wants Alice to start at.
-		aliceChanState := states[numUpdates]
+		aliceChanState := states[localState]
 
 		// With the channels created, we'll now create a chain watcher
@@ -530,7 +530,19 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 		// should be able to detect the close based on the commitment
 		// outputs.
 		select {
-		case <-chanEvents.LocalUnilateralClosure:
+		case summary := <-chanEvents.LocalUnilateralClosure:
+			// Make sure we correctly extracted the commit
+			// resolution if we had a local output.
+			if remoteOutputOnly {
+				if summary.CommitResolution != nil {
+					t.Fatalf("expected no commit resolution")
+				}
+			} else {
+				if summary.CommitResolution == nil {
+					t.Fatalf("expected commit resolution")
+				}
+			}
+
 			return true
 
 		case <-time.After(time.Second * 5):
@@ -544,31 +556,53 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 	// present and absent with non or some number of updates in the channel.
 	testCases := []struct {
 		numUpdates       uint8
+		localState       uint8
 		remoteOutputOnly bool
 		localOutputOnly  bool
 	}{
 		{
 			numUpdates:       0,
+			localState:       0,
 			remoteOutputOnly: true,
 		},
 		{
 			numUpdates:       0,
+			localState:       0,
 			remoteOutputOnly: false,
 		},
 		{
 			numUpdates:      0,
+			localState:      0,
 			localOutputOnly: true,
 		},
 		{
 			numUpdates:       20,
+			localState:       20,
 			remoteOutputOnly: false,
 		},
 		{
 			numUpdates:       20,
+			localState:       20,
 			remoteOutputOnly: true,
 		},
 		{
 			numUpdates:      20,
+			localState:      20,
 			localOutputOnly: true,
 		},
+		{
+			numUpdates:       20,
+			localState:       5,
+			remoteOutputOnly: false,
+		},
+		{
+			numUpdates:       20,
+			localState:       5,
+			remoteOutputOnly: true,
+		},
+		{
+			numUpdates:      20,
+			localState:      5,
 			localOutputOnly: true,
 		},
 	}
@@ -584,7 +618,8 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 			t.Parallel()
 
 			localForceCloseScenario(
-				t, testCase.numUpdates, testCase.remoteOutputOnly,
+				t, testCase.numUpdates, testCase.localState,
+				testCase.remoteOutputOnly,
 				testCase.localOutputOnly,
 			)
 		})