contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before this commit, we would directly check the broadcast commitment's txid against what we know to be our best local commitment.

However, in the case of DLP recovery from an SCB, it's possible that the user force closed, _then_ attempted to recover their channels. As a result, we need to check the outputs directly in order to also handle this rare but possible recovery scenario.

The new detection method uses the outputs themselves: based on the state number, we re-derive the expected scripts and check whether they appear on the broadcast commitment. If not, then we know it's a remote force close.

A new test has been added to exercise this new behavior, ensuring we catch local closes both when we have a direct output on the commitment and when we don't.
commit bdf1194835 (parent 30e0dd7311)
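Before diving into the diff, here is a condensed sketch of the new check (illustrative Go only, not lnd's exact code; the two pkScripts are assumed to have already been re-derived for the broadcast state number, exactly as the full isOurCommitment below does, and the bytes and github.com/btcsuite/btcd/wire imports are assumed):

	// sketchIsOurCommit scans the confirmed transaction's outputs for
	// either of the scripts we re-derived for the broadcast state number.
	// Both scripts are tweaked by *our* per-commitment point, so a match
	// on either output means the confirmed tx is our own commitment.
	func sketchIsOurCommit(spendingTx *wire.MsgTx,
		localPkScript, remotePkScript []byte) bool {

		for _, txOut := range spendingTx.TxOut {
			if bytes.Equal(txOut.PkScript, localPkScript) ||
				bytes.Equal(txOut.PkScript, remotePkScript) {
				return true
			}
		}
		return false
	}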
@@ -1,6 +1,7 @@
 package contractcourt
 
 import (
+	"bytes"
 	"fmt"
 	"sync"
 	"sync/atomic"
@@ -16,6 +17,7 @@ import (
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/input"
 	"github.com/lightningnetwork/lnd/lnwallet"
+	"github.com/lightningnetwork/lnd/shachain"
 )
 
 const (
@@ -271,6 +273,74 @@ func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription {
 	return sub
 }
 
+// isOurCommitment returns true if the passed commitSpend is a spend of the
+// funding transaction using our commitment transaction (a local force close).
+// In order to do this in a state agnostic manner, we'll make our decisions
+// based off of only the set of outputs included.
+func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
+	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
+	revocationProducer shachain.Producer) (bool, error) {
+
+	// First, we'll re-derive our commitment point for this state since
+	// this is what we use to randomize each of the keys for this state.
+	commitSecret, err := revocationProducer.AtIndex(broadcastStateNum)
+	if err != nil {
+		return false, err
+	}
+	commitPoint := input.ComputeCommitmentPoint(commitSecret[:])
+
+	// Now that we have the commit point, we'll derive the tweaked local
+	// and remote keys for this state. We use our point as only we can
+	// revoke our own commitment.
+	localDelayBasePoint := localChanCfg.DelayBasePoint.PubKey
+	localDelayKey := input.TweakPubKey(localDelayBasePoint, commitPoint)
+	remoteNonDelayPoint := remoteChanCfg.PaymentBasePoint.PubKey
+	remotePayKey := input.TweakPubKey(remoteNonDelayPoint, commitPoint)
+
+	// With the keys derived, we'll construct the remote script that'll be
+	// present if they have a non-dust balance on the commitment.
+	remotePkScript, err := input.CommitScriptUnencumbered(remotePayKey)
+	if err != nil {
+		return false, err
+	}
+
+	// Next, we'll derive our script that includes the revocation base for
+	// the remote party allowing them to claim this output before the CSV
+	// delay if we breach.
+	revocationKey := input.DeriveRevocationPubkey(
+		remoteChanCfg.RevocationBasePoint.PubKey, commitPoint,
+	)
+	localScript, err := input.CommitScriptToSelf(
+		uint32(localChanCfg.CsvDelay), localDelayKey, revocationKey,
+	)
+	if err != nil {
+		return false, err
+	}
+	localPkScript, err := input.WitnessScriptHash(localScript)
+	if err != nil {
+		return false, err
+	}
+
+	// With all our scripts assembled, we'll examine the outputs of the
+	// commitment transaction to determine if this is a local force close
+	// or not.
+	for _, output := range commitSpend.SpendingTx.TxOut {
+		pkScript := output.PkScript
+
+		switch {
+		case bytes.Equal(localPkScript, pkScript):
+			return true, nil
+
+		case bytes.Equal(remotePkScript, pkScript):
+			return true, nil
+		}
+	}
+
+	// If neither of these scripts are present, then it isn't a local
+	// force close.
+	return false, nil
+}
+
 // closeObserver is a dedicated goroutine that will watch for any closes of the
 // channel that it's watching on chain. In the event of an on-chain event, the
 // close observer will assemble the proper materials required to claim the
@@ -320,35 +390,40 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 			return
 		}
 
-		// If this channel has been recovered, then we'll modify our
-		// behavior as it isn't possible for us to close out the
-		// channel off-chain ourselves. It can only be the remote party
-		// force closing, or a cooperative closure we signed off on
-		// before losing data getting confirmed in the chain.
-		isRecoveredChan := c.cfg.chanState.HasChanStatus(
-			channeldb.ChanStatusRestored,
-		)
+		// Decode the state hint encoded within the commitment
+		// transaction to determine if this is a revoked state or not.
+		obfuscator := c.stateHintObfuscator
+		broadcastStateNum := c.cfg.extractStateNumHint(
+			commitTxBroadcast, obfuscator,
+		)
 
-		// If we're not recovering this channel, and this is our
-		// commitment transaction, then we can exit here as we don't
-		// have any further processing we need to do (we can't cheat
-		// ourselves :p).
-		if !isRecoveredChan {
-			commitmentHash := localCommit.CommitTx.TxHash()
-			isOurCommitment := commitSpend.SpenderTxHash.IsEqual(
-				&commitmentHash,
-			)
-
-			if isOurCommitment {
-				if err := c.dispatchLocalForceClose(
-					commitSpend, *localCommit,
-				); err != nil {
-					log.Errorf("unable to handle local"+
-						"close for chan_point=%v: %v",
-						c.cfg.chanState.FundingOutpoint, err)
-				}
-				return
-			}
-		}
+		// Based on the output scripts within this commitment, we'll
+		// determine if this is our commitment transaction or not (a
+		// self force close).
+		isOurCommit, err := isOurCommitment(
+			c.cfg.chanState.LocalChanCfg,
+			c.cfg.chanState.RemoteChanCfg, commitSpend,
+			broadcastStateNum, c.cfg.chanState.RevocationProducer,
+		)
+		if err != nil {
+			log.Errorf("unable to determine self commit for "+
+				"chan_point=%v: %v",
+				c.cfg.chanState.FundingOutpoint, err)
+			return
+		}
+
+		// If this is our commitment transaction, then we can exit here
+		// as we don't have any further processing we need to do (we
+		// can't cheat ourselves :p).
+		if isOurCommit {
+			if err := c.dispatchLocalForceClose(
+				commitSpend, *localCommit,
+			); err != nil {
+				log.Errorf("unable to handle local "+
+					"close for chan_point=%v: %v",
+					c.cfg.chanState.FundingOutpoint, err)
+			}
+			return
+		}
 
 		// Next, we'll check to see if this is a cooperative channel
@@ -369,14 +444,9 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 		log.Warnf("Unprompted commitment broadcast for "+
 			"ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)
 
-		// Decode the state hint encoded within the commitment
-		// transaction to determine if this is a revoked state or not.
-		obfuscator := c.stateHintObfuscator
-		broadcastStateNum := c.cfg.extractStateNumHint(
-			commitTxBroadcast, obfuscator,
-		)
 		// Fetch the current known commit height for the remote party,
 		// and their pending commitment chain tip if it exists.
 		remoteStateNum := remoteCommit.CommitHeight
 
 		remoteChainTip, err := c.cfg.chanState.RemoteCommitChainTip()
 		if err != nil && err != channeldb.ErrNoPendingCommit {
 			log.Errorf("unable to obtain chain tip for "+
@@ -385,6 +455,15 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 			return
 		}
 
+		// If this channel has been recovered, then we'll modify our
+		// behavior as it isn't possible for us to close out the
+		// channel off-chain ourselves. It can only be the remote party
+		// force closing, or a cooperative closure we signed off on
+		// before losing data getting confirmed in the chain.
+		isRecoveredChan := c.cfg.chanState.HasChanStatus(
+			channeldb.ChanStatusRestored,
+		)
+
 		switch {
 		// If state number spending transaction matches the current
 		// latest state, then they've initiated a unilateral close. So
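As an aside, the broadcastStateNum used above is recovered via c.cfg.extractStateNumHint, which the new test below wires to lnwallet.GetStateNumHint. A rough sketch of that decoding follows, assuming the BOLT #3 encoding (the 48-bit commitment number is XOR'd with a per-channel obfuscator, with the low 24 bits stored in the locktime and the high 24 bits in the first input's sequence field) and the github.com/btcsuite/btcd/wire package; this is illustrative only, not lnd's exact implementation:

	// decodeStateNumHint recovers a commitment state number from a
	// BOLT #3 style commitment transaction, given the 48-bit obfuscator.
	func decodeStateNumHint(commitTx *wire.MsgTx, obfuscator [6]byte) uint64 {
		// Re-pack the 6-byte obfuscator into a single 48-bit integer.
		var xorMask uint64
		for i, b := range obfuscator {
			xorMask |= uint64(b) << (8 * uint(5-i))
		}

		// The high 24 bits of the obscured state number live in the
		// sole input's sequence field, the low 24 bits in the locktime.
		obscured := uint64(commitTx.TxIn[0].Sequence&0xffffff)<<24 |
			uint64(commitTx.LockTime&0xffffff)

		return obscured ^ xorMask
	}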
@@ -234,6 +234,24 @@ type dlpTestCase struct {
 	NumUpdates uint8
 }
 
+func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliSatoshi,
+	aliceChannel, bobChannel *lnwallet.LightningChannel,
+	numUpdates uint8) error {
+
+	for i := 0; i < int(numUpdates); i++ {
+		addFakeHTLC(
+			t, htlcAmount, uint64(i), aliceChannel, bobChannel,
+		)
+
+		err := lnwallet.ForceStateTransition(aliceChannel, bobChannel)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
 // TestChainWatcherDataLossProtect tests that if we've lost data (and are
 // behind the remote node), then we'll properly detect this case and dispatch a
 // remote force close using the obtained data loss commitment point.
@@ -291,19 +309,13 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
 		// new HTLC to add to the commitment, and then lock in a state
 		// transition.
 		const htlcAmt = 1000
-		for i := 0; i < int(testCase.NumUpdates); i++ {
-			addFakeHTLC(
-				t, 1000, uint64(i), aliceChannel, bobChannel,
-			)
-
-			err := lnwallet.ForceStateTransition(
-				aliceChannel, bobChannel,
-			)
-			if err != nil {
-				t.Errorf("unable to trigger state "+
-					"transition: %v", err)
-				return false
-			}
-		}
+		err = executeStateTransitions(
+			t, htlcAmt, aliceChannel, bobChannel, testCase.NumUpdates,
+		)
+		if err != nil {
+			t.Errorf("unable to trigger state "+
+				"transition: %v", err)
+			return false
+		}
 
 		// We'll request a new channel event subscription from Alice's
@@ -412,3 +424,149 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
 		})
 	}
 }
+
+// TestChainWatcherLocalForceCloseDetect tests we're able to always detect our
+// commitment output based on only the outputs present on the transaction.
+func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
+	t.Parallel()
+
+	// localForceCloseScenario is the primary test we'll use to execute
+	// our table driven tests. We'll assert that for any number of state
+	// updates, and whether or not the commitment transaction has our
+	// output, we're able to properly detect a local force close.
+	localForceCloseScenario := func(t *testing.T, numUpdates uint8,
+		remoteOutputOnly, localOutputOnly bool) bool {
+
+		// First, we'll create two channels which already have
+		// established a commitment contract between themselves.
+		aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels()
+		if err != nil {
+			t.Fatalf("unable to create test channels: %v", err)
+		}
+		defer cleanUp()
+
+		// With the channels created, we'll now create a chain watcher
+		// instance which will be watching for any closes of Alice's
+		// channel.
+		aliceNotifier := &mockNotifier{
+			spendChan: make(chan *chainntnfs.SpendDetail),
+		}
+		aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
+			chanState:           aliceChannel.State(),
+			notifier:            aliceNotifier,
+			signer:              aliceChannel.Signer,
+			extractStateNumHint: lnwallet.GetStateNumHint,
+		})
+		if err != nil {
+			t.Fatalf("unable to create chain watcher: %v", err)
+		}
+		if err := aliceChainWatcher.Start(); err != nil {
+			t.Fatalf("unable to start chain watcher: %v", err)
+		}
+		defer aliceChainWatcher.Stop()
+
+		// We'll execute a number of state transitions based on the
+		// test case's numUpdates. We do this to get more coverage of
+		// various state hint encodings beyond 0 and 1.
+		const htlcAmt = 1000
+		err = executeStateTransitions(
+			t, htlcAmt, aliceChannel, bobChannel, numUpdates,
+		)
+		if err != nil {
+			t.Errorf("unable to trigger state "+
+				"transition: %v", err)
+			return false
+		}
+
+		// We'll request a new channel event subscription from Alice's
+		// chain watcher so we can be notified of our fake close below.
+		chanEvents := aliceChainWatcher.SubscribeChannelEvents()
+
+		// Next, we'll obtain Alice's commitment transaction and
+		// trigger a force close. This should cause her to detect a
+		// local force close, and dispatch a local close event.
+		aliceCommit := aliceChannel.State().LocalCommitment.CommitTx
+
+		// Since this is Alice's commitment, her output is always first
+		// since she's the one creating the HTLCs (lower balance). In
+		// order to simulate the commitment only having the remote
+		// party's output, we'll remove Alice's output.
+		if remoteOutputOnly {
+			aliceCommit.TxOut = aliceCommit.TxOut[1:]
+		}
+		if localOutputOnly {
+			aliceCommit.TxOut = aliceCommit.TxOut[:1]
+		}
+
+		aliceTxHash := aliceCommit.TxHash()
+		aliceSpend := &chainntnfs.SpendDetail{
+			SpenderTxHash: &aliceTxHash,
+			SpendingTx:    aliceCommit,
+		}
+		aliceNotifier.spendChan <- aliceSpend
+
+		// We should get a local force close event from Alice as she
+		// should be able to detect the close based on the commitment
+		// outputs.
+		select {
+		case <-chanEvents.LocalUnilateralClosure:
+			return true
+
+		case <-time.After(time.Second * 5):
+			t.Errorf("didn't get local force close for state #%v",
+				numUpdates)
+			return false
+		}
+	}
+
+	// For our test cases, we'll ensure that we test having a remote
+	// output present and absent with none or some number of updates in
+	// the channel.
+	testCases := []struct {
+		numUpdates       uint8
+		remoteOutputOnly bool
+		localOutputOnly  bool
+	}{
+		{
+			numUpdates:       0,
+			remoteOutputOnly: true,
+		},
+		{
+			numUpdates:       0,
+			remoteOutputOnly: false,
+		},
+		{
+			numUpdates:      0,
+			localOutputOnly: true,
+		},
+		{
+			numUpdates:       20,
+			remoteOutputOnly: false,
+		},
+		{
+			numUpdates:       20,
+			remoteOutputOnly: true,
+		},
+		{
+			numUpdates:      20,
+			localOutputOnly: true,
+		},
+	}
+	for _, testCase := range testCases {
+		testName := fmt.Sprintf(
+			"num_updates=%v,remote_output=%v,local_output=%v",
+			testCase.numUpdates, testCase.remoteOutputOnly,
+			testCase.localOutputOnly,
+		)
+
+		testCase := testCase
+		t.Run(testName, func(t *testing.T) {
+			t.Parallel()
+
+			localForceCloseScenario(
+				t, testCase.numUpdates, testCase.remoteOutputOnly,
+				testCase.localOutputOnly,
+			)
+		})
+	}
+}
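Note that localForceCloseScenario returns a bool in the style of a testing/quick property. A hypothetical harness (not part of this commit) could therefore also fuzz the update count, similar in spirit to how TestChainWatcherDataLossProtect drives its scenario; assuming a testing/quick import:

	// Hypothetical: fuzz the number of updates with testing/quick. The
	// count is reduced modulo a small bound so each run stays fast.
	prop := func(numUpdates uint8) bool {
		return localForceCloseScenario(t, numUpdates%20, false, false)
	}
	if err := quick.Check(prop, nil); err != nil {
		t.Fatalf("local force close not detected: %v", err)
	}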