package contractcourt

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"testing"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
)
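
// mockNotifier is a minimal chain notifier mock used by the tests in this
// file: spend, block epoch, and confirmation notifications are fed in
// directly over its channels to drive the chain watcher.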
type mockNotifier struct {
	spendChan chan *chainntnfs.SpendDetail
	epochChan chan *chainntnfs.BlockEpoch
	confChan  chan *chainntnfs.TxConfirmation
}

func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, _ []byte, numConfs,
	heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {

	return &chainntnfs.ConfirmationEvent{
		Confirmed: m.confChan,
		Cancel:    func() {},
	}, nil
}

func (m *mockNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	return &chainntnfs.BlockEpochEvent{
		Epochs: m.epochChan,
		Cancel: func() {},
	}, nil
}

func (m *mockNotifier) Start() error {
	return nil
}

func (m *mockNotifier) Stop() error {
	return nil
}

func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte,
	heightHint uint32) (*chainntnfs.SpendEvent, error) {

	return &chainntnfs.SpendEvent{
		Spend:  m.spendChan,
		Cancel: func() {},
	}, nil
}

// TestChainWatcherRemoteUnilateralClose tests that the chain watcher is able
// to properly detect a normal unilateral close by the remote node using their
// lowest commitment.
func TestChainWatcherRemoteUnilateralClose(t *testing.T) {
	t.Parallel()

	// First, we'll create two channels which already have established a
	// commitment contract between themselves.
	aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(true)
	if err != nil {
		t.Fatalf("unable to create test channels: %v", err)
	}
	defer cleanUp()

	// With the channels created, we'll now create a chain watcher instance
	// which will be watching for any closes of Alice's channel.
	aliceNotifier := &mockNotifier{
		spendChan: make(chan *chainntnfs.SpendDetail),
	}
	aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
		chanState:           aliceChannel.State(),
		notifier:            aliceNotifier,
		signer:              aliceChannel.Signer,
		extractStateNumHint: lnwallet.GetStateNumHint,
	})
	if err != nil {
		t.Fatalf("unable to create chain watcher: %v", err)
	}
	err = aliceChainWatcher.Start()
	if err != nil {
		t.Fatalf("unable to start chain watcher: %v", err)
	}
	defer aliceChainWatcher.Stop()

	// We'll request a new channel event subscription from Alice's chain
	// watcher.
	chanEvents := aliceChainWatcher.SubscribeChannelEvents()

	// If we simulate an immediate broadcast of the current commitment by
	// Bob, then the chain watcher should detect this case.
	bobCommit := bobChannel.State().LocalCommitment.CommitTx
	bobTxHash := bobCommit.TxHash()
	bobSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &bobTxHash,
		SpendingTx:    bobCommit,
	}
	aliceNotifier.spendChan <- bobSpend

	// We should get a new spend event over the remote unilateral close
	// event channel.
	var uniClose *RemoteUnilateralCloseInfo
	select {
	case uniClose = <-chanEvents.RemoteUnilateralClosure:
	case <-time.After(time.Second * 15):
		t.Fatalf("didn't receive unilateral close event")
	}

	// The unilateral close should have properly located Alice's output in
	// the commitment transaction.
	if uniClose.CommitResolution == nil {
		t.Fatalf("unable to find alice's commit resolution")
	}
}
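
// addFakeHTLC adds a dummy HTLC with the given amount and ID to both sides of
// the channel, deriving the payment hash from a preimage built from the ID
// byte. The update is added to both commitments but not yet locked in with a
// state transition.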
func addFakeHTLC(t *testing.T, htlcAmount lnwire.MilliSatoshi, id uint64,
	aliceChannel, bobChannel *lnwallet.LightningChannel) {

	preimage := bytes.Repeat([]byte{byte(id)}, 32)
	paymentHash := sha256.Sum256(preimage)
	var returnPreimage [32]byte
	copy(returnPreimage[:], preimage)
	htlc := &lnwire.UpdateAddHTLC{
		ID:          uint64(id),
		PaymentHash: paymentHash,
		Amount:      htlcAmount,
		Expiry:      uint32(5),
	}

	if _, err := aliceChannel.AddHTLC(htlc, nil); err != nil {
		t.Fatalf("alice unable to add htlc: %v", err)
	}
	if _, err := bobChannel.ReceiveHTLC(htlc); err != nil {
		t.Fatalf("bob unable to recv add htlc: %v", err)
	}
}

// TestChainWatcherRemoteUnilateralClosePendingCommit tests that the chain
// watcher is able to properly detect a unilateral close wherein the remote
// node broadcasts their newly received commitment, without first revoking the
// old one.
func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {
	t.Parallel()

	// First, we'll create two channels which already have established a
	// commitment contract between themselves.
	aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(true)
	if err != nil {
		t.Fatalf("unable to create test channels: %v", err)
	}
	defer cleanUp()

	// With the channels created, we'll now create a chain watcher instance
	// which will be watching for any closes of Alice's channel.
	aliceNotifier := &mockNotifier{
		spendChan: make(chan *chainntnfs.SpendDetail),
	}
	aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
		chanState:           aliceChannel.State(),
		notifier:            aliceNotifier,
		signer:              aliceChannel.Signer,
		extractStateNumHint: lnwallet.GetStateNumHint,
	})
	if err != nil {
		t.Fatalf("unable to create chain watcher: %v", err)
	}
	if err := aliceChainWatcher.Start(); err != nil {
		t.Fatalf("unable to start chain watcher: %v", err)
	}
	defer aliceChainWatcher.Stop()

	// We'll request a new channel event subscription from Alice's chain
	// watcher.
	chanEvents := aliceChainWatcher.SubscribeChannelEvents()

	// Next, we'll create a fake HTLC just so we can advance Alice's
	// channel state to a new pending commitment on her remote commit chain
	// for Bob.
	htlcAmount := lnwire.NewMSatFromSatoshis(20000)
	addFakeHTLC(t, htlcAmount, 0, aliceChannel, bobChannel)

	// With the HTLC added, we'll now manually initiate a state transition
	// from Alice to Bob.
	_, _, _, err = aliceChannel.SignNextCommitment()
	if err != nil {
		t.Fatal(err)
	}

	// At this point, we'll now simulate Bob broadcasting this new pending
	// unrevoked commitment.
	bobPendingCommit, err := aliceChannel.State().RemoteCommitChainTip()
	if err != nil {
		t.Fatal(err)
	}

	// We'll craft a fake spend notification with Bob's actual commitment.
	// The chain watcher should be able to detect that this is a pending
	// commit broadcast based on the state hints in the commitment.
	bobCommit := bobPendingCommit.Commitment.CommitTx
	bobTxHash := bobCommit.TxHash()
	bobSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &bobTxHash,
		SpendingTx:    bobCommit,
	}
	aliceNotifier.spendChan <- bobSpend

	// We should get a new spend event over the remote unilateral close
	// event channel.
	var uniClose *RemoteUnilateralCloseInfo
	select {
	case uniClose = <-chanEvents.RemoteUnilateralClosure:
	case <-time.After(time.Second * 15):
		t.Fatalf("didn't receive unilateral close event")
	}

	// The unilateral close should have properly located Alice's output in
	// the commitment transaction.
	if uniClose.CommitResolution == nil {
		t.Fatalf("unable to find alice's commit resolution")
	}
}

// dlpTestCase describes a test case for the main TestChainWatcherDataLossProtect
// test. Each case pairs a number of channel updates with a broadcast state
// number that is greater than that update count.
type dlpTestCase struct {
	BroadcastStateNum uint8
	NumUpdates        uint8
}
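
// executeStateTransitions adds numUpdates fake HTLCs to the channel, locking
// each one in with a full state transition between Alice and Bob.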
func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliSatoshi,
	aliceChannel, bobChannel *lnwallet.LightningChannel,
	numUpdates uint8) error {

	for i := 0; i < int(numUpdates); i++ {
		addFakeHTLC(
			t, htlcAmount, uint64(i), aliceChannel, bobChannel,
		)

		err := lnwallet.ForceStateTransition(aliceChannel, bobChannel)
		if err != nil {
			return err
		}
	}

	return nil
}

// TestChainWatcherDataLossProtect tests that if we've lost data (and are
// behind the remote node), then we'll properly detect this case and dispatch a
// remote force close using the obtained data loss commitment point.
func TestChainWatcherDataLossProtect(t *testing.T) {
	t.Parallel()

	// dlpScenario is our primary scenario function for this test as a
	// whole. It ensures that if the remote party broadcasts a commitment
	// that is beyond our best known commitment for them, and they don't
	// have a pending commitment (one we sent but which hasn't been
	// revoked), then we'll properly detect this case, and execute the DLP
	// protocol on our end.
	//
	// broadcastStateNum is the number that we'll trick Alice into thinking
	// was broadcast, while numUpdates is the actual number of updates
	// we'll execute. Both of these are 8-bit values drawn from the test
	// cases below.
	dlpScenario := func(t *testing.T, testCase dlpTestCase) bool {
		// First, we'll create two channels which already have
		// established a commitment contract between themselves.
		aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
			false,
		)
		if err != nil {
			t.Fatalf("unable to create test channels: %v", err)
		}
		defer cleanUp()

		// With the channels created, we'll now create a chain watcher
		// instance which will be watching for any closes of Alice's
		// channel.
		aliceNotifier := &mockNotifier{
			spendChan: make(chan *chainntnfs.SpendDetail),
		}
		aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
			chanState: aliceChannel.State(),
			notifier:  aliceNotifier,
			signer:    aliceChannel.Signer,
			extractStateNumHint: func(*wire.MsgTx,
				[lnwallet.StateHintSize]byte) uint64 {

				// We'll return the "fake" broadcast commitment
				// number so we can simulate broadcast of an
				// arbitrary state.
				return uint64(testCase.BroadcastStateNum)
			},
		})
		if err != nil {
			t.Fatalf("unable to create chain watcher: %v", err)
		}
		if err := aliceChainWatcher.Start(); err != nil {
			t.Fatalf("unable to start chain watcher: %v", err)
		}
		defer aliceChainWatcher.Stop()

		// Based on the number of updates for this test case, make a
		// new HTLC to add to the commitment, and then lock in a state
		// transition.
		const htlcAmt = 1000
		err = executeStateTransitions(
			t, htlcAmt, aliceChannel, bobChannel, testCase.NumUpdates,
		)
		if err != nil {
			t.Errorf("unable to trigger state "+
				"transition: %v", err)
			return false
		}

		// We'll request a new channel event subscription from Alice's
		// chain watcher so we can be notified of our fake close below.
		chanEvents := aliceChainWatcher.SubscribeChannelEvents()

		// Next, we'll feed in this new state number as a response to
		// the query, and insert the expected DLP commit point.
		dlpPoint := aliceChannel.State().RemoteCurrentRevocation
		err = aliceChannel.State().MarkDataLoss(dlpPoint)
		if err != nil {
			t.Errorf("unable to insert dlp point: %v", err)
			return false
		}

		// Now we'll trigger the channel close event to trigger the
		// scenario.
		bobCommit := bobChannel.State().LocalCommitment.CommitTx
		bobTxHash := bobCommit.TxHash()
		bobSpend := &chainntnfs.SpendDetail{
			SpenderTxHash: &bobTxHash,
			SpendingTx:    bobCommit,
		}
		aliceNotifier.spendChan <- bobSpend

		// We should get a new uni close resolution that indicates we
		// processed the DLP scenario.
		var uniClose *RemoteUnilateralCloseInfo
		select {
		case uniClose = <-chanEvents.RemoteUnilateralClosure:
			// If we processed this as a DLP case, then the remote
			// party's commitment should be blank, as we don't have
			// this up to date state.
			blankCommit := channeldb.ChannelCommitment{}
			if uniClose.RemoteCommit.FeePerKw != blankCommit.FeePerKw {
				t.Errorf("DLP path not executed")
				return false
			}

			// The resolution should have also read the DLP point
			// we stored above, and used that to derive their sweep
			// key for this output.
			sweepTweak := input.SingleTweakBytes(
				dlpPoint,
				aliceChannel.State().LocalChanCfg.PaymentBasePoint.PubKey,
			)
			commitResolution := uniClose.CommitResolution
			resolutionTweak := commitResolution.SelfOutputSignDesc.SingleTweak
			if !bytes.Equal(sweepTweak, resolutionTweak) {
				t.Errorf("sweep key mismatch: expected %x got %x",
					sweepTweak, resolutionTweak)
				return false
			}

			return true

		case <-time.After(time.Second * 5):
			t.Errorf("didn't receive unilateral close event")
			return false
		}
	}

	testCases := []dlpTestCase{
		// For our first scenario, we'll ensure that if we're on state 1,
		// and the remote party broadcasts state 2 and we don't have a
		// pending commit for them, then we'll properly detect this as a
		// DLP scenario.
		{
			BroadcastStateNum: 2,
			NumUpdates:        1,
		},

		// We've completed a single update, but the remote party broadcasts
		// a state that's 5 states beyond our best known state. We've lost
		// data, but only partially, so we should enter a DLP scenario.
		{
			BroadcastStateNum: 6,
			NumUpdates:        1,
		},

		// Similar to the case above, but we've done more than one
		// update.
		{
			BroadcastStateNum: 6,
			NumUpdates:        3,
		},

		// We've done zero updates, but our channel peer broadcasts a
		// state beyond our knowledge.
		{
			BroadcastStateNum: 10,
			NumUpdates:        0,
		},
	}
	for _, testCase := range testCases {
		testName := fmt.Sprintf("num_updates=%v,broadcast_state_num=%v",
			testCase.NumUpdates, testCase.BroadcastStateNum)

		testCase := testCase
		t.Run(testName, func(t *testing.T) {
			t.Parallel()

			if !dlpScenario(t, testCase) {
				t.Fatalf("test %v failed", testName)
			}
		})
	}
}

// TestChainWatcherLocalForceCloseDetect tests we're able to always detect our
// commitment output based on only the outputs present on the transaction.
func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
	t.Parallel()

	// localForceCloseScenario is the primary test we'll use to execute our
	// table driven tests. We'll assert that for any number of state
	// updates, and if the commitment transaction has our output or not,
	// we're able to properly detect a local force close.
	localForceCloseScenario := func(t *testing.T, numUpdates uint8,
		remoteOutputOnly, localOutputOnly bool) bool {

		// First, we'll create two channels which already have
		// established a commitment contract between themselves.
		aliceChannel, bobChannel, cleanUp, err := lnwallet.CreateTestChannels(
			false,
		)
		if err != nil {
			t.Fatalf("unable to create test channels: %v", err)
		}
		defer cleanUp()

		// With the channels created, we'll now create a chain watcher
		// instance which will be watching for any closes of Alice's
		// channel.
		aliceNotifier := &mockNotifier{
			spendChan: make(chan *chainntnfs.SpendDetail),
		}
		aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
			chanState:           aliceChannel.State(),
			notifier:            aliceNotifier,
			signer:              aliceChannel.Signer,
			extractStateNumHint: lnwallet.GetStateNumHint,
		})
		if err != nil {
			t.Fatalf("unable to create chain watcher: %v", err)
		}
		if err := aliceChainWatcher.Start(); err != nil {
			t.Fatalf("unable to start chain watcher: %v", err)
		}
		defer aliceChainWatcher.Stop()

		// We'll execute a number of state transitions based on the
		// numUpdates value for this test case. We do this to get more
		// coverage of various state hint encodings beyond 0 and 1.
		const htlcAmt = 1000
		err = executeStateTransitions(
			t, htlcAmt, aliceChannel, bobChannel, numUpdates,
		)
		if err != nil {
			t.Errorf("unable to trigger state "+
				"transition: %v", err)
			return false
		}

		// We'll request a new channel event subscription from Alice's
		// chain watcher so we can be notified of our fake close below.
		chanEvents := aliceChainWatcher.SubscribeChannelEvents()

		// Next, we'll obtain Alice's commitment transaction and
		// trigger a force close. This should cause her to detect a
		// local force close, and dispatch a local close event.
		aliceCommit := aliceChannel.State().LocalCommitment.CommitTx

		// Since this is Alice's commitment, her output is always first
		// since she's the one creating the HTLCs (lower balance). In
		// order to simulate the commitment only having the remote
		// party's output, we'll remove Alice's output.
		if remoteOutputOnly {
			aliceCommit.TxOut = aliceCommit.TxOut[1:]
		}
		if localOutputOnly {
			aliceCommit.TxOut = aliceCommit.TxOut[:1]
		}

		aliceTxHash := aliceCommit.TxHash()
		aliceSpend := &chainntnfs.SpendDetail{
			SpenderTxHash: &aliceTxHash,
			SpendingTx:    aliceCommit,
		}
		aliceNotifier.spendChan <- aliceSpend

		// We should get a local force close event from Alice as she
		// should be able to detect the close based on the commitment
		// outputs.
		select {
		case <-chanEvents.LocalUnilateralClosure:
			return true

		case <-time.After(time.Second * 5):
			t.Errorf("didn't get local force close for state #%v",
				numUpdates)
			return false
		}
	}

	// For our test cases, we'll ensure that we test having a remote output
	// present and absent with none or some number of updates in the
	// channel.
	testCases := []struct {
		numUpdates       uint8
		remoteOutputOnly bool
		localOutputOnly  bool
	}{
		{
			numUpdates:       0,
			remoteOutputOnly: true,
		},
		{
			numUpdates:       0,
			remoteOutputOnly: false,
		},
		{
			numUpdates:      0,
			localOutputOnly: true,
		},
		{
			numUpdates:       20,
			remoteOutputOnly: false,
		},
		{
			numUpdates:       20,
			remoteOutputOnly: true,
		},
		{
			numUpdates:      20,
			localOutputOnly: true,
		},
	}
	for _, testCase := range testCases {
		testName := fmt.Sprintf(
			"num_updates=%v,remote_output=%v,local_output=%v",
			testCase.numUpdates, testCase.remoteOutputOnly,
			testCase.localOutputOnly,
		)

		testCase := testCase
		t.Run(testName, func(t *testing.T) {
			t.Parallel()

			localForceCloseScenario(
				t, testCase.numUpdates, testCase.remoteOutputOnly,
				testCase.localOutputOnly,
			)
		})
	}
}