2018-01-19 00:54:52 +03:00
|
|
|
package contractcourt
|
|
|
|
|
|
|
|
import (
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
"bytes"
|
2018-01-19 00:54:52 +03:00
|
|
|
"fmt"
|
|
|
|
"sync"
|
2018-01-20 04:12:08 +03:00
|
|
|
"sync/atomic"
|
2018-11-20 17:09:46 +03:00
|
|
|
"time"
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2018-07-12 12:02:52 +03:00
|
|
|
"github.com/btcsuite/btcd/btcec"
|
2018-06-05 04:34:16 +03:00
|
|
|
"github.com/btcsuite/btcd/chaincfg"
|
|
|
|
"github.com/btcsuite/btcd/txscript"
|
|
|
|
"github.com/btcsuite/btcd/wire"
|
|
|
|
"github.com/btcsuite/btcutil"
|
2018-07-12 12:02:52 +03:00
|
|
|
"github.com/davecgh/go-spew/spew"
|
|
|
|
"github.com/lightningnetwork/lnd/chainntnfs"
|
|
|
|
"github.com/lightningnetwork/lnd/channeldb"
|
2019-01-16 17:47:43 +03:00
|
|
|
"github.com/lightningnetwork/lnd/input"
|
2018-07-12 12:02:52 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwallet"
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
"github.com/lightningnetwork/lnd/shachain"
|
2018-01-19 00:54:52 +03:00
|
|
|
)
|
|
|
|
|
2018-11-20 17:09:46 +03:00
|
|
|
const (
|
|
|
|
// minCommitPointPollTimeout is the minimum time we'll wait before
|
|
|
|
// polling the database for a channel's commitpoint.
|
|
|
|
minCommitPointPollTimeout = 1 * time.Second
|
|
|
|
|
|
|
|
// maxCommitPointPollTimeout is the maximum time we'll wait before
|
|
|
|
// polling the database for a channel's commitpoint.
|
|
|
|
maxCommitPointPollTimeout = 10 * time.Minute
|
|
|
|
)
|
|
|
|
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TODO statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch package, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
// LocalUnilateralCloseInfo encapsulates all the information we need to act on
|
|
|
|
// a local force close that gets confirmed.
|
2018-03-16 16:08:37 +03:00
|
|
|
type LocalUnilateralCloseInfo struct {
|
|
|
|
*chainntnfs.SpendDetail
|
|
|
|
*lnwallet.LocalForceCloseSummary
|
2018-08-21 13:21:15 +03:00
|
|
|
*channeldb.ChannelCloseSummary
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
|
|
|
|
// CommitSet is the set of known valid commitments at the time the
|
|
|
|
// remote party's commitment hit the chain.
|
|
|
|
CommitSet CommitSet
|
2018-03-16 16:08:37 +03:00
|
|
|
}
|
|
|
|
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
// CooperativeCloseInfo encapsulates all the information we need to act on a
|
|
|
|
// cooperative close that gets confirmed.
|
2018-08-21 13:21:15 +03:00
|
|
|
type CooperativeCloseInfo struct {
|
|
|
|
*channeldb.ChannelCloseSummary
|
|
|
|
}
|
|
|
|
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
// RemoteUnilateralCloseInfo wraps the normal UnilateralCloseSummary to couple
|
|
|
|
// the CommitSet at the time of channel closure.
|
|
|
|
type RemoteUnilateralCloseInfo struct {
|
|
|
|
*lnwallet.UnilateralCloseSummary
|
|
|
|
|
|
|
|
// CommitSet is the set of known valid commitments at the time the
|
2019-05-17 03:34:46 +03:00
|
|
|
// remote party's commitment hit the chain.
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
CommitSet CommitSet
|
|
|
|
}
|
|
|
|
|
|
|
|
// CommitSet is a collection of the set of known valid commitments at a given
|
|
|
|
// instant. If ConfCommitKey is set, then the commitment identified by the
|
|
|
|
// HtlcSetKey has hit the chain. This struct will be used to examine all live
|
|
|
|
// HTLCs to determine if any additional actions need to be made based on the
|
|
|
|
// remote party's commitments.
|
|
|
|
type CommitSet struct {
|
|
|
|
// ConfCommitKey if non-nil, identifies the commitment that was
|
|
|
|
// confirmed in the chain.
|
|
|
|
ConfCommitKey *HtlcSetKey
|
|
|
|
|
|
|
|
// HtlcSets stores the set of all known active HTLC for each active
|
|
|
|
// commitment at the time of channel closure.
|
|
|
|
HtlcSets map[HtlcSetKey][]channeldb.HTLC
|
|
|
|
}
|
|
|
|
|
|
|
|
// IsEmpty returns true if there are no HTLCs at all within all commitments
|
|
|
|
// that are a part of this commitment diff.
|
|
|
|
func (c *CommitSet) IsEmpty() bool {
|
|
|
|
if c == nil {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, htlcs := range c.HtlcSets {
|
|
|
|
if len(htlcs) != 0 {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// toActiveHTLCSets returns the set of all active HTLCs across all commitment
|
|
|
|
// transactions.
|
|
|
|
func (c *CommitSet) toActiveHTLCSets() map[HtlcSetKey]htlcSet {
|
|
|
|
htlcSets := make(map[HtlcSetKey]htlcSet)
|
|
|
|
|
|
|
|
for htlcSetKey, htlcs := range c.HtlcSets {
|
|
|
|
htlcSets[htlcSetKey] = newHtlcSet(htlcs)
|
|
|
|
}
|
|
|
|
|
|
|
|
return htlcSets
|
|
|
|
}
|
|
|
|
|
2018-01-19 00:54:52 +03:00
|
|
|
// ChainEventSubscription is a struct that houses a subscription to be notified
|
|
|
|
// for any on-chain events related to a channel. There are three types of
|
|
|
|
// possible on-chain events: a cooperative channel closure, a unilateral
|
|
|
|
// channel closure, and a channel breach. The fourth type: a force close is
|
|
|
|
// locally initiated, so we don't provide any event stream for said event.
|
|
|
|
type ChainEventSubscription struct {
|
2018-01-20 04:12:08 +03:00
|
|
|
// ChanPoint is that channel that chain events will be dispatched for.
|
2018-01-19 00:54:52 +03:00
|
|
|
ChanPoint wire.OutPoint
|
|
|
|
|
2018-03-16 16:08:37 +03:00
|
|
|
// RemoteUnilateralClosure is a channel that will be sent upon in the
|
|
|
|
// event that the remote party's commitment transaction is confirmed.
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
RemoteUnilateralClosure chan *RemoteUnilateralCloseInfo
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2018-03-16 16:08:37 +03:00
|
|
|
// LocalUnilateralClosure is a channel that will be sent upon in the
|
|
|
|
// event that our commitment transaction is confirmed.
|
|
|
|
LocalUnilateralClosure chan *LocalUnilateralCloseInfo
|
|
|
|
|
|
|
|
// CooperativeClosure is a signal that will be sent upon once a
|
|
|
|
// cooperative channel closure has been detected confirmed.
|
2018-08-21 13:21:15 +03:00
|
|
|
CooperativeClosure chan *CooperativeCloseInfo
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
// ContractBreach is a channel that will be sent upon if we detect a
|
|
|
|
// contract breach. The struct sent across the channel contains all the
|
|
|
|
// material required to bring the cheating channel peer to justice.
|
|
|
|
ContractBreach chan *lnwallet.BreachRetribution
|
|
|
|
|
|
|
|
// Cancel cancels the subscription to the event stream for a particular
|
|
|
|
// channel. This method should be called once the caller no longer needs to
|
|
|
|
// be notified of any on-chain events for a particular channel.
|
|
|
|
Cancel func()
|
|
|
|
}
|
|
|
|
|
2018-04-19 14:05:05 +03:00
|
|
|
// chainWatcherConfig encapsulates all the necessary functions and interfaces
|
|
|
|
// needed to watch and act on on-chain events for a particular channel.
|
|
|
|
type chainWatcherConfig struct {
|
2018-01-19 00:54:52 +03:00
|
|
|
// chanState is a snapshot of the persistent state of the channel that
|
|
|
|
// we're watching. In the event of an on-chain event, we'll query the
|
|
|
|
// database to ensure that we act using the most up to date state.
|
|
|
|
chanState *channeldb.OpenChannel
|
|
|
|
|
|
|
|
// notifier is a reference to the channel notifier that we'll use to be
|
|
|
|
// notified of output spends and when transactions are confirmed.
|
|
|
|
notifier chainntnfs.ChainNotifier
|
|
|
|
|
|
|
|
// signer is the main signer instances that will be responsible for
|
|
|
|
// signing any HTLC and commitment transaction generated by the state
|
|
|
|
// machine.
|
2019-01-16 17:47:43 +03:00
|
|
|
signer input.Signer
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2018-04-18 14:41:03 +03:00
|
|
|
// contractBreach is a method that will be called by the watcher if it
|
|
|
|
// detects that a contract breach transaction has been confirmed. Only
|
|
|
|
// when this method returns with a non-nil error it will be safe to mark
|
|
|
|
// the channel as pending close in the database.
|
|
|
|
contractBreach func(*lnwallet.BreachRetribution) error
|
|
|
|
|
2018-04-19 14:05:05 +03:00
|
|
|
// isOurAddr is a function that returns true if the passed address is
|
|
|
|
// known to us.
|
|
|
|
isOurAddr func(btcutil.Address) bool
|
2019-03-13 05:19:24 +03:00
|
|
|
|
|
|
|
// extractStateNumHint extracts the encoded state hint using the passed
|
|
|
|
// obfuscater. This is used by the chain watcher to identify which
|
|
|
|
// state was broadcast and confirmed on-chain.
|
|
|
|
extractStateNumHint func(*wire.MsgTx, [lnwallet.StateHintSize]byte) uint64
|
2018-04-19 14:05:05 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// chainWatcher is a system that's assigned to every active channel. The duty
|
|
|
|
// of this system is to watch the chain for spends of the channels chan point.
|
|
|
|
// If a spend is detected then with chain watcher will notify all subscribers
|
|
|
|
// that the channel has been closed, and also give them the materials necessary
|
|
|
|
// to sweep the funds of the channel on chain eventually.
|
|
|
|
type chainWatcher struct {
|
2018-06-01 01:41:41 +03:00
|
|
|
started int32 // To be used atomically.
|
|
|
|
stopped int32 // To be used atomically.
|
2018-04-19 14:05:05 +03:00
|
|
|
|
|
|
|
quit chan struct{}
|
|
|
|
wg sync.WaitGroup
|
|
|
|
|
|
|
|
cfg chainWatcherConfig
|
|
|
|
|
|
|
|
// stateHintObfuscator is a 48-bit state hint that's used to obfuscate
|
|
|
|
// the current state number on the commitment transactions.
|
|
|
|
stateHintObfuscator [lnwallet.StateHintSize]byte
|
|
|
|
|
2018-01-19 00:54:52 +03:00
|
|
|
// All the fields below are protected by this mutex.
|
2018-01-21 07:25:54 +03:00
|
|
|
sync.Mutex
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
// clientID is an ephemeral counter used to keep track of each
|
|
|
|
// individual client subscription.
|
|
|
|
clientID uint64
|
|
|
|
|
|
|
|
// clientSubscriptions is a map that keeps track of all the active
|
|
|
|
// client subscriptions for events related to this channel.
|
|
|
|
clientSubscriptions map[uint64]*ChainEventSubscription
|
|
|
|
}
|
|
|
|
|
|
|
|
// newChainWatcher returns a new instance of a chainWatcher for a channel given
|
|
|
|
// the chan point to watch, and also a notifier instance that will allow us to
|
|
|
|
// detect on chain events.
|
2018-04-19 14:05:05 +03:00
|
|
|
func newChainWatcher(cfg chainWatcherConfig) (*chainWatcher, error) {
|
2018-01-19 00:54:52 +03:00
|
|
|
// In order to be able to detect the nature of a potential channel
|
|
|
|
// closure we'll need to reconstruct the state hint bytes used to
|
|
|
|
// obfuscate the commitment state number encoded in the lock time and
|
|
|
|
// sequence fields.
|
|
|
|
var stateHint [lnwallet.StateHintSize]byte
|
2018-04-19 14:05:05 +03:00
|
|
|
chanState := cfg.chanState
|
2018-01-19 00:54:52 +03:00
|
|
|
if chanState.IsInitiator {
|
|
|
|
stateHint = lnwallet.DeriveStateHintObfuscator(
|
2018-02-18 02:29:01 +03:00
|
|
|
chanState.LocalChanCfg.PaymentBasePoint.PubKey,
|
|
|
|
chanState.RemoteChanCfg.PaymentBasePoint.PubKey,
|
2018-01-19 00:54:52 +03:00
|
|
|
)
|
|
|
|
} else {
|
|
|
|
stateHint = lnwallet.DeriveStateHintObfuscator(
|
2018-02-18 02:29:01 +03:00
|
|
|
chanState.RemoteChanCfg.PaymentBasePoint.PubKey,
|
|
|
|
chanState.LocalChanCfg.PaymentBasePoint.PubKey,
|
2018-01-19 00:54:52 +03:00
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &chainWatcher{
|
2018-04-19 14:05:05 +03:00
|
|
|
cfg: cfg,
|
2018-01-19 00:54:52 +03:00
|
|
|
stateHintObfuscator: stateHint,
|
|
|
|
quit: make(chan struct{}),
|
|
|
|
clientSubscriptions: make(map[uint64]*ChainEventSubscription),
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start starts all goroutines that the chainWatcher needs to perform its
|
|
|
|
// duties.
|
|
|
|
func (c *chainWatcher) Start() error {
|
2018-01-20 04:12:08 +03:00
|
|
|
if !atomic.CompareAndSwapInt32(&c.started, 0, 1) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-04-19 14:05:05 +03:00
|
|
|
chanState := c.cfg.chanState
|
2018-01-19 00:54:52 +03:00
|
|
|
log.Debugf("Starting chain watcher for ChannelPoint(%v)",
|
2018-04-19 14:05:05 +03:00
|
|
|
chanState.FundingOutpoint)
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
// First, we'll register for a notification to be dispatched if the
|
|
|
|
// funding output is spent.
|
2018-04-19 14:05:05 +03:00
|
|
|
fundingOut := &chanState.FundingOutpoint
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
// As a height hint, we'll try to use the opening height, but if the
|
|
|
|
// channel isn't yet open, then we'll use the height it was broadcast
|
|
|
|
// at.
|
2018-05-02 02:27:20 +03:00
|
|
|
heightHint := c.cfg.chanState.ShortChanID().BlockHeight
|
2018-01-19 00:54:52 +03:00
|
|
|
if heightHint == 0 {
|
2018-04-19 14:05:05 +03:00
|
|
|
heightHint = chanState.FundingBroadcastHeight
|
2018-01-19 00:54:52 +03:00
|
|
|
}
|
|
|
|
|
2018-07-18 05:42:17 +03:00
|
|
|
localKey := chanState.LocalChanCfg.MultiSigKey.PubKey.SerializeCompressed()
|
|
|
|
remoteKey := chanState.RemoteChanCfg.MultiSigKey.PubKey.SerializeCompressed()
|
2019-01-16 17:47:43 +03:00
|
|
|
multiSigScript, err := input.GenMultiSigScript(
|
2018-07-18 05:42:17 +03:00
|
|
|
localKey, remoteKey,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-01-16 17:47:43 +03:00
|
|
|
pkScript, err := input.WitnessScriptHash(multiSigScript)
|
2018-07-18 05:42:17 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-04-19 14:05:05 +03:00
|
|
|
spendNtfn, err := c.cfg.notifier.RegisterSpendNtfn(
|
2018-07-18 05:42:17 +03:00
|
|
|
fundingOut, pkScript, heightHint,
|
2018-01-19 00:54:52 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the spend notification obtained, we'll now dispatch the
|
|
|
|
// closeObserver which will properly react to any changes.
|
|
|
|
c.wg.Add(1)
|
|
|
|
go c.closeObserver(spendNtfn)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stop signals the close observer to gracefully exit.
|
|
|
|
func (c *chainWatcher) Stop() error {
|
2018-01-20 04:12:08 +03:00
|
|
|
if !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-01-19 00:54:52 +03:00
|
|
|
close(c.quit)
|
|
|
|
|
|
|
|
c.wg.Wait()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-04-18 05:02:04 +03:00
|
|
|
// SubscribeChannelEvents returns an active subscription to the set of channel
|
2018-01-19 00:54:52 +03:00
|
|
|
// events for the channel watched by this chain watcher. Once clients no longer
|
|
|
|
// require the subscription, they should call the Cancel() method to allow the
|
2018-04-18 14:41:03 +03:00
|
|
|
// watcher to regain those committed resources.
|
|
|
|
func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription {
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2018-01-21 07:25:54 +03:00
|
|
|
c.Lock()
|
2018-01-19 00:54:52 +03:00
|
|
|
clientID := c.clientID
|
|
|
|
c.clientID++
|
2018-01-21 07:25:54 +03:00
|
|
|
c.Unlock()
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
log.Debugf("New ChainEventSubscription(id=%v) for ChannelPoint(%v)",
|
2018-04-19 14:05:05 +03:00
|
|
|
clientID, c.cfg.chanState.FundingOutpoint)
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
sub := &ChainEventSubscription{
|
2018-04-19 14:05:05 +03:00
|
|
|
ChanPoint: c.cfg.chanState.FundingOutpoint,
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1),
|
2018-03-16 16:08:37 +03:00
|
|
|
LocalUnilateralClosure: make(chan *LocalUnilateralCloseInfo, 1),
|
2018-08-21 13:21:15 +03:00
|
|
|
CooperativeClosure: make(chan *CooperativeCloseInfo, 1),
|
2018-03-16 16:08:37 +03:00
|
|
|
ContractBreach: make(chan *lnwallet.BreachRetribution, 1),
|
2018-01-19 00:54:52 +03:00
|
|
|
Cancel: func() {
|
|
|
|
c.Lock()
|
|
|
|
delete(c.clientSubscriptions, clientID)
|
|
|
|
c.Unlock()
|
|
|
|
return
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2018-01-21 07:25:54 +03:00
|
|
|
c.Lock()
|
2018-01-19 00:54:52 +03:00
|
|
|
c.clientSubscriptions[clientID] = sub
|
2018-01-21 07:25:54 +03:00
|
|
|
c.Unlock()
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
return sub
|
|
|
|
}
|
|
|
|
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
// isOurCommitment returns true if the passed commitSpend is a spend of the
|
|
|
|
// funding transaction using our commitment transaction (a local force close).
|
|
|
|
// In order to do this in a state agnostic manner, we'll make our decisions
|
|
|
|
// based off of only the set of outputs included.
|
|
|
|
func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
|
|
|
|
commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
|
2020-01-06 13:42:04 +03:00
|
|
|
revocationProducer shachain.Producer,
|
|
|
|
chanType channeldb.ChannelType) (bool, error) {
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
|
|
|
|
// First, we'll re-derive our commitment point for this state since
|
|
|
|
// this is what we use to randomize each of the keys for this state.
|
|
|
|
commitSecret, err := revocationProducer.AtIndex(broadcastStateNum)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
commitPoint := input.ComputeCommitmentPoint(commitSecret[:])
|
|
|
|
|
|
|
|
// Now that we have the commit point, we'll derive the tweaked local
|
|
|
|
// and remote keys for this state. We use our point as only we can
|
|
|
|
// revoke our own commitment.
|
2019-09-17 05:06:19 +03:00
|
|
|
commitKeyRing := lnwallet.DeriveCommitmentKeys(
|
2020-01-06 13:42:04 +03:00
|
|
|
commitPoint, true, chanType, &localChanCfg, &remoteChanCfg,
|
2019-09-17 05:06:19 +03:00
|
|
|
)
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
|
|
|
|
// With the keys derived, we'll construct the remote script that'll be
|
|
|
|
// present if they have a non-dust balance on the commitment.
|
2020-03-06 18:11:46 +03:00
|
|
|
remoteScript, _, err := lnwallet.CommitScriptToRemote(
|
|
|
|
chanType, commitKeyRing.ToRemoteKey,
|
2019-09-17 05:06:19 +03:00
|
|
|
)
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll derive our script that includes the revocation base for
|
|
|
|
// the remote party allowing them to claim this output before the CSV
|
|
|
|
// delay if we breach.
|
|
|
|
localScript, err := input.CommitScriptToSelf(
|
2020-01-06 13:42:04 +03:00
|
|
|
uint32(localChanCfg.CsvDelay), commitKeyRing.ToLocalKey,
|
2019-09-17 05:06:19 +03:00
|
|
|
commitKeyRing.RevocationKey,
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
localPkScript, err := input.WitnessScriptHash(localScript)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// With all our scripts assembled, we'll examine the outputs of the
|
|
|
|
// commitment transaction to determine if this is a local force close
|
|
|
|
// or not.
|
|
|
|
for _, output := range commitSpend.SpendingTx.TxOut {
|
|
|
|
pkScript := output.PkScript
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case bytes.Equal(localPkScript, pkScript):
|
|
|
|
return true, nil
|
|
|
|
|
2020-01-06 13:42:04 +03:00
|
|
|
case bytes.Equal(remoteScript.PkScript, pkScript):
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If neither of these scripts are present, then it isn't a local force
|
|
|
|
// close.
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
|
// chainSet includes all the information we need to dispatch a channel close
// event to any subscribers.
type chainSet struct {
	// remoteStateNum is the commitment number of the lowest valid
	// commitment the remote party holds from our PoV. This value is used
	// to determine if the remote party is playing a state that's behind,
	// in line, or ahead of the latest state we know for it.
	remoteStateNum uint64

	// commitSet includes information pertaining to the set of active
	// HTLCs on each commitment.
	commitSet CommitSet

	// remoteCommit is the current commitment of the remote party.
	remoteCommit channeldb.ChannelCommitment

	// localCommit is our current commitment.
	localCommit channeldb.ChannelCommitment

	// remotePendingCommit points to the dangling commitment of the remote
	// party, if it exists. If there's no dangling commitment, then this
	// pointer will be nil.
	remotePendingCommit *channeldb.ChannelCommitment
}
|
|
|
|
|
|
|
|
// newChainSet creates a new chainSet given the current up to date channel
|
|
|
|
// state.
|
|
|
|
func newChainSet(chanState *channeldb.OpenChannel) (*chainSet, error) {
|
|
|
|
// First, we'll grab the current unrevoked commitments for ourselves
|
|
|
|
// and the remote party.
|
|
|
|
localCommit, remoteCommit, err := chanState.LatestCommitments()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to fetch channel state for "+
|
|
|
|
"chan_point=%v", chanState.FundingOutpoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Debugf("ChannelPoint(%v): local_commit_type=%v, local_commit=%v",
|
2020-04-04 01:28:49 +03:00
|
|
|
chanState.FundingOutpoint, chanState.ChanType,
|
|
|
|
spew.Sdump(localCommit))
|
2020-03-30 03:20:44 +03:00
|
|
|
log.Debugf("ChannelPoint(%v): remote_commit_type=%v, remote_commit=%v",
|
2020-04-04 01:28:49 +03:00
|
|
|
chanState.FundingOutpoint, chanState.ChanType,
|
|
|
|
spew.Sdump(remoteCommit))
|
2020-03-30 03:20:44 +03:00
|
|
|
|
|
|
|
// Fetch the current known commit height for the remote party, and
|
|
|
|
// their pending commitment chain tip if it exists.
|
|
|
|
remoteStateNum := remoteCommit.CommitHeight
|
|
|
|
remoteChainTip, err := chanState.RemoteCommitChainTip()
|
|
|
|
if err != nil && err != channeldb.ErrNoPendingCommit {
|
|
|
|
return nil, fmt.Errorf("unable to obtain chain tip for "+
|
|
|
|
"ChannelPoint(%v): %v",
|
|
|
|
chanState.FundingOutpoint, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that we have all the possible valid commitments, we'll make the
|
|
|
|
// CommitSet the ChannelArbitrator will need in order to carry out its
|
|
|
|
// duty.
|
|
|
|
commitSet := CommitSet{
|
|
|
|
HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
|
|
|
|
LocalHtlcSet: localCommit.Htlcs,
|
|
|
|
RemoteHtlcSet: remoteCommit.Htlcs,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
var remotePendingCommit *channeldb.ChannelCommitment
|
|
|
|
if remoteChainTip != nil {
|
|
|
|
remotePendingCommit = &remoteChainTip.Commitment
|
|
|
|
log.Debugf("ChannelPoint(%v): remote_pending_commit_type=%v, "+
|
2020-04-04 01:28:49 +03:00
|
|
|
"remote_pending_commit=%v", chanState.FundingOutpoint,
|
2020-03-30 03:20:44 +03:00
|
|
|
chanState.ChanType,
|
|
|
|
spew.Sdump(remoteChainTip.Commitment))
|
|
|
|
|
|
|
|
htlcs := remoteChainTip.Commitment.Htlcs
|
|
|
|
commitSet.HtlcSets[RemotePendingHtlcSet] = htlcs
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll now retrieve the latest state of the revocation store so we
|
|
|
|
// can populate the revocation information within the channel state
|
|
|
|
// object that we have.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): mutation is bad mkay
|
|
|
|
_, err = chanState.RemoteRevocationStore()
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to fetch revocation state for "+
|
|
|
|
"chan_point=%v", chanState.FundingOutpoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
return &chainSet{
|
|
|
|
remoteStateNum: remoteStateNum,
|
|
|
|
commitSet: commitSet,
|
|
|
|
localCommit: *localCommit,
|
|
|
|
remoteCommit: *remoteCommit,
|
|
|
|
remotePendingCommit: remotePendingCommit,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
2018-01-19 00:54:52 +03:00
|
|
|
// closeObserver is a dedicated goroutine that will watch for any closes of the
|
|
|
|
// channel that it's watching on chain. In the event of an on-chain event, the
|
|
|
|
// close observer will assembled the proper materials required to claim the
|
|
|
|
// funds of the channel on-chain (if required), then dispatch these as
|
|
|
|
// notifications to all subscribers.
|
|
|
|
func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
|
|
|
|
defer c.wg.Done()
|
|
|
|
|
|
|
|
log.Infof("Close observer for ChannelPoint(%v) active",
|
2018-04-19 14:05:05 +03:00
|
|
|
c.cfg.chanState.FundingOutpoint)
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2018-04-17 11:08:50 +03:00
|
|
|
select {
|
2019-03-11 02:46:30 +03:00
|
|
|
// We've detected a spend of the channel onchain! Depending on the type
|
|
|
|
// of spend, we'll act accordingly , so we'll examine the spending
|
|
|
|
// transaction to determine what we should do.
|
2018-04-17 11:08:50 +03:00
|
|
|
//
|
|
|
|
// TODO(Roasbeef): need to be able to ensure this only triggers
|
|
|
|
// on confirmation, to ensure if multiple txns are broadcast, we
|
|
|
|
// act on the one that's timestamped
|
|
|
|
case commitSpend, ok := <-spendNtfn.Spend:
|
2019-03-11 02:46:30 +03:00
|
|
|
// If the channel was closed, then this means that the notifier
|
|
|
|
// exited, so we will as well.
|
2018-04-17 11:08:50 +03:00
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2019-03-11 02:46:30 +03:00
|
|
|
// Otherwise, the remote party might have broadcast a prior
|
|
|
|
// revoked state...!!!
|
2018-04-17 11:08:50 +03:00
|
|
|
commitTxBroadcast := commitSpend.SpendingTx
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2020-03-30 03:20:44 +03:00
|
|
|
// First, we'll construct the chainset which includes all the
|
|
|
|
// data we need to dispatch an event to our subscribers about
|
|
|
|
// this possible channel close event.
|
|
|
|
chainSet, err := newChainSet(c.cfg.chanState)
|
2018-04-17 11:08:50 +03:00
|
|
|
if err != nil {
|
2020-03-30 03:20:44 +03:00
|
|
|
log.Errorf("unable to create commit set: %v", err)
|
2018-04-17 11:08:50 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
// Decode the state hint encoded within the commitment
|
|
|
|
// transaction to determine if this is a revoked state or not.
|
|
|
|
obfuscator := c.stateHintObfuscator
|
|
|
|
broadcastStateNum := c.cfg.extractStateNumHint(
|
|
|
|
commitTxBroadcast, obfuscator,
|
2018-04-17 11:08:50 +03:00
|
|
|
)
|
2019-03-11 03:05:00 +03:00
|
|
|
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
// Based on the output scripts within this commitment, we'll
|
|
|
|
// determine if this is our commitment transaction or not (a
|
|
|
|
// self force close).
|
|
|
|
isOurCommit, err := isOurCommitment(
|
|
|
|
c.cfg.chanState.LocalChanCfg,
|
|
|
|
c.cfg.chanState.RemoteChanCfg, commitSpend,
|
|
|
|
broadcastStateNum, c.cfg.chanState.RevocationProducer,
|
2020-01-06 13:42:04 +03:00
|
|
|
c.cfg.chanState.ChanType,
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("unable to determine self commit for "+
|
|
|
|
"chan_point=%v: %v",
|
|
|
|
c.cfg.chanState.FundingOutpoint, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this is our commitment transaction, then we can exit here
|
|
|
|
// as we don't have any further processing we need to do (we
|
|
|
|
// can't cheat ourselves :p).
|
|
|
|
if isOurCommit {
|
2020-03-30 03:20:44 +03:00
|
|
|
chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TODO statement in the channel arb.
Before this commit, we would simply wipe out our local HTLC set with
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect, as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight sets of HTLCs: ours, theirs, and their pending.
We also start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb the additional information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch package, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just sending off the set of HTLCs after we
revoke, we'll also send off the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
if err := c.dispatchLocalForceClose(
|
2020-03-30 03:20:44 +03:00
|
|
|
commitSpend, chainSet.localCommit,
|
|
|
|
chainSet.commitSet,
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
); err != nil {
|
|
|
|
log.Errorf("unable to handle local"+
|
|
|
|
"close for chan_point=%v: %v",
|
|
|
|
c.cfg.chanState.FundingOutpoint, err)
|
2018-01-19 00:54:52 +03:00
|
|
|
}
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
return
|
2018-04-17 11:08:50 +03:00
|
|
|
}
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2019-03-11 02:46:30 +03:00
|
|
|
// Next, we'll check to see if this is a cooperative channel
|
|
|
|
// closure or not. This is characterized by having an input
|
|
|
|
// sequence number that's finalized. This won't happen with
|
|
|
|
// regular commitment transactions due to the state hint
|
|
|
|
// encoding scheme.
|
2018-04-17 11:08:50 +03:00
|
|
|
if commitTxBroadcast.TxIn[0].Sequence == wire.MaxTxInSequenceNum {
|
2019-03-11 02:46:30 +03:00
|
|
|
// TODO(roasbeef): rare but possible, need itest case
|
|
|
|
// for
|
2018-04-17 11:08:50 +03:00
|
|
|
err := c.dispatchCooperativeClose(commitSpend)
|
2018-01-19 00:54:52 +03:00
|
|
|
if err != nil {
|
2018-04-17 11:08:50 +03:00
|
|
|
log.Errorf("unable to handle co op close: %v", err)
|
2018-01-19 00:54:52 +03:00
|
|
|
}
|
2018-04-17 11:08:50 +03:00
|
|
|
return
|
|
|
|
}
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2018-04-17 11:08:50 +03:00
|
|
|
log.Warnf("Unprompted commitment broadcast for "+
|
2018-04-19 14:05:05 +03:00
|
|
|
"ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)
|
2018-01-19 00:54:52 +03:00
|
|
|
|
contractcourt: detect local force closes based on commitment outputs
In this commit, we modify the way we detect local force closes. Before
this commit, we would directly check the broadcast commitment's txid
against what we know to be our best local commitment. In the case of DLP
recovery from an SCB, it's possible that the user force closed, _then_
attempted to recover their channels. As a result, we need to check the
outputs directly in order to also handle this rare, but
possible recovery scenario.
The new detection method uses the outputs to detect if it's a local
commitment or not. Based on the state number, we'll re-derive the
expected scripts, and check to see if they're on the commitment. If not,
then we know it's a remote force close. A new test has been added to
exercise this new behavior, ensuring we catch local closes where we have
and don't have a direct output.
2019-03-30 04:19:01 +03:00
|
|
|
// If this channel has been recovered, then we'll modify our
|
|
|
|
// behavior as it isn't possible for us to close out the
|
|
|
|
// channel off-chain ourselves. It can only be the remote party
|
|
|
|
// force closing, or a cooperative closure we signed off on
|
|
|
|
// before losing data getting confirmed in the chain.
|
|
|
|
isRecoveredChan := c.cfg.chanState.HasChanStatus(
|
|
|
|
channeldb.ChanStatusRestored,
|
|
|
|
)
|
|
|
|
|
2018-04-17 11:08:50 +03:00
|
|
|
switch {
|
2019-03-11 03:47:06 +03:00
|
|
|
// If state number spending transaction matches the current
|
|
|
|
// latest state, then they've initiated a unilateral close. So
|
|
|
|
// we'll trigger the unilateral close signal so subscribers can
|
|
|
|
// clean up the state as necessary.
|
2020-03-30 03:20:44 +03:00
|
|
|
case broadcastStateNum == chainSet.remoteStateNum &&
|
|
|
|
!isRecoveredChan:
|
|
|
|
|
|
|
|
log.Infof("Remote party broadcast base set, "+
|
|
|
|
"commit_num=%v", chainSet.remoteStateNum)
|
2019-03-11 03:47:06 +03:00
|
|
|
|
2020-03-30 03:20:44 +03:00
|
|
|
chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
|
2018-04-30 01:42:01 +03:00
|
|
|
err := c.dispatchRemoteForceClose(
|
2020-03-30 03:20:44 +03:00
|
|
|
commitSpend, chainSet.remoteCommit,
|
|
|
|
chainSet.commitSet,
|
2018-07-12 12:02:52 +03:00
|
|
|
c.cfg.chanState.RemoteCurrentRevocation,
|
2018-04-30 01:42:01 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("unable to handle remote "+
|
|
|
|
"close for chan_point=%v: %v",
|
|
|
|
c.cfg.chanState.FundingOutpoint, err)
|
|
|
|
}
|
|
|
|
|
2019-03-11 03:47:06 +03:00
|
|
|
// We'll also handle the case of the remote party broadcasting
|
|
|
|
// their commitment transaction which is one height above ours.
|
|
|
|
// This case can arise when we initiate a state transition, but
|
|
|
|
// the remote party has a fail crash _after_ accepting the new
|
|
|
|
// state, but _before_ sending their signature to us.
|
2020-03-30 03:20:44 +03:00
|
|
|
case broadcastStateNum == chainSet.remoteStateNum+1 &&
|
|
|
|
chainSet.remotePendingCommit != nil && !isRecoveredChan:
|
2018-04-30 01:42:01 +03:00
|
|
|
|
2020-03-30 03:20:44 +03:00
|
|
|
log.Infof("Remote party broadcast pending set, "+
|
|
|
|
"commit_num=%v", chainSet.remoteStateNum+1)
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
|
2020-03-30 03:20:44 +03:00
|
|
|
chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet
|
2018-04-30 01:42:01 +03:00
|
|
|
err := c.dispatchRemoteForceClose(
|
2020-03-30 03:20:44 +03:00
|
|
|
commitSpend, *chainSet.remotePendingCommit,
|
|
|
|
chainSet.commitSet,
|
2018-07-12 12:02:52 +03:00
|
|
|
c.cfg.chanState.RemoteNextRevocation,
|
2018-04-30 01:42:01 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
2018-04-17 11:08:50 +03:00
|
|
|
log.Errorf("unable to handle remote "+
|
|
|
|
"close for chan_point=%v: %v",
|
2018-04-19 14:05:05 +03:00
|
|
|
c.cfg.chanState.FundingOutpoint, err)
|
2018-01-20 04:12:08 +03:00
|
|
|
}
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2019-03-13 05:25:01 +03:00
|
|
|
// If the remote party has broadcasted a state beyond our best
|
|
|
|
// known state for them, and they don't have a pending
|
|
|
|
// commitment (we write them to disk before sending out), then
|
|
|
|
// this means that we've lost data. In this case, we'll enter
|
2019-03-11 03:47:06 +03:00
|
|
|
// the DLP protocol. Otherwise, if we've recovered our channel
|
|
|
|
// state from scratch, then we don't know what the precise
|
|
|
|
// current state is, so we assume either the remote party
|
|
|
|
// forced closed or we've been breached. In the latter case,
|
|
|
|
// our tower will take care of us.
|
2020-03-30 03:20:44 +03:00
|
|
|
case broadcastStateNum > chainSet.remoteStateNum || isRecoveredChan:
|
2018-07-12 12:02:54 +03:00
|
|
|
log.Warnf("Remote node broadcast state #%v, "+
|
2018-04-30 01:42:01 +03:00
|
|
|
"which is more than 1 beyond best known "+
|
2018-07-12 12:02:54 +03:00
|
|
|
"state #%v!!! Attempting recovery...",
|
2020-03-30 03:20:44 +03:00
|
|
|
broadcastStateNum, chainSet.remoteStateNum)
|
2018-07-12 12:02:54 +03:00
|
|
|
|
2019-08-01 06:22:55 +03:00
|
|
|
// If this isn't a tweakless commitment, then we'll
|
|
|
|
// need to wait for the remote party's latest unrevoked
|
|
|
|
// commitment point to be presented to us as we need
|
|
|
|
// this to sweep. Otherwise, we can dispatch the remote
|
|
|
|
// close and sweep immediately using a fake commitPoint
|
|
|
|
// as it isn't actually needed for recovery anymore.
|
|
|
|
commitPoint := c.cfg.chanState.RemoteCurrentRevocation
|
2020-01-06 13:42:04 +03:00
|
|
|
tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless()
|
2019-08-01 06:22:55 +03:00
|
|
|
if !tweaklessCommit {
|
|
|
|
commitPoint = c.waitForCommitmentPoint()
|
|
|
|
if commitPoint == nil {
|
2018-11-20 17:09:46 +03:00
|
|
|
return
|
|
|
|
}
|
2018-07-12 12:02:54 +03:00
|
|
|
|
2019-08-01 06:22:55 +03:00
|
|
|
log.Infof("Recovered commit point(%x) for "+
|
|
|
|
"channel(%v)! Now attempting to use it to "+
|
|
|
|
"sweep our funds...",
|
|
|
|
commitPoint.SerializeCompressed(),
|
|
|
|
c.cfg.chanState.FundingOutpoint)
|
|
|
|
|
|
|
|
} else {
|
2020-02-11 15:23:34 +03:00
|
|
|
log.Infof("ChannelPoint(%v) is tweakless, "+
|
|
|
|
"moving to sweep directly on chain",
|
|
|
|
c.cfg.chanState.FundingOutpoint)
|
2019-08-01 06:22:55 +03:00
|
|
|
}
|
2018-07-12 12:02:54 +03:00
|
|
|
|
|
|
|
// Since we don't have the commitment stored for this
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
// state, we'll just pass an empty commitment within
|
|
|
|
// the commitment set. Note that this means we won't be
|
|
|
|
// able to recover any HTLC funds.
|
2019-03-13 05:19:24 +03:00
|
|
|
//
|
2018-07-12 12:02:54 +03:00
|
|
|
// TODO(halseth): can we try to recover some HTLCs?
|
2020-03-30 03:20:44 +03:00
|
|
|
chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
|
2018-07-12 12:02:54 +03:00
|
|
|
err = c.dispatchRemoteForceClose(
|
|
|
|
commitSpend, channeldb.ChannelCommitment{},
|
2020-03-30 03:20:44 +03:00
|
|
|
chainSet.commitSet, commitPoint,
|
2018-07-12 12:02:54 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("unable to handle remote "+
|
|
|
|
"close for chan_point=%v: %v",
|
|
|
|
c.cfg.chanState.FundingOutpoint, err)
|
|
|
|
}
|
2018-04-30 01:42:01 +03:00
|
|
|
|
2019-03-11 02:46:30 +03:00
|
|
|
// If the state number broadcast is lower than the remote
|
|
|
|
// node's current un-revoked height, then THEY'RE ATTEMPTING TO
|
|
|
|
// VIOLATE THE CONTRACT LAID OUT WITHIN THE PAYMENT CHANNEL.
|
|
|
|
// Therefore we close the signal indicating a revoked broadcast
|
|
|
|
// to allow subscribers to swiftly dispatch justice!!!
|
2020-03-30 03:20:44 +03:00
|
|
|
case broadcastStateNum < chainSet.remoteStateNum:
|
2018-04-30 01:42:01 +03:00
|
|
|
err := c.dispatchContractBreach(
|
2020-03-30 03:20:44 +03:00
|
|
|
commitSpend, &chainSet.remoteCommit,
|
2018-04-17 11:08:50 +03:00
|
|
|
broadcastStateNum,
|
2018-04-30 01:42:01 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
2018-04-17 11:08:50 +03:00
|
|
|
log.Errorf("unable to handle channel "+
|
|
|
|
"breach for chan_point=%v: %v",
|
2018-04-19 14:05:05 +03:00
|
|
|
c.cfg.chanState.FundingOutpoint, err)
|
2018-01-19 00:54:52 +03:00
|
|
|
}
|
2018-04-17 11:08:50 +03:00
|
|
|
}
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2019-03-11 02:46:30 +03:00
|
|
|
// Now that a spend has been detected, we've done our job, so
|
|
|
|
// we'll exit immediately.
|
2018-04-17 11:08:50 +03:00
|
|
|
return
|
2018-01-19 00:54:52 +03:00
|
|
|
|
2018-04-17 11:08:50 +03:00
|
|
|
// The chainWatcher has been signalled to exit, so we'll do so now.
|
|
|
|
case <-c.quit:
|
|
|
|
return
|
2018-01-19 00:54:52 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-20 04:12:08 +03:00
|
|
|
// toSelfAmount takes a transaction and returns the sum of all outputs that pay
|
|
|
|
// to a script that the wallet controls. If no outputs pay to us, then we
|
|
|
|
// return zero. This is possible as our output may have been trimmed due to
|
|
|
|
// being dust.
|
|
|
|
func (c *chainWatcher) toSelfAmount(tx *wire.MsgTx) btcutil.Amount {
|
|
|
|
var selfAmt btcutil.Amount
|
|
|
|
for _, txOut := range tx.TxOut {
|
|
|
|
_, addrs, _, err := txscript.ExtractPkScriptAddrs(
|
|
|
|
// Doesn't matter what net we actually pass in.
|
|
|
|
txOut.PkScript, &chaincfg.TestNet3Params,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, addr := range addrs {
|
2018-04-19 14:05:05 +03:00
|
|
|
if c.cfg.isOurAddr(addr) {
|
2018-01-20 04:12:08 +03:00
|
|
|
selfAmt += btcutil.Amount(txOut.Value)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return selfAmt
|
|
|
|
}
|
|
|
|
|
|
|
|
// dispatchCooperativeClose processed a detect cooperative channel closure.
|
|
|
|
// We'll use the spending transaction to locate our output within the
|
|
|
|
// transaction, then clean up the database state. We'll also dispatch a
|
|
|
|
// notification to all subscribers that the channel has been closed in this
|
|
|
|
// manner.
|
|
|
|
func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDetail) error {
|
|
|
|
broadcastTx := commitSpend.SpendingTx
|
|
|
|
|
|
|
|
log.Infof("Cooperative closure for ChannelPoint(%v): %v",
|
2018-04-19 14:05:05 +03:00
|
|
|
c.cfg.chanState.FundingOutpoint, spew.Sdump(broadcastTx))
|
2018-01-20 04:12:08 +03:00
|
|
|
|
|
|
|
// If the input *is* final, then we'll check to see which output is
|
|
|
|
// ours.
|
|
|
|
localAmt := c.toSelfAmount(broadcastTx)
|
|
|
|
|
2018-05-22 15:26:02 +03:00
|
|
|
// Once this is known, we'll mark the state as fully closed in the
|
|
|
|
// database. We can do this as a cooperatively closed channel has all
|
|
|
|
// its outputs resolved after only one confirmation.
|
2018-01-20 04:12:08 +03:00
|
|
|
closeSummary := &channeldb.ChannelCloseSummary{
|
2018-08-14 05:17:36 +03:00
|
|
|
ChanPoint: c.cfg.chanState.FundingOutpoint,
|
|
|
|
ChainHash: c.cfg.chanState.ChainHash,
|
|
|
|
ClosingTXID: *commitSpend.SpenderTxHash,
|
|
|
|
RemotePub: c.cfg.chanState.IdentityPub,
|
|
|
|
Capacity: c.cfg.chanState.Capacity,
|
|
|
|
CloseHeight: uint32(commitSpend.SpendingHeight),
|
|
|
|
SettledBalance: localAmt,
|
|
|
|
CloseType: channeldb.CooperativeClose,
|
|
|
|
ShortChanID: c.cfg.chanState.ShortChanID(),
|
2018-08-21 13:21:15 +03:00
|
|
|
IsPending: true,
|
2018-08-14 05:17:36 +03:00
|
|
|
RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
|
|
|
|
RemoteNextRevocation: c.cfg.chanState.RemoteNextRevocation,
|
|
|
|
LocalChanConfig: c.cfg.chanState.LocalChanCfg,
|
2018-01-20 04:12:08 +03:00
|
|
|
}
|
|
|
|
|
2018-11-20 17:09:45 +03:00
|
|
|
// Attempt to add a channel sync message to the close summary.
|
2019-09-11 12:15:57 +03:00
|
|
|
chanSync, err := c.cfg.chanState.ChanSyncMsg()
|
2018-11-20 17:09:45 +03:00
|
|
|
if err != nil {
|
|
|
|
log.Errorf("ChannelPoint(%v): unable to create channel sync "+
|
|
|
|
"message: %v", c.cfg.chanState.FundingOutpoint, err)
|
|
|
|
} else {
|
|
|
|
closeSummary.LastChanSyncMsg = chanSync
|
|
|
|
}
|
|
|
|
|
2018-08-21 13:21:15 +03:00
|
|
|
// Create a summary of all the information needed to handle the
|
|
|
|
// cooperative closure.
|
|
|
|
closeInfo := &CooperativeCloseInfo{
|
|
|
|
ChannelCloseSummary: closeSummary,
|
|
|
|
}
|
2018-01-20 04:12:08 +03:00
|
|
|
|
2018-08-21 13:21:15 +03:00
|
|
|
// With the event processed, we'll now notify all subscribers of the
|
|
|
|
// event.
|
2018-01-20 04:12:08 +03:00
|
|
|
c.Lock()
|
|
|
|
for _, sub := range c.clientSubscriptions {
|
|
|
|
select {
|
2018-08-21 13:21:15 +03:00
|
|
|
case sub.CooperativeClosure <- closeInfo:
|
2018-01-20 04:12:08 +03:00
|
|
|
case <-c.quit:
|
2018-04-12 00:50:05 +03:00
|
|
|
c.Unlock()
|
2018-01-20 04:12:08 +03:00
|
|
|
return fmt.Errorf("exiting")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
c.Unlock()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-03-16 16:08:37 +03:00
|
|
|
// dispatchLocalForceClose processes a unilateral close by us being confirmed.
// It builds a LocalForceCloseSummary from the confirmed spend, persists no
// state itself, and notifies all registered subscribers with the close
// details plus the CommitSet of valid commitments at closure time.
func (c *chainWatcher) dispatchLocalForceClose(
	commitSpend *chainntnfs.SpendDetail,
	localCommit channeldb.ChannelCommitment, commitSet CommitSet) error {

	log.Infof("Local unilateral close of ChannelPoint(%v) "+
		"detected", c.cfg.chanState.FundingOutpoint)

	// Derive the materials (sweepable outputs, HTLC resolutions) needed
	// to resolve our own broadcast commitment on chain.
	forceClose, err := lnwallet.NewLocalForceCloseSummary(
		c.cfg.chanState, c.cfg.signer,
		commitSpend.SpendingTx, localCommit,
	)
	if err != nil {
		return err
	}

	// As we've detected that the channel has been closed, immediately
	// creating a close summary for future usage by related sub-systems.
	chanSnapshot := forceClose.ChanSnapshot
	closeSummary := &channeldb.ChannelCloseSummary{
		ChanPoint:               chanSnapshot.ChannelPoint,
		ChainHash:               chanSnapshot.ChainHash,
		ClosingTXID:             forceClose.CloseTx.TxHash(),
		RemotePub:               &chanSnapshot.RemoteIdentity,
		Capacity:                chanSnapshot.Capacity,
		CloseType:               channeldb.LocalForceClose,
		IsPending:               true,
		ShortChanID:             c.cfg.chanState.ShortChanID(),
		CloseHeight:             uint32(commitSpend.SpendingHeight),
		RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
		RemoteNextRevocation:    c.cfg.chanState.RemoteNextRevocation,
		LocalChanConfig:         c.cfg.chanState.LocalChanCfg,
	}

	// If our commitment output isn't dust or we have active HTLC's on the
	// commitment transaction, then we'll populate the balances on the
	// close channel summary. A nil CommitResolution means our to-self
	// output was trimmed as dust.
	if forceClose.CommitResolution != nil {
		closeSummary.SettledBalance = chanSnapshot.LocalBalance.ToSatoshis()
		closeSummary.TimeLockedBalance = chanSnapshot.LocalBalance.ToSatoshis()
	}
	// Each outgoing HTLC adds its value to the time-locked balance, as
	// those funds only return to us after their timeout path matures.
	for _, htlc := range forceClose.HtlcResolutions.OutgoingHTLCs {
		htlcValue := btcutil.Amount(htlc.SweepSignDesc.Output.Value)
		closeSummary.TimeLockedBalance += htlcValue
	}

	// Attempt to add a channel sync message to the close summary. Failure
	// is logged but non-fatal.
	chanSync, err := c.cfg.chanState.ChanSyncMsg()
	if err != nil {
		log.Errorf("ChannelPoint(%v): unable to create channel sync "+
			"message: %v", c.cfg.chanState.FundingOutpoint, err)
	} else {
		closeSummary.LastChanSyncMsg = chanSync
	}

	// With the event processed, we'll now notify all subscribers of the
	// event.
	closeInfo := &LocalUnilateralCloseInfo{
		SpendDetail:            commitSpend,
		LocalForceCloseSummary: forceClose,
		ChannelCloseSummary:    closeSummary,
		CommitSet:              commitSet,
	}
	c.Lock()
	for _, sub := range c.clientSubscriptions {
		select {
		case sub.LocalUnilateralClosure <- closeInfo:
		case <-c.quit:
			// Shutting down; release the lock before bailing.
			c.Unlock()
			return fmt.Errorf("exiting")
		}
	}
	c.Unlock()

	return nil
}
|
|
|
|
|
2018-04-30 01:42:01 +03:00
|
|
|
// dispatchRemoteForceClose processes a detected unilateral channel closure by
|
|
|
|
// the remote party. This function will prepare a UnilateralCloseSummary which
|
|
|
|
// will then be sent to any subscribers allowing them to resolve all our funds
|
|
|
|
// in the channel on chain. Once this close summary is prepared, all registered
|
2018-07-12 12:02:52 +03:00
|
|
|
// subscribers will receive a notification of this event. The commitPoint
|
|
|
|
// argument should be set to the per_commitment_point corresponding to the
|
|
|
|
// spending commitment.
|
|
|
|
//
|
|
|
|
// NOTE: The remoteCommit argument should be set to the stored commitment for
|
|
|
|
// this particular state. If we don't have the commitment stored (should only
|
|
|
|
// happen in case we have lost state) it should be set to an empty struct, in
|
|
|
|
// which case we will attempt to sweep the non-HTLC output using the passed
|
|
|
|
// commitPoint.
|
|
|
|
func (c *chainWatcher) dispatchRemoteForceClose(
|
|
|
|
commitSpend *chainntnfs.SpendDetail,
|
|
|
|
remoteCommit channeldb.ChannelCommitment,
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
commitSet CommitSet, commitPoint *btcec.PublicKey) error {
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
log.Infof("Unilateral close of ChannelPoint(%v) "+
|
2018-04-19 14:05:05 +03:00
|
|
|
"detected", c.cfg.chanState.FundingOutpoint)
|
2018-01-19 00:54:52 +03:00
|
|
|
|
|
|
|
// First, we'll create a closure summary that contains all the
|
|
|
|
// materials required to let each subscriber sweep the funds in the
|
|
|
|
// channel on-chain.
|
2018-04-30 01:42:01 +03:00
|
|
|
uniClose, err := lnwallet.NewUnilateralCloseSummary(
|
2019-04-15 15:24:43 +03:00
|
|
|
c.cfg.chanState, c.cfg.signer, commitSpend,
|
2018-07-12 12:02:52 +03:00
|
|
|
remoteCommit, commitPoint,
|
2018-01-19 00:54:52 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the event processed, we'll now notify all subscribers of the
|
|
|
|
// event.
|
|
|
|
c.Lock()
|
|
|
|
for _, sub := range c.clientSubscriptions {
|
|
|
|
select {
|
multi: address lingering TODO by no longer wiping out local HTLCs on remote close
In this commit, we fix a lingering TOOD statement in the channel arb.
Before this commitment, we would simply wipe our our local HTLC set of
the HTLC set that was on the remote commitment transaction on force
close. This was incorrect as if our commitment transaction had an HTLC
that the remote commitment didn't, then we would fail to cancel that
back, and cause both channels to time out on chain.
In order to remedy this, we introduce a new `HtlcSetKey` struct to track
all 3 possible in-flight set of HTLCs: ours, theirs, and their pending.
We also we start to tack on additional data to all the unilateral close
messages we send to subscribers. This new data is the CommitSet, or the
set of valid commitments at channel closure time. This new information
will be used by the channel arb in an upcoming commit to ensure it will
cancel back HTLCs in the case of split commitment state.
Finally, we start to thread through an optional *CommitSet to the
advanceState method. This additional information will give the channel
arb addition information it needs to ensure it properly cancels back
HTLCs that are about to time out or may time out depending on which
commitment is played.
Within the htlcswitch pakage, we modify the `SignNextCommitment` method
to return the new set of pending HTLCs for the remote party's commitment
transaction and `ReceiveRevocation` to return the latest set of
commitment transactions on the remote party's commitment as well. This
is a preparatory change which is part of a larger change to address a
lingering TODO in the cnct.
Additionally, rather than just send of the set of HTLCs after the we
revoke, we'll also send of the set of HTLCs after the remote party
revokes, and we create a pending commitment state for it.
2019-05-17 03:23:26 +03:00
|
|
|
case sub.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
|
|
|
|
UnilateralCloseSummary: uniClose,
|
|
|
|
CommitSet: commitSet,
|
|
|
|
}:
|
2018-01-19 00:54:52 +03:00
|
|
|
case <-c.quit:
|
2018-04-12 00:50:05 +03:00
|
|
|
c.Unlock()
|
2018-01-19 00:54:52 +03:00
|
|
|
return fmt.Errorf("exiting")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
c.Unlock()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// dispatchContractBreach processes a detected contract breach by the remote
// party. This method is to be called once we detect that the remote party has
// broadcast a prior revoked commitment state. This method will prepare all the
// materials required to bring the cheater to justice, then notify all
// registered subscribers of this event.
func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail,
	remoteCommit *channeldb.ChannelCommitment,
	broadcastStateNum uint64) error {

	log.Warnf("Remote peer has breached the channel contract for "+
		"ChannelPoint(%v). Revoked state #%v was broadcast!!!",
		c.cfg.chanState.FundingOutpoint, broadcastStateNum)

	// Mark the channel as borked before anything else so it can't be used
	// for further updates.
	if err := c.cfg.chanState.MarkBorked(); err != nil {
		return fmt.Errorf("unable to mark channel as borked: %v", err)
	}

	spendHeight := uint32(spendEvent.SpendingHeight)

	// Create a new reach retribution struct which contains all the data
	// needed to swiftly bring the cheating peer to justice.
	//
	// TODO(roasbeef): move to same package
	retribution, err := lnwallet.NewBreachRetribution(
		c.cfg.chanState, broadcastStateNum, spendHeight,
	)
	if err != nil {
		return fmt.Errorf("unable to create breach retribution: %v", err)
	}

	// Nil the curve before printing. This avoids dumping the (huge)
	// elliptic-curve parameters when the struct is spew'd below.
	if retribution.RemoteOutputSignDesc != nil &&
		retribution.RemoteOutputSignDesc.DoubleTweak != nil {
		retribution.RemoteOutputSignDesc.DoubleTweak.Curve = nil
	}
	if retribution.RemoteOutputSignDesc != nil &&
		retribution.RemoteOutputSignDesc.KeyDesc.PubKey != nil {
		retribution.RemoteOutputSignDesc.KeyDesc.PubKey.Curve = nil
	}
	if retribution.LocalOutputSignDesc != nil &&
		retribution.LocalOutputSignDesc.DoubleTweak != nil {
		retribution.LocalOutputSignDesc.DoubleTweak.Curve = nil
	}
	if retribution.LocalOutputSignDesc != nil &&
		retribution.LocalOutputSignDesc.KeyDesc.PubKey != nil {
		retribution.LocalOutputSignDesc.KeyDesc.PubKey.Curve = nil
	}

	// NOTE(review): the closure below mutates the retribution's KeyRing
	// (nils keys/curves) purely to shrink the debug dump; it runs only if
	// debug logging is enabled, so the retribution handed to the breach
	// arbiter above/below may or may not carry these fields — confirm
	// downstream consumers don't rely on them.
	log.Debugf("Punishment breach retribution created: %v",
		newLogClosure(func() string {
			retribution.KeyRing.CommitPoint.Curve = nil
			retribution.KeyRing.LocalHtlcKey = nil
			retribution.KeyRing.RemoteHtlcKey = nil
			retribution.KeyRing.ToLocalKey = nil
			retribution.KeyRing.ToRemoteKey = nil
			retribution.KeyRing.RevocationKey = nil
			return spew.Sdump(retribution)
		}))

	// Hand the retribution info over to the breach arbiter.
	if err := c.cfg.contractBreach(retribution); err != nil {
		log.Errorf("unable to hand breached contract off to "+
			"breachArbiter: %v", err)
		return err
	}

	// With the event processed, we'll now notify all subscribers of the
	// event.
	c.Lock()
	for _, sub := range c.clientSubscriptions {
		select {
		case sub.ContractBreach <- retribution:
		case <-c.quit:
			// Shutting down; release the lock before bailing.
			c.Unlock()
			return fmt.Errorf("quitting")
		}
	}
	c.Unlock()

	// At this point, we've successfully received an ack for the breach
	// close. We now construct and persist the close summary, marking the
	// channel as pending force closed.
	//
	// TODO(roasbeef): instead mark we got all the monies?
	// TODO(halseth): move responsibility to breach arbiter?
	settledBalance := remoteCommit.LocalBalance.ToSatoshis()
	closeSummary := channeldb.ChannelCloseSummary{
		ChanPoint:               c.cfg.chanState.FundingOutpoint,
		ChainHash:               c.cfg.chanState.ChainHash,
		ClosingTXID:             *spendEvent.SpenderTxHash,
		CloseHeight:             spendHeight,
		RemotePub:               c.cfg.chanState.IdentityPub,
		Capacity:                c.cfg.chanState.Capacity,
		SettledBalance:          settledBalance,
		CloseType:               channeldb.BreachClose,
		IsPending:               true,
		ShortChanID:             c.cfg.chanState.ShortChanID(),
		RemoteCurrentRevocation: c.cfg.chanState.RemoteCurrentRevocation,
		RemoteNextRevocation:    c.cfg.chanState.RemoteNextRevocation,
		LocalChanConfig:         c.cfg.chanState.LocalChanCfg,
	}

	// Attempt to add a channel sync message to the close summary. Failure
	// is logged but non-fatal.
	chanSync, err := c.cfg.chanState.ChanSyncMsg()
	if err != nil {
		log.Errorf("ChannelPoint(%v): unable to create channel sync "+
			"message: %v", c.cfg.chanState.FundingOutpoint, err)
	} else {
		closeSummary.LastChanSyncMsg = chanSync
	}

	// Persist the close, marking the remote as the close initiator.
	if err := c.cfg.chanState.CloseChannel(
		&closeSummary, channeldb.ChanStatusRemoteCloseInitiator,
	); err != nil {
		return err
	}

	log.Infof("Breached channel=%v marked pending-closed",
		c.cfg.chanState.FundingOutpoint)

	return nil
}
|
2019-08-01 06:22:55 +03:00
|
|
|
|
|
|
|
// waitForCommitmentPoint waits for the commitment point to be inserted into
|
|
|
|
// the local database. We'll use this method in the DLP case, to wait for the
|
|
|
|
// remote party to send us their point, as we can't proceed until we have that.
|
|
|
|
func (c *chainWatcher) waitForCommitmentPoint() *btcec.PublicKey {
|
|
|
|
// If we are lucky, the remote peer sent us the correct commitment
|
|
|
|
// point during channel sync, such that we can sweep our funds. If we
|
|
|
|
// cannot find the commit point, there's not much we can do other than
|
|
|
|
// wait for us to retrieve it. We will attempt to retrieve it from the
|
|
|
|
// peer each time we connect to it.
|
|
|
|
//
|
|
|
|
// TODO(halseth): actively initiate re-connection to the peer?
|
|
|
|
backoff := minCommitPointPollTimeout
|
|
|
|
for {
|
|
|
|
commitPoint, err := c.cfg.chanState.DataLossCommitPoint()
|
|
|
|
if err == nil {
|
|
|
|
return commitPoint
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Errorf("Unable to retrieve commitment point for "+
|
|
|
|
"channel(%v) with lost state: %v. Retrying in %v.",
|
|
|
|
c.cfg.chanState.FundingOutpoint, err, backoff)
|
|
|
|
|
|
|
|
select {
|
|
|
|
// Wait before retrying, with an exponential backoff.
|
|
|
|
case <-time.After(backoff):
|
|
|
|
backoff = 2 * backoff
|
|
|
|
if backoff > maxCommitPointPollTimeout {
|
|
|
|
backoff = maxCommitPointPollTimeout
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-c.quit:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|