Merge pull request #4780 from halseth/chainwatcher-force-close-future-state
chainwatcher: properly derive to_local script for "future" local force closes
commit daf7c8a854
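In short: when a commitment of ours confirms at a state number other than the one we have on disk (we lost state, or an old state leaked), the watcher must derive the to_local script for the state that actually hit the chain, not for our stored state. A hedged sketch of the idea, with variable names borrowed from the diff below rather than quoted verbatim:

	// Sketch: derive the to_local script at whatever state number the
	// confirmed commitment encodes, rather than at our stored height.
	commitSecret, err := chanState.RevocationProducer.AtIndex(broadcastStateNum)
	if err != nil {
		return err
	}
	commitPoint := input.ComputeCommitmentPoint(commitSecret[:])

	keyRing := lnwallet.DeriveCommitmentKeys(
		commitPoint, true, chanState.ChanType,
		&chanState.LocalChanCfg, &chanState.RemoteChanCfg,
	)
	toLocalScript, err := input.CommitScriptToSelf(
		uint32(chanState.LocalChanCfg.CsvDelay),
		keyRing.ToLocalKey, keyRing.RevocationKey,
	)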
@@ -163,9 +163,9 @@ var (
 	// channel.
 	ErrChanBorked = fmt.Errorf("cannot mutate borked channel")
 
-	// errLogEntryNotFound is returned when we cannot find a log entry at
+	// ErrLogEntryNotFound is returned when we cannot find a log entry at
 	// the height requested in the revocation log.
-	errLogEntryNotFound = fmt.Errorf("log entry not found")
+	ErrLogEntryNotFound = fmt.Errorf("log entry not found")
 
 	// errHeightNotFound is returned when a query for channel balances at
 	// a height that we have not reached yet is made.
@@ -3469,7 +3469,7 @@ func fetchChannelLogEntry(log kvdb.RBucket,
 	logEntrykey := makeLogKey(updateNum)
 	commitBytes := log.Get(logEntrykey[:])
 	if commitBytes == nil {
-		return ChannelCommitment{}, errLogEntryNotFound
+		return ChannelCommitment{}, ErrLogEntryNotFound
 	}
 
 	commitReader := bytes.NewReader(commitBytes)
@@ -1485,7 +1485,7 @@ func TestBalanceAtHeight(t *testing.T) {
 			targetHeight:          unknownHeight,
 			expectedLocalBalance:  0,
 			expectedRemoteBalance: 0,
-			expectedError:         errLogEntryNotFound,
+			expectedError:         ErrLogEntryNotFound,
 		},
 		{
 			name: "height not reached",
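Exporting the error matters because code outside channeldb (the chain watcher below) now needs to match on it; a short sketch mirroring the new handleKnownRemoteState:

	retribution, err := lnwallet.NewBreachRetribution(
		chanState, broadcastStateNum, spendHeight,
	)
	switch {
	// No revocation log entry at this height: the confirmed commitment
	// is not a revoked state.
	case err == channeldb.ErrLogEntryNotFound:
		return false, nil
	case err != nil:
		return false, err
	}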
@@ -17,7 +17,6 @@ import (
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/input"
 	"github.com/lightningnetwork/lnd/lnwallet"
-	"github.com/lightningnetwork/lnd/shachain"
 )
 
 const (
@@ -324,18 +323,27 @@ func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription {
 	return sub
 }
 
-// isOurCommitment returns true if the passed commitSpend is a spend of the
-// funding transaction using our commitment transaction (a local force close).
-// In order to do this in a state agnostic manner, we'll make our decisions
-// based off of only the set of outputs included.
-func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
+// handleUnknownLocalState checks whether the passed spend _could_ be a local
+// state that for some reason is unknown to us. This could be a state published
+// by us before we lost state, which we will try to sweep. Or it could be one
+// of our revoked states that somehow made it to the chain. If that's the case
+// we cannot really hope that we'll be able to get our money back, but we'll
+// try to sweep it anyway. If this is not an unknown local state, false is
+// returned.
+func (c *chainWatcher) handleUnknownLocalState(
 	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
-	revocationProducer shachain.Producer,
-	chanType channeldb.ChannelType) (bool, error) {
+	chainSet *chainSet) (bool, error) {
 
+	// If the spend was a local commitment, at this point it must either be
+	// a past state (we breached!) or a future state (we lost state!). In
+	// either case, the only thing we can do is to attempt to sweep what is
+	// there.
+
 	// First, we'll re-derive our commitment point for this state since
 	// this is what we use to randomize each of the keys for this state.
-	commitSecret, err := revocationProducer.AtIndex(broadcastStateNum)
+	commitSecret, err := c.cfg.chanState.RevocationProducer.AtIndex(
+		broadcastStateNum,
+	)
 	if err != nil {
 		return false, err
 	}
@@ -345,13 +353,14 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
 	// and remote keys for this state. We use our point as only we can
 	// revoke our own commitment.
 	commitKeyRing := lnwallet.DeriveCommitmentKeys(
-		commitPoint, true, chanType, &localChanCfg, &remoteChanCfg,
+		commitPoint, true, c.cfg.chanState.ChanType,
+		&c.cfg.chanState.LocalChanCfg, &c.cfg.chanState.RemoteChanCfg,
 	)
 
 	// With the keys derived, we'll construct the remote script that'll be
 	// present if they have a non-dust balance on the commitment.
 	remoteScript, _, err := lnwallet.CommitScriptToRemote(
-		chanType, commitKeyRing.ToRemoteKey,
+		c.cfg.chanState.ChanType, commitKeyRing.ToRemoteKey,
 	)
 	if err != nil {
 		return false, err
@@ -361,12 +370,13 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
 	// the remote party allowing them to claim this output before the CSV
 	// delay if we breach.
 	localScript, err := input.CommitScriptToSelf(
-		uint32(localChanCfg.CsvDelay), commitKeyRing.ToLocalKey,
-		commitKeyRing.RevocationKey,
+		uint32(c.cfg.chanState.LocalChanCfg.CsvDelay),
+		commitKeyRing.ToLocalKey, commitKeyRing.RevocationKey,
 	)
 	if err != nil {
 		return false, err
 	}
 
 	localPkScript, err := input.WitnessScriptHash(localScript)
 	if err != nil {
 		return false, err
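As an aside, input.WitnessScriptHash wraps a witness script into the P2WSH output script that the output comparison below matches against; a rough equivalent using btcd's txscript and crypto/sha256 (a sketch, not lnd's exact implementation):

	// Build OP_0 <sha256(witness script)>, the P2WSH pkScript form.
	scriptHash := sha256.Sum256(localScript)
	bldr := txscript.NewScriptBuilder()
	bldr.AddOp(txscript.OP_0)
	bldr.AddData(scriptHash[:])
	pkScript, err := bldr.Script()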
@@ -375,21 +385,40 @@ func isOurCommitment(localChanCfg, remoteChanCfg channeldb.ChannelConfig,
 	// With all our scripts assembled, we'll examine the outputs of the
 	// commitment transaction to determine if this is a local force close
 	// or not.
+	ourCommit := false
 	for _, output := range commitSpend.SpendingTx.TxOut {
 		pkScript := output.PkScript
 
 		switch {
 		case bytes.Equal(localPkScript, pkScript):
-			return true, nil
+			ourCommit = true
 
 		case bytes.Equal(remoteScript.PkScript, pkScript):
-			return true, nil
+			ourCommit = true
 		}
 	}
 
-	// If neither of these scripts are present, then it isn't a local force
-	// close.
-	return false, nil
+	// If the script is not present, this cannot be our commit.
+	if !ourCommit {
+		return false, nil
+	}
+
+	log.Warnf("Detected local unilateral close of unknown state %v "+
+		"(our state=%v)", broadcastStateNum,
+		chainSet.localCommit.CommitHeight)
+
+	// If this is our commitment transaction, then we try to act even
+	// though we won't be able to sweep HTLCs.
+	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
+	if err := c.dispatchLocalForceClose(
+		commitSpend, broadcastStateNum, chainSet.commitSet,
+	); err != nil {
+		return false, fmt.Errorf("unable to handle local"+
+			"close for chan_point=%v: %v",
+			c.cfg.chanState.FundingOutpoint, err)
+	}
+
+	return true, nil
 }
 
 // chainSet includes all the information we need to dispatch a channel close
@@ -533,36 +562,34 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 			commitTxBroadcast, obfuscator,
 		)
 
-		// Based on the output scripts within this commitment, we'll
-		// determine if this is our commitment transaction or not (a
-		// self force close).
-		isOurCommit, err := isOurCommitment(
-			c.cfg.chanState.LocalChanCfg,
-			c.cfg.chanState.RemoteChanCfg, commitSpend,
-			broadcastStateNum, c.cfg.chanState.RevocationProducer,
-			c.cfg.chanState.ChanType,
+		// We'll go on to check whether it could be our own commitment
+		// that was published and now is confirmed.
+		ok, err = c.handleKnownLocalState(
+			commitSpend, broadcastStateNum, chainSet,
 		)
 		if err != nil {
-			log.Errorf("unable to determine self commit for "+
-				"chan_point=%v: %v",
-				c.cfg.chanState.FundingOutpoint, err)
+			log.Errorf("Unable to handle known local state: %v",
+				err)
 			return
 		}
 
-		// If this is our commitment transaction, then we can exit here
-		// as we don't have any further processing we need to do (we
-		// can't cheat ourselves :p).
-		if isOurCommit {
-			chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
+		if ok {
 			return
 		}
 
-			if err := c.dispatchLocalForceClose(
-				commitSpend, chainSet.localCommit,
-				chainSet.commitSet,
-			); err != nil {
-				log.Errorf("unable to handle local"+
-					"close for chan_point=%v: %v",
-					c.cfg.chanState.FundingOutpoint, err)
-			}
+		// Now that we know it is neither a non-cooperative closure nor
+		// a local close with the latest state, we check if it is the
+		// remote that closed with any prior or current state.
+		ok, err = c.handleKnownRemoteState(
+			commitSpend, broadcastStateNum, chainSet,
+		)
+		if err != nil {
+			log.Errorf("Unable to handle known remote state: %v",
+				err)
+			return
+		}
+
+		if ok {
+			return
+		}
@@ -581,141 +608,45 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 			return
 		}
 
-		log.Warnf("Unprompted commitment broadcast for "+
+		log.Warnf("Unknown commitment broadcast for "+
 			"ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)
 
-		// If this channel has been recovered, then we'll modify our
-		// behavior as it isn't possible for us to close out the
-		// channel off-chain ourselves. It can only be the remote party
-		// force closing, or a cooperative closure we signed off on
-		// before losing data getting confirmed in the chain.
-		isRecoveredChan := c.cfg.chanState.HasChanStatus(
-			channeldb.ChanStatusRestored,
+		// We'll try to recover as best as possible from losing state.
+		// We first check if this was a local unknown state. This could
+		// happen if we force close, then lose state or attempt
+		// recovery before the commitment confirms.
+		ok, err = c.handleUnknownLocalState(
+			commitSpend, broadcastStateNum, chainSet,
 		)
 
-		switch {
-		// If state number spending transaction matches the current
-		// latest state, then they've initiated a unilateral close. So
-		// we'll trigger the unilateral close signal so subscribers can
-		// clean up the state as necessary.
-		case broadcastStateNum == chainSet.remoteStateNum &&
-			!isRecoveredChan:
-
-			log.Infof("Remote party broadcast base set, "+
-				"commit_num=%v", chainSet.remoteStateNum)
-
-			chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
-			err := c.dispatchRemoteForceClose(
-				commitSpend, chainSet.remoteCommit,
-				chainSet.commitSet,
-				c.cfg.chanState.RemoteCurrentRevocation,
-			)
-			if err != nil {
-				log.Errorf("unable to handle remote "+
-					"close for chan_point=%v: %v",
-					c.cfg.chanState.FundingOutpoint, err)
-			}
-
-		// We'll also handle the case of the remote party broadcasting
-		// their commitment transaction which is one height above ours.
-		// This case can arise when we initiate a state transition, but
-		// the remote party has a fail crash _after_ accepting the new
-		// state, but _before_ sending their signature to us.
-		case broadcastStateNum == chainSet.remoteStateNum+1 &&
-			chainSet.remotePendingCommit != nil && !isRecoveredChan:
-
-			log.Infof("Remote party broadcast pending set, "+
-				"commit_num=%v", chainSet.remoteStateNum+1)
-
-			chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet
-			err := c.dispatchRemoteForceClose(
-				commitSpend, *chainSet.remotePendingCommit,
-				chainSet.commitSet,
-				c.cfg.chanState.RemoteNextRevocation,
-			)
-			if err != nil {
-				log.Errorf("unable to handle remote "+
-					"close for chan_point=%v: %v",
-					c.cfg.chanState.FundingOutpoint, err)
-			}
-
-		// If the remote party has broadcasted a state beyond our best
-		// known state for them, and they don't have a pending
-		// commitment (we write them to disk before sending out), then
-		// this means that we've lost data. In this case, we'll enter
-		// the DLP protocol. Otherwise, if we've recovered our channel
-		// state from scratch, then we don't know what the precise
-		// current state is, so we assume either the remote party
-		// forced closed or we've been breached. In the latter case,
-		// our tower will take care of us.
-		case broadcastStateNum > chainSet.remoteStateNum || isRecoveredChan:
-			log.Warnf("Remote node broadcast state #%v, "+
-				"which is more than 1 beyond best known "+
-				"state #%v!!! Attempting recovery...",
-				broadcastStateNum, chainSet.remoteStateNum)
-
-			// If this isn't a tweakless commitment, then we'll
-			// need to wait for the remote party's latest unrevoked
-			// commitment point to be presented to us as we need
-			// this to sweep. Otherwise, we can dispatch the remote
-			// close and sweep immediately using a fake commitPoint
-			// as it isn't actually needed for recovery anymore.
-			commitPoint := c.cfg.chanState.RemoteCurrentRevocation
-			tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless()
-			if !tweaklessCommit {
-				commitPoint = c.waitForCommitmentPoint()
-				if commitPoint == nil {
-					return
-				}
-
-				log.Infof("Recovered commit point(%x) for "+
-					"channel(%v)! Now attempting to use it to "+
-					"sweep our funds...",
-					commitPoint.SerializeCompressed(),
-					c.cfg.chanState.FundingOutpoint)
-
-			} else {
-				log.Infof("ChannelPoint(%v) is tweakless, "+
-					"moving to sweep directly on chain",
-					c.cfg.chanState.FundingOutpoint)
-			}
-
-			// Since we don't have the commitment stored for this
-			// state, we'll just pass an empty commitment within
-			// the commitment set. Note that this means we won't be
-			// able to recover any HTLC funds.
-			//
-			// TODO(halseth): can we try to recover some HTLCs?
-			chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
-			err = c.dispatchRemoteForceClose(
-				commitSpend, channeldb.ChannelCommitment{},
-				chainSet.commitSet, commitPoint,
-			)
-			if err != nil {
-				log.Errorf("unable to handle remote "+
-					"close for chan_point=%v: %v",
-					c.cfg.chanState.FundingOutpoint, err)
-			}
-
-		// If the state number broadcast is lower than the remote
-		// node's current un-revoked height, then THEY'RE ATTEMPTING TO
-		// VIOLATE THE CONTRACT LAID OUT WITHIN THE PAYMENT CHANNEL.
-		// Therefore we close the signal indicating a revoked broadcast
-		// to allow subscribers to swiftly dispatch justice!!!
-		case broadcastStateNum < chainSet.remoteStateNum:
-			err := c.dispatchContractBreach(
-				commitSpend, &chainSet.remoteCommit,
-				broadcastStateNum,
-			)
-			if err != nil {
-				log.Errorf("unable to handle channel "+
-					"breach for chan_point=%v: %v",
-					c.cfg.chanState.FundingOutpoint, err)
-			}
+		if err != nil {
+			log.Errorf("Unable to handle known local state: %v",
+				err)
+			return
 		}
 
 		// Now that a spend has been detected, we've done our job, so
 		// we'll exit immediately.
-		return
+		if ok {
+			return
+		}
+
+		// Since it was neither a known remote state, nor a local state
+		// that was published, it most likely means we lost state and
+		// the remote node closed. In this case we must start the DLP
+		// protocol in hope of getting our money back.
+		ok, err = c.handleUnknownRemoteState(
+			commitSpend, broadcastStateNum, chainSet,
+		)
+		if err != nil {
+			log.Errorf("Unable to handle unknown remote state: %v",
+				err)
+			return
+		}
+
+		if ok {
+			return
+		}
+
+		log.Warnf("Unable to handle spending tx %v of channel point %v",
+			commitTxBroadcast.TxHash(), c.cfg.chanState.FundingOutpoint)
+		return
 
 	// The chainWatcher has been signalled to exit, so we'll do so now.
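Taken together, closeObserver now walks a fixed cascade of handlers after the cooperative-close check, each returning (handled, error); a condensed sketch of the shape (not verbatim code from the diff):

	// Each handler returns true once it has dispatched the close,
	// false to let the next, less specific handler try.
	handlers := []func(*chainntnfs.SpendDetail, uint64, *chainSet) (bool, error){
		c.handleKnownLocalState,
		c.handleKnownRemoteState,
		c.handleUnknownLocalState,
		c.handleUnknownRemoteState,
	}
	for _, handle := range handlers {
		ok, err := handle(commitSpend, broadcastStateNum, chainSet)
		if err != nil {
			log.Errorf("unable to handle spend: %v", err)
			return
		}
		if ok {
			return
		}
	}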
@@ -724,6 +655,206 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
 	}
 }
 
+// handleKnownLocalState checks whether the passed spend is a local state that
+// is known to us (the current state). If so we will act on this state using
+// the passed chainSet. If this is not a known local state, false is returned.
+func (c *chainWatcher) handleKnownLocalState(
+	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
+	chainSet *chainSet) (bool, error) {
+
+	// If the channel is recovered, we won't have a local commit to check
+	// against, so immediately return.
+	if c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored) {
+		return false, nil
+	}
+
+	commitTxBroadcast := commitSpend.SpendingTx
+	commitHash := commitTxBroadcast.TxHash()
+
+	// Check whether our latest local state hit the chain.
+	if chainSet.localCommit.CommitTx.TxHash() != commitHash {
+		return false, nil
+	}
+
+	chainSet.commitSet.ConfCommitKey = &LocalHtlcSet
+	if err := c.dispatchLocalForceClose(
+		commitSpend, broadcastStateNum, chainSet.commitSet,
+	); err != nil {
+		return false, fmt.Errorf("unable to handle local"+
+			"close for chan_point=%v: %v",
+			c.cfg.chanState.FundingOutpoint, err)
+	}
+
+	return true, nil
+}
+
+// handleKnownRemoteState checks whether the passed spend is a remote state
+// that is known to us (a revoked, current or pending state). If so we will act
+// on this state using the passed chainSet. If this is not a known remote
+// state, false is returned.
+func (c *chainWatcher) handleKnownRemoteState(
+	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
+	chainSet *chainSet) (bool, error) {
+
+	// If the channel is recovered, we won't have any remote commit to
+	// check against, so immediately return.
+	if c.cfg.chanState.HasChanStatus(channeldb.ChanStatusRestored) {
+		return false, nil
+	}
+
+	commitTxBroadcast := commitSpend.SpendingTx
+	commitHash := commitTxBroadcast.TxHash()
+	spendHeight := uint32(commitSpend.SpendingHeight)
+
+	switch {
+	// If the spending transaction matches the current latest state, then
+	// they've initiated a unilateral close. So we'll trigger the
+	// unilateral close signal so subscribers can clean up the state as
+	// necessary.
+	case chainSet.remoteCommit.CommitTx.TxHash() == commitHash:
+		log.Infof("Remote party broadcast base set, "+
+			"commit_num=%v", chainSet.remoteStateNum)
+
+		chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
+		err := c.dispatchRemoteForceClose(
+			commitSpend, chainSet.remoteCommit,
+			chainSet.commitSet,
+			c.cfg.chanState.RemoteCurrentRevocation,
+		)
+		if err != nil {
+			return false, fmt.Errorf("unable to handle remote "+
+				"close for chan_point=%v: %v",
+				c.cfg.chanState.FundingOutpoint, err)
+		}
+
+		return true, nil
+
+	// We'll also handle the case of the remote party broadcasting
+	// their commitment transaction which is one height above ours.
+	// This case can arise when we initiate a state transition, but
+	// the remote party has a fail crash _after_ accepting the new
+	// state, but _before_ sending their signature to us.
+	case chainSet.remotePendingCommit != nil &&
+		chainSet.remotePendingCommit.CommitTx.TxHash() == commitHash:
+
+		log.Infof("Remote party broadcast pending set, "+
+			"commit_num=%v", chainSet.remoteStateNum+1)
+
+		chainSet.commitSet.ConfCommitKey = &RemotePendingHtlcSet
+		err := c.dispatchRemoteForceClose(
+			commitSpend, *chainSet.remotePendingCommit,
+			chainSet.commitSet,
+			c.cfg.chanState.RemoteNextRevocation,
+		)
+		if err != nil {
+			return false, fmt.Errorf("unable to handle remote "+
+				"close for chan_point=%v: %v",
+				c.cfg.chanState.FundingOutpoint, err)
+		}
+
+		return true, nil
+	}
+
+	// We check if we have a revoked state at this state num that matches
+	// the spend transaction.
+	retribution, err := lnwallet.NewBreachRetribution(
+		c.cfg.chanState, broadcastStateNum, spendHeight,
+	)
+
+	switch {
+
+	// If we had no log entry at this height, this was not a revoked state.
+	case err == channeldb.ErrLogEntryNotFound:
+		return false, nil
+	case err == channeldb.ErrNoPastDeltas:
+		return false, nil
+
+	case err != nil:
+		return false, fmt.Errorf("unable to create breach "+
+			"retribution: %v", err)
+	}
+
+	// We found a revoked state at this height, but it could still be our
+	// own broadcasted state we are looking at. Therefore check that the
+	// commit matches before assuming it was a breach.
+	if retribution.BreachTransaction.TxHash() != commitHash {
+		return false, nil
+	}
+
+	// THEY'RE ATTEMPTING TO VIOLATE THE CONTRACT LAID OUT WITHIN THE
+	// PAYMENT CHANNEL. Therefore we close the signal indicating a revoked
+	// broadcast to allow subscribers to swiftly dispatch justice!!!
+	err = c.dispatchContractBreach(
+		commitSpend, &chainSet.remoteCommit,
+		broadcastStateNum, retribution,
+	)
+	if err != nil {
+		return false, fmt.Errorf("unable to handle channel "+
+			"breach for chan_point=%v: %v",
+			c.cfg.chanState.FundingOutpoint, err)
+	}
+
+	return true, nil
+}
+
+// handleUnknownRemoteState is the last attempt we make at reclaiming funds
+// from the closed channel, by checking whether the passed spend _could_ be a
+// remote spend that is unknown to us (we lost state). We will try to initiate
+// Data Loss Protection in order to restore our commit point and reclaim our
+// funds from the channel. If we are not able to act on it, false is returned.
+func (c *chainWatcher) handleUnknownRemoteState(
+	commitSpend *chainntnfs.SpendDetail, broadcastStateNum uint64,
+	chainSet *chainSet) (bool, error) {
+
+	log.Warnf("Remote node broadcast state #%v, "+
+		"which is more than 1 beyond best known "+
+		"state #%v!!! Attempting recovery...",
+		broadcastStateNum, chainSet.remoteStateNum)
+
+	// If this isn't a tweakless commitment, then we'll need to wait for
+	// the remote party's latest unrevoked commitment point to be presented
+	// to us as we need this to sweep. Otherwise, we can dispatch the
+	// remote close and sweep immediately using a fake commitPoint as it
+	// isn't actually needed for recovery anymore.
+	commitPoint := c.cfg.chanState.RemoteCurrentRevocation
+	tweaklessCommit := c.cfg.chanState.ChanType.IsTweakless()
+	if !tweaklessCommit {
+		commitPoint = c.waitForCommitmentPoint()
+		if commitPoint == nil {
+			return false, fmt.Errorf("unable to get commit point")
+		}
+
+		log.Infof("Recovered commit point(%x) for "+
+			"channel(%v)! Now attempting to use it to "+
+			"sweep our funds...",
+			commitPoint.SerializeCompressed(),
+			c.cfg.chanState.FundingOutpoint)
+
+	} else {
+		log.Infof("ChannelPoint(%v) is tweakless, "+
+			"moving to sweep directly on chain",
+			c.cfg.chanState.FundingOutpoint)
+	}
+
+	// Since we don't have the commitment stored for this state, we'll just
+	// pass an empty commitment within the commitment set. Note that this
+	// means we won't be able to recover any HTLC funds.
+	//
+	// TODO(halseth): can we try to recover some HTLCs?
+	chainSet.commitSet.ConfCommitKey = &RemoteHtlcSet
+	err := c.dispatchRemoteForceClose(
+		commitSpend, channeldb.ChannelCommitment{},
+		chainSet.commitSet, commitPoint,
+	)
+	if err != nil {
+		return false, fmt.Errorf("unable to handle remote "+
+			"close for chan_point=%v: %v",
+			c.cfg.chanState.FundingOutpoint, err)
+	}
+
+	return true, nil
+}
+
 // toSelfAmount takes a transaction and returns the sum of all outputs that pay
 // to a script that the wallet controls. If no outputs pay to us, then we
 // return zero. This is possible as our output may have been trimmed due to
@@ -817,14 +948,14 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDetail)
 // dispatchLocalForceClose processes a unilateral close by us being confirmed.
 func (c *chainWatcher) dispatchLocalForceClose(
 	commitSpend *chainntnfs.SpendDetail,
-	localCommit channeldb.ChannelCommitment, commitSet CommitSet) error {
+	stateNum uint64, commitSet CommitSet) error {
 
 	log.Infof("Local unilateral close of ChannelPoint(%v) "+
 		"detected", c.cfg.chanState.FundingOutpoint)
 
 	forceClose, err := lnwallet.NewLocalForceCloseSummary(
 		c.cfg.chanState, c.cfg.signer,
-		commitSpend.SpendingTx, localCommit,
+		commitSpend.SpendingTx, stateNum,
 	)
 	if err != nil {
 		return err
@@ -948,8 +1079,8 @@ func (c *chainWatcher) dispatchRemoteForceClose(
 // materials required to bring the cheater to justice, then notify all
 // registered subscribers of this event.
 func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail,
-	remoteCommit *channeldb.ChannelCommitment,
-	broadcastStateNum uint64) error {
+	remoteCommit *channeldb.ChannelCommitment, broadcastStateNum uint64,
+	retribution *lnwallet.BreachRetribution) error {
 
 	log.Warnf("Remote peer has breached the channel contract for "+
 		"ChannelPoint(%v). Revoked state #%v was broadcast!!!",
@@ -961,17 +1092,6 @@ func (c *chainWatcher) dispatchContractBreach(spendEvent *chainntnfs.SpendDetail,
 
 	spendHeight := uint32(spendEvent.SpendingHeight)
 
-	// Create a new reach retribution struct which contains all the data
-	// needed to swiftly bring the cheating peer to justice.
-	//
-	// TODO(roasbeef): move to same package
-	retribution, err := lnwallet.NewBreachRetribution(
-		c.cfg.chanState, broadcastStateNum, spendHeight,
-	)
-	if err != nil {
-		return fmt.Errorf("unable to create breach retribution: %v", err)
-	}
-
 	// Nil the curve before printing.
 	if retribution.RemoteOutputSignDesc != nil &&
 		retribution.RemoteOutputSignDesc.DoubleTweak != nil {
@@ -204,9 +204,32 @@ type dlpTestCase struct {
 	NumUpdates uint8
 }
 
+// executeStateTransitions executes the given number of state transitions.
+// Copies of Alice's channel state before each transition (including initial
+// state) are returned.
 func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliSatoshi,
 	aliceChannel, bobChannel *lnwallet.LightningChannel,
-	numUpdates uint8) error {
+	numUpdates uint8) ([]*channeldb.OpenChannel, func(), error) {
+
+	// We'll make a copy of the channel state before each transition.
+	var (
+		chanStates   []*channeldb.OpenChannel
+		cleanupFuncs []func()
+	)
+
+	cleanAll := func() {
+		for _, f := range cleanupFuncs {
+			f()
+		}
+	}
+
+	state, f, err := copyChannelState(aliceChannel.State())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	chanStates = append(chanStates, state)
+	cleanupFuncs = append(cleanupFuncs, f)
+
 	for i := 0; i < int(numUpdates); i++ {
 		addFakeHTLC(
@@ -215,11 +238,21 @@ func executeStateTransitions(t *testing.T, htlcAmount lnwire.MilliSatoshi,
 
 		err := lnwallet.ForceStateTransition(aliceChannel, bobChannel)
 		if err != nil {
-			return err
+			cleanAll()
+			return nil, nil, err
 		}
+
+		state, f, err := copyChannelState(aliceChannel.State())
+		if err != nil {
+			cleanAll()
+			return nil, nil, err
+		}
+
+		chanStates = append(chanStates, state)
+		cleanupFuncs = append(cleanupFuncs, f)
 	}
 
-	return nil
+	return chanStates, cleanAll, nil
 }
 
 // TestChainWatcherDataLossProtect tests that if we've lost data (and are
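The helper now returns one snapshot per state, with index 0 being the state before any update, so a test can start a chain watcher from any historical state; sketched usage (hypothetical test fragment):

	states, cleanStates, err := executeStateTransitions(
		t, htlcAmt, aliceChannel, bobChannel, numUpdates,
	)
	if err != nil {
		t.Fatalf("unable to execute transitions: %v", err)
	}
	defer cleanStates()

	// Handing the watcher states[i] while the live channel has advanced
	// past state i simulates a node that lost its most recent states.
	aliceChanState := states[localState]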
@@ -250,6 +283,24 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
 		}
 		defer cleanUp()
 
+		// Based on the number of random updates for this state, make a
+		// new HTLC to add to the commitment, and then lock in a state
+		// transition.
+		const htlcAmt = 1000
+		states, cleanStates, err := executeStateTransitions(
+			t, htlcAmt, aliceChannel, bobChannel,
+			testCase.BroadcastStateNum,
+		)
+		if err != nil {
+			t.Errorf("unable to trigger state "+
+				"transition: %v", err)
+			return false
+		}
+		defer cleanStates()
+
+		// We'll use the state this test case wants Alice to start at.
+		aliceChanState := states[testCase.NumUpdates]
+
 		// With the channels created, we'll now create a chain watcher
 		// instance which will be watching for any closes of Alice's
 		// channel.
@@ -259,7 +310,7 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
 			ConfChan:  make(chan *chainntnfs.TxConfirmation),
 		}
 		aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
-			chanState: aliceChannel.State(),
+			chanState: aliceChanState,
 			notifier:  aliceNotifier,
 			signer:    aliceChannel.Signer,
 			extractStateNumHint: func(*wire.MsgTx,
|
||||
}
|
||||
defer aliceChainWatcher.Stop()
|
||||
|
||||
// Based on the number of random updates for this state, make a
|
||||
// new HTLC to add to the commitment, and then lock in a state
|
||||
// transition.
|
||||
const htlcAmt = 1000
|
||||
err = executeStateTransitions(
|
||||
t, htlcAmt, aliceChannel, bobChannel, testCase.NumUpdates,
|
||||
)
|
||||
if err != nil {
|
||||
t.Errorf("unable to trigger state "+
|
||||
"transition: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// We'll request a new channel event subscription from Alice's
|
||||
// chain watcher so we can be notified of our fake close below.
|
||||
chanEvents := aliceChainWatcher.SubscribeChannelEvents()
|
||||
@@ -299,7 +337,7 @@ func TestChainWatcherDataLossProtect(t *testing.T) {
 		// Otherwise, we'll feed in this new state number as a response
 		// to the query, and insert the expected DLP commit point.
 		dlpPoint := aliceChannel.State().RemoteCurrentRevocation
-		err = aliceChannel.State().MarkDataLoss(dlpPoint)
+		err = aliceChanState.MarkDataLoss(dlpPoint)
 		if err != nil {
 			t.Errorf("unable to insert dlp point: %v", err)
 			return false
@@ -408,7 +446,7 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 	// table driven tests. We'll assert that for any number of state
 	// updates, and if the commitment transaction has our output or not,
 	// we're able to properly detect a local force close.
-	localForceCloseScenario := func(t *testing.T, numUpdates uint8,
+	localForceCloseScenario := func(t *testing.T, numUpdates, localState uint8,
 		remoteOutputOnly, localOutputOnly bool) bool {
 
 		// First, we'll create two channels which already have
@@ -421,6 +459,24 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 		}
 		defer cleanUp()
 
+		// We'll execute a number of state transitions based on the
+		// randomly selected number from testing/quick. We do this to
+		// get more coverage of various state hint encodings beyond 0
+		// and 1.
+		const htlcAmt = 1000
+		states, cleanStates, err := executeStateTransitions(
+			t, htlcAmt, aliceChannel, bobChannel, numUpdates,
+		)
+		if err != nil {
+			t.Errorf("unable to trigger state "+
+				"transition: %v", err)
+			return false
+		}
+		defer cleanStates()
+
+		// We'll use the state this test case wants Alice to start at.
+		aliceChanState := states[localState]
+
 		// With the channels created, we'll now create a chain watcher
 		// instance which will be watching for any closes of Alice's
 		// channel.
@@ -430,7 +486,7 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 			ConfChan:  make(chan *chainntnfs.TxConfirmation),
 		}
 		aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{
-			chanState: aliceChannel.State(),
+			chanState: aliceChanState,
 			notifier:  aliceNotifier,
 			signer:    aliceChannel.Signer,
 			extractStateNumHint: lnwallet.GetStateNumHint,
@@ -443,20 +499,6 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 		}
 		defer aliceChainWatcher.Stop()
 
-		// We'll execute a number of state transitions based on the
-		// randomly selected number from testing/quick. We do this to
-		// get more coverage of various state hint encodings beyond 0
-		// and 1.
-		const htlcAmt = 1000
-		err = executeStateTransitions(
-			t, htlcAmt, aliceChannel, bobChannel, numUpdates,
-		)
-		if err != nil {
-			t.Errorf("unable to trigger state "+
-				"transition: %v", err)
-			return false
-		}
-
 		// We'll request a new channel event subscription from Alice's
 		// chain watcher so we can be notified of our fake close below.
 		chanEvents := aliceChainWatcher.SubscribeChannelEvents()
@@ -488,7 +530,19 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 		// should be able to detect the close based on the commitment
 		// outputs.
 		select {
-		case <-chanEvents.LocalUnilateralClosure:
+		case summary := <-chanEvents.LocalUnilateralClosure:
+			// Make sure we correctly extracted the commit
+			// resolution if we had a local output.
+			if remoteOutputOnly {
+				if summary.CommitResolution != nil {
+					t.Fatalf("expected no commit resolution")
+				}
+			} else {
+				if summary.CommitResolution == nil {
+					t.Fatalf("expected commit resolution")
+				}
+			}
+
 			return true
 
 		case <-time.After(time.Second * 5):
@@ -502,31 +556,53 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 	// present and absent with none or some number of updates in the channel.
 	testCases := []struct {
 		numUpdates       uint8
+		localState       uint8
 		remoteOutputOnly bool
 		localOutputOnly  bool
 	}{
 		{
 			numUpdates:       0,
+			localState:       0,
 			remoteOutputOnly: true,
 		},
 		{
 			numUpdates:       0,
+			localState:       0,
 			remoteOutputOnly: false,
 		},
 		{
 			numUpdates:      0,
+			localState:     0,
 			localOutputOnly: true,
 		},
 		{
 			numUpdates:       20,
+			localState:      20,
 			remoteOutputOnly: false,
 		},
 		{
 			numUpdates:       20,
+			localState:      20,
 			remoteOutputOnly: true,
 		},
 		{
 			numUpdates:      20,
+			localState:     20,
 			localOutputOnly: true,
 		},
+		{
+			numUpdates:       20,
+			localState:      5,
+			remoteOutputOnly: false,
+		},
+		{
+			numUpdates:       20,
+			localState:      5,
+			remoteOutputOnly: true,
+		},
+		{
+			numUpdates:      20,
+			localState:     5,
+			localOutputOnly: true,
+		},
 	}
@@ -542,7 +618,8 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) {
 			t.Parallel()
 
 			localForceCloseScenario(
-				t, testCase.numUpdates, testCase.remoteOutputOnly,
+				t, testCase.numUpdates, testCase.localState,
+				testCase.remoteOutputOnly,
 				testCase.localOutputOnly,
 			)
 		})
@@ -1,10 +1,16 @@
 package contractcourt
 
 import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
 	"runtime/pprof"
 	"testing"
 	"time"
+
+	"github.com/lightningnetwork/lnd/channeldb"
 )
 
 // timeout implements a test level timeout.
@@ -24,3 +30,69 @@ func timeout(t *testing.T) func() {
 		close(done)
 	}
 }
+
+func copyFile(dest, src string) error {
+	s, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer s.Close()
+
+	d, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+
+	if _, err := io.Copy(d, s); err != nil {
+		d.Close()
+		return err
+	}
+
+	return d.Close()
+}
+
+// copyChannelState copies the OpenChannel state by copying the database and
+// creating a new struct from it. The copied state and a cleanup function are
+// returned.
+func copyChannelState(state *channeldb.OpenChannel) (
+	*channeldb.OpenChannel, func(), error) {
+
+	// Make a copy of the DB.
+	dbFile := filepath.Join(state.Db.Path(), "channel.db")
+	tempDbPath, err := ioutil.TempDir("", "past-state")
+	if err != nil {
+		return nil, nil, err
+	}
+
+	cleanup := func() {
+		os.RemoveAll(tempDbPath)
+	}
+
+	tempDbFile := filepath.Join(tempDbPath, "channel.db")
+	err = copyFile(tempDbFile, dbFile)
+	if err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	newDb, err := channeldb.Open(tempDbPath)
+	if err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	chans, err := newDb.FetchAllChannels()
+	if err != nil {
+		cleanup()
+		return nil, nil, err
+	}
+
+	// We only support DBs with a single channel, for now.
+	if len(chans) != 1 {
+		cleanup()
+		return nil, nil, fmt.Errorf("found %d chans in the db",
+			len(chans))
+	}
+
+	return chans[0], cleanup, nil
+}
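Since an OpenChannel is bound to the database it was loaded from, a deep copy requires cloning the database file itself; sketched usage in a test (hypothetical fragment):

	// Snapshot Alice's state now; later transitions on the live channel
	// won't touch the copy, so it behaves like a stale backup.
	oldState, cleanup, err := copyChannelState(aliceChannel.State())
	if err != nil {
		t.Fatalf("unable to copy state: %v", err)
	}
	defer cleanup()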
@@ -6038,7 +6038,7 @@ func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) {
 	localCommitment := lc.channelState.LocalCommitment
 	summary, err := NewLocalForceCloseSummary(
 		lc.channelState, lc.Signer, commitTx,
-		localCommitment,
+		localCommitment.CommitHeight,
 	)
 	if err != nil {
 		return nil, err
@@ -6054,8 +6054,8 @@ func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) {
 // NewLocalForceCloseSummary generates a LocalForceCloseSummary from the given
 // channel state. The passed commitTx must be a fully signed commitment
 // transaction corresponding to localCommit.
-func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Signer,
-	commitTx *wire.MsgTx, localCommit channeldb.ChannelCommitment) (
+func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
+	signer input.Signer, commitTx *wire.MsgTx, stateNum uint64) (
 	*LocalForceCloseSummary, error) {
 
 	// Re-derive the original pkScript for to-self output within the
@@ -6063,9 +6063,11 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
 	// output in the commitment transaction and potentially for creating
 	// the sign descriptor.
 	csvTimeout := uint32(chanState.LocalChanCfg.CsvDelay)
-	revocation, err := chanState.RevocationProducer.AtIndex(
-		localCommit.CommitHeight,
-	)
+
+	// We use the passed state num to derive our scripts, since in case
+	// this is after recovery, our latest channel state might not be up to
+	// date.
+	revocation, err := chanState.RevocationProducer.AtIndex(stateNum)
 	if err != nil {
 		return nil, err
 	}
@@ -6090,8 +6092,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
 	// We'll return the details of this output to the caller so they can
 	// sweep it once it's mature.
 	var (
-		delayIndex  uint32
-		delayScript []byte
+		delayIndex uint32
+		delayOut   *wire.TxOut
 	)
 	for i, txOut := range commitTx.TxOut {
 		if !bytes.Equal(payToUsScriptHash, txOut.PkScript) {
@@ -6099,7 +6101,7 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
 		}
 
 		delayIndex = uint32(i)
-		delayScript = txOut.PkScript
+		delayOut = txOut
 		break
 	}
 
@@ -6110,8 +6112,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
 	// If the output is non-existent (dust), have the sign descriptor be
 	// nil.
 	var commitResolution *CommitOutputResolution
-	if len(delayScript) != 0 {
-		localBalance := localCommit.LocalBalance
+	if delayOut != nil {
+		localBalance := delayOut.Value
 		commitResolution = &CommitOutputResolution{
 			SelfOutPoint: wire.OutPoint{
 				Hash: commitTx.TxHash(),
@@ -6122,8 +6124,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
 				SingleTweak:   keyRing.LocalCommitKeyTweak,
 				WitnessScript: selfScript,
 				Output: &wire.TxOut{
-					PkScript: delayScript,
-					Value:    int64(localBalance.ToSatoshis()),
+					PkScript: delayOut.PkScript,
+					Value:    localBalance,
 				},
 				HashType: txscript.SigHashAll,
 			},
@@ -6133,8 +6135,11 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel,
 
 	// Once the delay output has been found (if it exists), then we'll also
 	// need to create a series of sign descriptors for any lingering
-	// outgoing HTLC's that we'll need to claim as well.
+	// outgoing HTLC's that we'll need to claim as well. If this is after
+	// recovery there is not much we can do with HTLCs, so we'll always
+	// use what we have in our latest state when extracting resolutions.
 	txHash := commitTx.TxHash()
+	localCommit := chanState.LocalCommitment
 	htlcResolutions, err := extractHtlcResolutions(
 		chainfee.SatPerKWeight(localCommit.FeePerKw), true, signer,
 		localCommit.Htlcs, keyRing, &chanState.LocalChanCfg,
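After the signature change, the caller decides which state the summary is derived for; sketched call from the chain watcher (per the dispatchLocalForceClose hunk above), where stateNum is the number extracted from the confirmed commitment:

	// Sketch: the chain watcher derives the summary at the state number
	// encoded in the confirmed commitment, not at our stored height.
	forceClose, err := lnwallet.NewLocalForceCloseSummary(
		c.cfg.chanState, c.cfg.signer, commitSpend.SpendingTx, stateNum,
	)
	if err != nil {
		return err
	}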
@@ -262,7 +262,7 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 	)
 	bobBalance := lnwire.NewMSatFromSatoshis(channelBal)
 
-	aliceCommit := channeldb.ChannelCommitment{
+	aliceLocalCommit := channeldb.ChannelCommitment{
 		CommitHeight:  0,
 		LocalBalance:  aliceBalance,
 		RemoteBalance: bobBalance,
@@ -271,7 +271,16 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 		CommitTx:      aliceCommitTx,
 		CommitSig:     testSigBytes,
 	}
-	bobCommit := channeldb.ChannelCommitment{
+	aliceRemoteCommit := channeldb.ChannelCommitment{
+		CommitHeight:  0,
+		LocalBalance:  aliceBalance,
+		RemoteBalance: bobBalance,
+		CommitFee:     commitFee,
+		FeePerKw:      btcutil.Amount(feePerKw),
+		CommitTx:      bobCommitTx,
+		CommitSig:     testSigBytes,
+	}
+	bobLocalCommit := channeldb.ChannelCommitment{
 		CommitHeight:  0,
 		LocalBalance:  bobBalance,
 		RemoteBalance: aliceBalance,
@@ -280,6 +289,15 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 		CommitTx:      bobCommitTx,
 		CommitSig:     testSigBytes,
 	}
+	bobRemoteCommit := channeldb.ChannelCommitment{
+		CommitHeight:  0,
+		LocalBalance:  bobBalance,
+		RemoteBalance: aliceBalance,
+		CommitFee:     commitFee,
+		FeePerKw:      btcutil.Amount(feePerKw),
+		CommitTx:      aliceCommitTx,
+		CommitSig:     testSigBytes,
+	}
 
 	var chanIDBytes [8]byte
 	if _, err := io.ReadFull(rand.Reader, chanIDBytes[:]); err != nil {
@@ -302,8 +320,8 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 		RemoteCurrentRevocation: bobCommitPoint,
 		RevocationProducer:      alicePreimageProducer,
 		RevocationStore:         shachain.NewRevocationStore(),
-		LocalCommitment:         aliceCommit,
-		RemoteCommitment:        aliceCommit,
+		LocalCommitment:         aliceLocalCommit,
+		RemoteCommitment:        aliceRemoteCommit,
 		Db:                      dbAlice,
 		Packager:                channeldb.NewChannelPackager(shortChanID),
 		FundingTxn:              testTx,
@@ -320,8 +338,8 @@ func CreateTestChannels(chanType channeldb.ChannelType) (
 		RemoteCurrentRevocation: aliceCommitPoint,
 		RevocationProducer:      bobPreimageProducer,
 		RevocationStore:         shachain.NewRevocationStore(),
-		LocalCommitment:         bobCommit,
-		RemoteCommitment:        bobCommit,
+		LocalCommitment:         bobLocalCommit,
+		RemoteCommitment:        bobRemoteCommit,
 		Db:                      dbBob,
 		Packager:                channeldb.NewChannelPackager(shortChanID),
 	}