lnwallet: fully validate all channel constraints in validateCommitmentSanity

This commit introduces changes to the validateCommitmentSanity
function to fully validate all channel constraints.
validateCommitmentSanity now validates that the
MaxPendingAmount, ChanReserve, MinHTLC, & MaxAcceptedHTLCs
limits are all adhered to during the lifetime of a channel.

When applying a set of updates, the channel constraints are
validated from the point of view of either the local or the
remote node, to make sure the updates will be accepted.

Co-authored-by: nsa <elzeigel@gmail.com>
Johan T. Halseth 2017-11-29 14:20:02 +01:00
parent 1873fe1381
commit 7b9f098fe6
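
At a high level, the checks being added can be summarized in one self-contained sketch. The types, field names, and numbers below are made up for illustration and are not the lnd implementation (in lnd the limits live on channeldb.ChannelConfig, and the real validation shown in the diff below also filters the HTLC view and charges the commitment fee to the initiator before the reserve check):

	package main

	import (
		"errors"
		"fmt"
	)

	// Hypothetical, simplified constraint set. In lnd the real fields live
	// on channeldb.ChannelConfig (ChanReserve, MaxPendingAmount, MinHTLC,
	// MaxAcceptedHtlcs).
	type constraints struct {
		chanReserveMSat  int64 // balance a peer must always retain
		maxPendingMSat   int64 // total HTLC value allowed in flight
		minHTLCMSat      int64 // smallest HTLC the peer will accept
		maxAcceptedHTLCs int   // most HTLCs allowed in flight
	}

	var (
		errBelowChanReserve = errors.New("balance dips below chan reserve")
		errMaxPendingAmount = errors.New("pending htlc value exceeds max")
		errBelowMinHTLC     = errors.New("htlc below minimum value")
		errMaxHTLCNumber    = errors.New("too many htlcs in flight")
	)

	// check mirrors the shape of the new validation: the balance left after
	// applying the adds must stay above the reserve, and the adds must
	// respect the in-flight amount, per-HTLC minimum, and HTLC count limits.
	func check(balanceAfterMSat int64, addsMSat []int64, c constraints) error {
		if balanceAfterMSat < c.chanReserveMSat {
			return errBelowChanReserve
		}

		var inFlight int64
		for _, amt := range addsMSat {
			if amt < c.minHTLCMSat {
				return errBelowMinHTLC
			}
			inFlight += amt
		}

		if inFlight > c.maxPendingMSat {
			return errMaxPendingAmount
		}
		if len(addsMSat) > c.maxAcceptedHTLCs {
			return errMaxHTLCNumber
		}
		return nil
	}

	func main() {
		c := constraints{
			chanReserveMSat:  10000000, // 10,000 sat reserve, in msat
			maxPendingMSat:   50000000, // 50,000 sat in flight, in msat
			minHTLCMSat:      1000,
			maxAcceptedHTLCs: 483,
		}

		// A single 60,000 sat HTLC exceeds the 50,000 sat in-flight limit.
		fmt.Println(check(940000000, []int64{60000000}, c))
	}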

@@ -47,9 +47,22 @@ var (
 	ErrMaxHTLCNumber = fmt.Errorf("commitment transaction exceed max " +
 		"htlc number")
 
-	// ErrInsufficientBalance is returned when a proposed HTLC would
-	// exceed the available balance.
-	ErrInsufficientBalance = fmt.Errorf("insufficient local balance")
+	// ErrMaxPendingAmount is returned when a proposed HTLC would exceed
+	// the overall maximum pending value of all HTLCs if committed in a
+	// state transition.
+	ErrMaxPendingAmount = fmt.Errorf("commitment transaction exceed max" +
+		"overall pending htlc value")
+
+	// ErrBelowChanReserve is returned when a proposed HTLC would cause
+	// one of the peer's funds to dip below the channel reserve limit.
+	ErrBelowChanReserve = fmt.Errorf("commitment transaction dips peer " +
+		"below chan reserve")
+
+	// ErrBelowMinHTLC is returned when a proposed HTLC has a value that
+	// is below the minimum HTLC value constraint for either us or our
+	// peer depending on which flags are set.
+	ErrBelowMinHTLC = fmt.Errorf("proposed HTLC value is below minimum " +
+		"allowed HTLC value")
 
 	// ErrCannotSyncCommitChains is returned if, upon receiving a ChanSync
 	// message, the state machine deems that is unable to properly
@@ -324,6 +337,8 @@ type commitment struct {
 	// within the commitment chain. This balance is computed by properly
 	// evaluating all the add/remove/settle log entries before the listed
 	// indexes.
+	//
+	// NOTE: This is the balance *before* subtracting any commitment fee.
 	ourBalance   lnwire.MilliSatoshi
 	theirBalance lnwire.MilliSatoshi
 
@@ -1928,7 +1943,7 @@ func htlcSuccessFee(feePerKw btcutil.Amount) btcutil.Amount {
 // htlcIsDust determines if an HTLC output is dust or not depending on two
 // bits: if the HTLC is incoming and if the HTLC will be placed on our
 // commitment transaction, or theirs. These two pieces of information are
-// require as we currently used second-level HTLC transactions ass off-chain
+// require as we currently used second-level HTLC transactions as off-chain
 // covenants. Depending on the two bits, we'll either be using a timeout or
 // success transaction which have different weights.
 func htlcIsDust(incoming, ourCommit bool,
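
The comment above describes how the two bits select between the second-level timeout and success transactions. The following outline is illustrative only: it borrows the htlcTimeoutFee and htlcSuccessFee helpers from this file and uses a hypothetical name so it is not mistaken for the real htlcIsDust body:

	// htlcIsDustSketch outlines how the incoming/ourCommit bits pick the
	// second-level transaction whose fee is subtracted before the dust check.
	func htlcIsDustSketch(incoming, ourCommit bool,
		feePerKw, htlcAmt, dustLimit btcutil.Amount) bool {

		var htlcFee btcutil.Amount
		switch {
		// Incoming HTLC on our commitment: we would sweep it with a
		// second-level success transaction once we learn the preimage.
		case incoming && ourCommit:
			htlcFee = htlcSuccessFee(feePerKw)

		// Incoming HTLC on the remote commitment: the remote party
		// reclaims it with a timeout transaction if we never settle.
		case incoming && !ourCommit:
			htlcFee = htlcTimeoutFee(feePerKw)

		// Outgoing HTLC on our commitment: we reclaim it with a
		// timeout transaction after the HTLC expires.
		case !incoming && ourCommit:
			htlcFee = htlcTimeoutFee(feePerKw)

		// Outgoing HTLC on the remote commitment: the remote party
		// sweeps it with a success transaction.
		default:
			htlcFee = htlcSuccessFee(feePerKw)
		}

		// The HTLC is dust if, after paying for its second-level
		// transaction, the remaining value is below the dust limit.
		return (htlcAmt - htlcFee) < dustLimit
	}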
@@ -2665,7 +2680,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
 	// party set up when we initially set up the channel. If we are, then
 	// we'll abort this state transition.
 	err := lc.validateCommitmentSanity(remoteACKedIndex,
-		lc.localUpdateLog.logIndex, false, true, true)
+		lc.localUpdateLog.logIndex, true, nil)
 	if err != nil {
 		return sig, htlcSigs, err
 	}
@@ -3129,69 +3144,118 @@ func (lc *LightningChannel) computeView(view *htlcView, remoteChain bool,
 	return ourBalance, theirBalance, totalCommitWeight, filteredHTLCView, feePerKw
 }
 
-// validateCommitmentSanity is used to validate that on current state the commitment
-// transaction is valid in terms of propagating it over Bitcoin network, and
-// also that all outputs are meet Bitcoin spec requirements and they are
-// spendable.
+// validateCommitmentSanity is used to validate the current state of the
+// commitment transaction in terms of the ChannelConstraints that we and our
+// remote peer agreed upon during the funding workflow. The predictAdded
+// parameter should be set to a valid PaymentDescriptor if we are validating
+// in the state when adding a new HTLC, or nil otherwise.
 func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
-	ourLogCounter uint64, prediction bool, local bool, remote bool) error {
+	ourLogCounter uint64, remoteChain bool,
+	predictAdded *PaymentDescriptor) error {
 
-	// TODO(roasbeef): verify remaining sanity requirements
-	htlcCount := 0
-
-	// If we adding or receiving the htlc we increase the number of htlcs
-	// by one in order to not overflow the commitment transaction by
-	// insertion.
-	if prediction {
-		htlcCount++
-	}
-
-	// TODO(roasbeef): call availableBalance in here re-using htlcView
-
-	// Run through all the HTLCs that will be covered by this transaction
-	// in order to calculate theirs count.
+	// Fetch all updates not committed.
 	view := lc.fetchHTLCView(theirLogCounter, ourLogCounter)
 
-	if remote {
-		for _, entry := range view.theirUpdates {
-			if entry.EntryType == Add {
-				htlcCount++
-			}
-		}
-		for _, entry := range view.ourUpdates {
-			if entry.EntryType != Add {
-				htlcCount--
-			}
-		}
-	}
-
-	if local {
-		for _, entry := range view.ourUpdates {
-			if entry.EntryType == Add {
-				htlcCount++
-			}
-		}
-		for _, entry := range view.theirUpdates {
-			if entry.EntryType != Add {
-				htlcCount--
-			}
-		}
-	}
-
-	// If we're validating the commitment sanity for HTLC _log_ update by a
-	// particular side, then we'll only consider half of the available HTLC
-	// bandwidth. However, if we're validating the _creation_ of a new
-	// commitment state, then we'll use the full value as the sum of the
-	// contribution of both sides shouldn't exceed the max number.
-	var maxHTLCNumber int
-	if local && remote {
-		maxHTLCNumber = MaxHTLCNumber
-	} else {
-		maxHTLCNumber = MaxHTLCNumber / 2
-	}
-
-	if htlcCount > maxHTLCNumber {
-		return ErrMaxHTLCNumber
-	}
+	// If we are checking if we can add a new HTLC, we add this to the
+	// update log, in order to validate the sanity of the commitment
+	// resulting from _actually adding_ this HTLC to the state.
+	if predictAdded != nil {
+		// If we are adding an HTLC, this will be an Add to the
+		// local update log.
+		view.ourUpdates = append(view.ourUpdates, predictAdded)
+	}
+
+	commitChain := lc.localCommitChain
+	if remoteChain {
+		commitChain = lc.remoteCommitChain
+	}
+	ourInitialBalance := commitChain.tip().ourBalance
+	theirInitialBalance := commitChain.tip().theirBalance
+
+	ourBalance, theirBalance, commitWeight, filteredView, feePerKw :=
+		lc.computeView(view, remoteChain, false)
+
+	// Calculate the commitment fee, and subtract it from the
+	// initiator's balance.
+	commitFee := btcutil.Amount((int64(feePerKw) * commitWeight) / 1000)
+	if lc.channelState.IsInitiator {
+		ourBalance -= lnwire.NewMSatFromSatoshis(commitFee)
+	} else {
+		theirBalance -= lnwire.NewMSatFromSatoshis(commitFee)
+	}
+
+	// If the added HTLCs will decrease the balance, make sure
+	// they won't dip the local and remote balances below the
+	// channel reserves.
+	if ourBalance < ourInitialBalance &&
+		ourBalance < lnwire.NewMSatFromSatoshis(
+			lc.localChanCfg.ChanReserve) {
+		return ErrBelowChanReserve
+	}
+	if theirBalance < theirInitialBalance &&
+		theirBalance < lnwire.NewMSatFromSatoshis(
+			lc.remoteChanCfg.ChanReserve) {
+		return ErrBelowChanReserve
+	}
+
+	// validateUpdates take a set of updates, and validates them
+	// against the passed channel constraints.
+	validateUpdates := func(updates []*PaymentDescriptor,
+		constraints *channeldb.ChannelConfig) error {
+
+		// We keep track of the number of HTLCs in flight for
+		// the commitment, and the amount in flight.
+		var numInFlight uint16
+		var amtInFlight lnwire.MilliSatoshi
+
+		// Go through all updates, checking that they don't
+		// violate the channel constraints.
+		for _, entry := range updates {
+			if entry.EntryType == Add {
+				// An HTLC is being added, this will
+				// add to the number and amount in
+				// flight.
+				amtInFlight += entry.Amount
+				numInFlight++
+				// Check that the value of the HTLC they
+				// added is above our minimum.
+				if entry.Amount < constraints.MinHTLC {
+					return ErrBelowMinHTLC
+				}
+			}
+		}
+
+		// Now that we know the total value of added HTLCs,
+		// we check that this satisfy the MaxPendingAmont
+		// contraint.
+		if amtInFlight > constraints.MaxPendingAmount {
+			return ErrMaxPendingAmount
+		}
+
+		// In this step, we verify that the total number of
+		// active HTLCs does not exceed the constraint of the
+		// maximum number of HTLCs in flight.
+		if numInFlight > constraints.MaxAcceptedHtlcs {
+			return ErrMaxHTLCNumber
+		}
+
+		return nil
+	}
+
+	// First check that the remote updates won't violate it's
+	// channel constraints.
+	err := validateUpdates(filteredView.theirUpdates,
+		lc.remoteChanCfg)
+	if err != nil {
+		return err
+	}
+
+	// Secondly check that our updates won't violate our
+	// channel constraints.
+	err = validateUpdates(filteredView.ourUpdates,
+		lc.localChanCfg)
+	if err != nil {
+		return err
+	}
 
 	return nil
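
As a worked example of the new fee and reserve interaction (made-up numbers): at a fee rate of 5000 sat/kw and a commitment weight of, say, 724 weight units, the initiator pays a commitment fee of 5000 * 724 / 1000 = 3620 satoshis. If the initiator's balance after applying a proposed add would be 12,000 satoshis (down from 20,000 at the current tip) and its ChanReserve is 10,000 satoshis, the post-fee balance of 12,000 - 3,620 = 8,380 satoshis is both lower than before and below the reserve, so validateCommitmentSanity returns ErrBelowChanReserve before the per-update checks ever run.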
@@ -3392,7 +3456,7 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig,
 	// the constraints we specified during initial channel setup. If not,
 	// then we'll abort the channel as they've violated our constraints.
 	err := lc.validateCommitmentSanity(lc.remoteUpdateLog.logIndex,
-		localACKedIndex, false, true, true)
+		localACKedIndex, false, nil)
 	if err != nil {
 		return err
 	}
@@ -3411,8 +3475,9 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig,
 		lc.remoteChanCfg)
 
 	// With the current commitment point re-calculated, construct the new
-	// commitment view which includes all the entries we know of in their
-	// HTLC log, and up to ourLogIndex in our HTLC log.
+	// commitment view which includes all the entries (pending or committed)
+	// we know of in the remote node's HTLC log, but only our local changes
+	// up to the last change the remote node has ACK'd.
 	localCommitmentView, err := lc.fetchCommitmentView(
 		false, localACKedIndex, localHtlcIndex,
 		lc.remoteUpdateLog.logIndex, lc.remoteUpdateLog.htlcCounter,
@@ -3737,43 +3802,6 @@ func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC) (uint64, error)
 	lc.Lock()
 	defer lc.Unlock()
 
-	if err := lc.validateCommitmentSanity(lc.remoteUpdateLog.logIndex,
-		lc.localUpdateLog.logIndex, true, true, false); err != nil {
-		return 0, err
-	}
-
-	// To ensure that we can actually fully accept this new HTLC, we'll
-	// calculate the current available bandwidth, and subtract the value of
-	// the HTLC from it.
-	initialBalance, _ := lc.availableBalance()
-	availableBalance := initialBalance
-	availableBalance -= htlc.Amount
-
-	feePerKw := lc.channelState.LocalCommitment.FeePerKw
-	dustLimit := lc.channelState.LocalChanCfg.DustLimit
-	htlcIsDust := htlcIsDust(
-		false, true, feePerKw, htlc.Amount.ToSatoshis(), dustLimit,
-	)
-
-	// If this HTLC is not dust, and we're the initiator, then we'll also
-	// subtract the amount we'll need to pay in fees for this HTLC.
-	if !htlcIsDust && lc.channelState.IsInitiator {
-		htlcFee := lnwire.NewMSatFromSatoshis(
-			btcutil.Amount((int64(feePerKw) * HtlcWeight) / 1000),
-		)
-		availableBalance -= htlcFee
-	}
-
-	// If this value is negative, then we can't accept the HTLC, so we'll
-	// reject it with an error.
-	if availableBalance < 0 {
-		// TODO(roasbeef): also needs to respect reservation
-		// * expand to add context err msg
-		walletLog.Errorf("Unable to carry added HTLC: amt=%v, bal=%v",
-			htlc.Amount, availableBalance)
-		return 0, ErrInsufficientBalance
-	}
-
 	pd := &PaymentDescriptor{
 		EntryType: Add,
 		RHash:     PaymentHash(htlc.PaymentHash),
@@ -3784,6 +3812,14 @@ func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC) (uint64, error)
 		OnionBlob: htlc.OnionBlob[:],
 	}
 
+	// Make sure adding this HTLC won't violate any of the constrainst
+	// we must keep on our commitment transaction.
+	remoteACKedIndex := lc.localCommitChain.tail().theirMessageIndex
+	if err := lc.validateCommitmentSanity(remoteACKedIndex,
+		lc.localUpdateLog.logIndex, true, pd); err != nil {
+		return 0, err
+	}
+
 	lc.localUpdateLog.appendHtlc(pd)
 
 	return pd.HtlcIndex, nil
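
The practical effect of the two AddHTLC hunks above is that a violating add now fails inside AddHTLC itself instead of surfacing later when the commitment is signed. A hypothetical test-style fragment (aliceChannel, rHash, and t are assumed to come from the package's existing test harness and are not part of this commit):

	// Propose an HTLC far larger than the negotiated in-flight limit.
	htlc := &lnwire.UpdateAddHTLC{
		PaymentHash: rHash,
		Amount:      lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin),
		Expiry:      100,
	}

	// Depending on the channel's balance this is rejected with
	// ErrMaxPendingAmount or ErrBelowChanReserve, but it never makes it
	// into the local update log.
	if _, err := aliceChannel.AddHTLC(htlc); err == nil {
		t.Fatalf("expected a channel constraint error, got nil")
	}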
@@ -3801,11 +3837,6 @@ func (lc *LightningChannel) ReceiveHTLC(htlc *lnwire.UpdateAddHTLC) (uint64, err
 			"ID %d", htlc.ID, lc.remoteUpdateLog.htlcCounter)
 	}
 
-	if err := lc.validateCommitmentSanity(lc.remoteUpdateLog.logIndex,
-		lc.localUpdateLog.logIndex, true, false, true); err != nil {
-		return 0, err
-	}
-
 	pd := &PaymentDescriptor{
 		EntryType: Add,
 		RHash:     PaymentHash(htlc.PaymentHash),
@@ -5343,3 +5374,8 @@ func (lc *LightningChannel) ActiveHtlcs() []channeldb.HTLC {
 
 	return activeHtlcs
 }
+
+// LocalChanReserve returns our local ChanReserve requirement for the remote party.
+func (lc *LightningChannel) LocalChanReserve() btcutil.Amount {
+	return lc.localChanCfg.ChanReserve
+}