htlcswitch: move timelock policy verification logic to HtlcSatifiesPolicy

In this commit, we extract the time lock policy verification logic from
the processRemoteAdds method into the HtlcSatifiesPolicy method. With
this change, we fix a lingering bug within the link: we'll no longer
verify time lock policies within the incoming link; instead, we'll
verify them at forwarding time, as we should. This bug is left over from
the change we made within the spec sometime last year to what the CLTV
delta denotes in the channel update message.
Olaoluwa Osuntokun 2018-06-25 20:23:10 -07:00
parent 74ec9147b1
commit 2bb5931bb7
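Before the diff itself, here is a minimal, self-contained sketch of the two
time lock checks this commit consolidates in HtlcSatifiesPolicy. The type and
function names below (forwardingPolicy, checkTimeLock) are illustrative
stand-ins, not lnd's actual API; the real implementation is in the hunks that
follow.

package main

import (
	"errors"
	"fmt"
)

// forwardingPolicy stands in for the link's forwarding policy; the real
// link reads TimeLockDelta from l.cfg.FwrdingPolicy.
type forwardingPolicy struct {
	TimeLockDelta uint32
}

// checkTimeLock mirrors the two constraints enforced at forwarding time:
// the HTLC must not expire too close to the current height, and the
// incoming expiry must exceed the outgoing expiry by at least our delta.
func checkTimeLock(policy forwardingPolicy, incomingTimeout, outgoingTimeout,
	heightNow uint32) error {

	// Reject HTLCs that would expire in the near future.
	if incomingTimeout-policy.TimeLockDelta <= heightNow {
		return errors.New("expiry too soon")
	}

	// Require at least TimeLockDelta blocks between the incoming and
	// outgoing expiries.
	if incomingTimeout-policy.TimeLockDelta < outgoingTimeout {
		return errors.New("incorrect cltv expiry")
	}

	return nil
}

func main() {
	policy := forwardingPolicy{TimeLockDelta: 144}

	// Purely illustrative heights: incoming expiry 600144, outgoing
	// expiry 600000, current height 599000 -> both checks pass.
	fmt.Println(checkTimeLock(policy, 600144, 600000, 599000))

	// Shaving one block off the required delta trips the second check.
	fmt.Println(checkTimeLock(policy, 600144, 600001, 599000))
}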

@@ -1784,10 +1784,8 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
// any case, we'll cancel this HTLC.
actualFee := incomingHtlcAmt - amtToForward
if incomingHtlcAmt < amtToForward || actualFee < expectedFee {
l.errorf("outgoing htlc(%x) has insufficient "+
"fee: expected %v, got %v", payHash[:],
int64(expectedFee),
int64(actualFee))
l.errorf("outgoing htlc(%x) has insufficient fee: expected %v, "+
"got %v", payHash[:], int64(expectedFee), int64(actualFee))
// As part of the returned error, we'll send our latest routing
// policy so the sending node obtains the most up to date data.
@@ -1804,6 +1802,54 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
return failure
}
// We want to avoid accepting an HTLC which will expire in the near
// future, so we'll reject an HTLC if its expiration time is too close
// to the current height.
timeDelta := policy.TimeLockDelta
if incomingTimeout-timeDelta <= heightNow {
log.Errorf("htlc(%x) has an expiry that's too soon: "+
"outgoing_expiry=%v, best_height=%v", payHash[:],
incomingTimeout-timeDelta, heightNow)
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(
l.ShortChanID(),
)
if err != nil {
failure = lnwire.NewTemporaryChannelFailure(update)
} else {
failure = lnwire.NewExpiryTooSoon(*update)
}
return failure
}
// Finally, we'll ensure that the time-lock on the outgoing HTLC meets
// the following constraint: the incoming time-lock minus our time-lock
// delta should be at least the outgoing time lock. Otherwise, either the
// sender messed up, or an intermediate node tampered with the HTLC.
if incomingTimeout-timeDelta < outgoingTimeout {
log.Errorf("Incoming htlc(%x) has incorrect time-lock value: "+
"expected at least %v block delta, got %v block delta",
payHash[:], timeDelta, incomingTimeout-outgoingTimeout)
// Grab the latest routing policy so the sending node is up to
// date with our current policy.
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(
l.ShortChanID(),
)
if err != nil {
failure = lnwire.NewTemporaryChannelFailure(update)
} else {
failure = lnwire.NewIncorrectCltvExpiry(
incomingTimeout, *update,
)
}
return failure
}
return nil
}
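To make the second constraint concrete with purely illustrative numbers:
given a TimeLockDelta of 144 and an incoming HTLC expiring at height 600144,
the outgoing expiry may be at most 600000. An outgoing expiry of 600001 trips
the incomingTimeout-timeDelta < outgoingTimeout branch and returns
IncorrectCltvExpiry, while an incoming expiry at or below heightNow plus 144
is rejected earlier as ExpiryTooSoon.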
@@ -2285,8 +2331,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
needUpdate = true
// There are additional channels left within this route. So
// we'll verify that our forwarding constraints have been
// properly met by this incoming HTLC.
// we'll simply do some forwarding package book-keeping.
default:
// If hodl.AddIncoming is requested, we will not
// validate the forwarded ADD, nor will we send the
@@ -2344,80 +2389,8 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
continue
}
// We'll consult the forwarding policy for this link
// when checking time locked related constraints.
hopPolicy := l.cfg.FwrdingPolicy
// We want to avoid forwarding an HTLC which will
// expire in the near future, so we'll reject an HTLC
// if its expiration time is too close to the current
// height.
timeDelta := hopPolicy.TimeLockDelta
if pd.Timeout-timeDelta <= heightNow {
log.Errorf("htlc(%x) has an expiry "+
"that's too soon: outgoing_expiry=%v, "+
"best_height=%v", pd.RHash[:],
pd.Timeout-timeDelta, heightNow)
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(
l.ShortChanID(),
)
if err != nil {
failure = lnwire.NewTemporaryChannelFailure(
update,
)
} else {
failure = lnwire.NewExpiryTooSoon(*update)
}
l.sendHTLCError(
pd.HtlcIndex, failure, obfuscator,
pd.SourceRef,
)
needUpdate = true
continue
}
// Finally, we'll ensure that the time-lock on the
// outgoing HTLC meets the following constraint: the
// incoming time-lock minus our time-lock delta should
// equal the outgoing time lock. Otherwise, whether the
// sender messed up, or an intermediate node tampered
// with the HTLC.
if pd.Timeout-timeDelta < fwdInfo.OutgoingCTLV {
log.Errorf("Incoming htlc(%x) has incorrect "+
"time-lock value: expected at least "+
"%v block delta, got %v block delta",
pd.RHash[:], timeDelta,
pd.Timeout-fwdInfo.OutgoingCTLV)
// Grab the latest routing policy so the
// sending node is up to date with our current
// policy.
update, err := l.cfg.FetchLastChannelUpdate(
l.ShortChanID(),
)
if err != nil {
l.fail(LinkFailureError{
code: ErrInternalError},
"unable to create channel "+
"update while handling "+
"the error: %v", err)
return false
}
failure := lnwire.NewIncorrectCltvExpiry(
pd.Timeout, *update)
l.sendHTLCError(
pd.HtlcIndex, failure, obfuscator, pd.SourceRef,
)
needUpdate = true
continue
}
// TODO(roasbeef): also add max timeout value
// TODO(roasbeef): ensure don't accept outrageous
// timeout for htlc
// With all our forwarding constraints met, we'll
// create the outgoing HTLC using the parameters as