Merge pull request #1447 from Roasbeef/forwarding-timelock-fix

routing+htlcswitch: finalize switch of CLTV delta directionality in path finding and link forwarding
Olaoluwa Osuntokun 2018-06-26 19:47:46 -07:00 committed by GitHub
commit ec7cfc6906
9 changed files with 169 additions and 121 deletions
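
The net effect of the hunks below is that a forwarded HTLC is now validated against the forwarding policy of the target (outgoing) link, and HtlcSatifiesPolicy receives the incoming and outgoing CLTV values along with the current chain height so it can enforce the time-lock constraints itself. The snippet here is a minimal, self-contained Go sketch of those two constraints only; checkForwardTimeLock and its error values are illustrative stand-ins, not lnd's API.

package main

import (
    "errors"
    "fmt"
)

var (
    // Stands in for the lnwire.NewExpiryTooSoon failure: the HTLC would
    // expire too close to the current block height to be safely forwarded.
    errExpiryTooSoon = errors.New("htlc expiry too close to current height")

    // Stands in for the lnwire.NewIncorrectCltvExpiry failure: the outgoing
    // CLTV leaves less than the forwarding link's advertised delta.
    errIncorrectCltvExpiry = errors.New("insufficient time lock delta")
)

// checkForwardTimeLock sketches the time-lock portion of HtlcSatifiesPolicy:
// the incoming CLTV minus the forwarding link's delta must still clear the
// chain tip, and must cover the requested outgoing CLTV.
func checkForwardTimeLock(incomingTimeout, outgoingTimeout, timeLockDelta,
    heightNow uint32) error {

    if incomingTimeout-timeLockDelta <= heightNow {
        return errExpiryTooSoon
    }
    if incomingTimeout-timeLockDelta < outgoingTimeout {
        return errIncorrectCltvExpiry
    }
    return nil
}

func main() {
    // With a delta of 40 at height 500000, an incoming CLTV of 500100
    // supports any outgoing CLTV up to 500060.
    fmt.Println(checkForwardTimeLock(500100, 500060, 40, 500000)) // <nil>
    fmt.Println(checkForwardTimeLock(500100, 500061, 40, 500000)) // insufficient time lock delta
}

As in the link code, the comparisons are on unsigned values, so the sketch assumes an incoming CLTV comfortably above the current height.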

@@ -81,8 +81,10 @@ type ChannelLink interface {
    // Otherwise, a valid protocol failure message should be returned in
    // order to signal to the source of the HTLC, the policy consistency
    // issue.
-    HtlcSatifiesPolicy(payHash [32]byte,
-        incomingAmt, amtToForward lnwire.MilliSatoshi) lnwire.FailureMessage
+    HtlcSatifiesPolicy(payHash [32]byte, incomingAmt lnwire.MilliSatoshi,
+        amtToForward lnwire.MilliSatoshi,
+        incomingTimeout, outgoingTimeout uint32,
+        heightNow uint32) lnwire.FailureMessage

    // Bandwidth returns the amount of milli-satoshis which current link
    // might pass through channel link. The value returned from this method

@@ -1747,7 +1747,9 @@ func (l *channelLink) UpdateForwardingPolicy(newPolicy ForwardingPolicy) {
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
-    incomingHtlcAmt, amtToForward lnwire.MilliSatoshi) lnwire.FailureMessage {
+    incomingHtlcAmt, amtToForward lnwire.MilliSatoshi,
+    incomingTimeout, outgoingTimeout uint32,
+    heightNow uint32) lnwire.FailureMessage {

    l.RLock()
    policy := l.cfg.FwrdingPolicy
@@ -1788,10 +1790,8 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
    // any case, we'll cancel this HTLC.
    actualFee := incomingHtlcAmt - amtToForward
    if incomingHtlcAmt < amtToForward || actualFee < expectedFee {
-        l.errorf("outgoing htlc(%x) has insufficient "+
-            "fee: expected %v, got %v", payHash[:],
-            int64(expectedFee),
-            int64(actualFee))
+        l.errorf("outgoing htlc(%x) has insufficient fee: expected %v, "+
+            "got %v", payHash[:], int64(expectedFee), int64(actualFee))

        // As part of the returned error, we'll send our latest routing
        // policy so the sending node obtains the most up to date data.
@@ -1808,6 +1808,54 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
        return failure
    }

+    // We want to avoid accepting an HTLC which will expire in the near
+    // future, so we'll reject an HTLC if its expiration time is too close
+    // to the current height.
+    timeDelta := policy.TimeLockDelta
+    if incomingTimeout-timeDelta <= heightNow {
+        l.errorf("htlc(%x) has an expiry that's too soon: "+
+            "outgoing_expiry=%v, best_height=%v", payHash[:],
+            incomingTimeout-timeDelta, heightNow)
+
+        var failure lnwire.FailureMessage
+        update, err := l.cfg.FetchLastChannelUpdate(
+            l.ShortChanID(),
+        )
+        if err != nil {
+            failure = lnwire.NewTemporaryChannelFailure(update)
+        } else {
+            failure = lnwire.NewExpiryTooSoon(*update)
+        }
+
+        return failure
+    }
+
+    // Finally, we'll ensure that the time-lock on the outgoing HTLC meets
+    // the following constraint: the incoming time-lock minus our time-lock
+    // delta should equal the outgoing time lock. Otherwise, either the
+    // sender messed up, or an intermediate node tampered with the HTLC.
+    if incomingTimeout-timeDelta < outgoingTimeout {
+        l.errorf("Incoming htlc(%x) has incorrect time-lock value: "+
+            "expected at least %v block delta, got %v block delta",
+            payHash[:], timeDelta, incomingTimeout-outgoingTimeout)
+
+        // Grab the latest routing policy so the sending node is up to
+        // date with our current policy.
+        var failure lnwire.FailureMessage
+        update, err := l.cfg.FetchLastChannelUpdate(
+            l.ShortChanID(),
+        )
+        if err != nil {
+            failure = lnwire.NewTemporaryChannelFailure(update)
+        } else {
+            failure = lnwire.NewIncorrectCltvExpiry(
+                incomingTimeout, *update,
+            )
+        }
+
+        return failure
+    }
+
    return nil
}
@@ -2289,8 +2337,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
            needUpdate = true

        // There are additional channels left within this route. So
-        // we'll verify that our forwarding constraints have been
-        // properly met by this incoming HTLC.
+        // we'll simply do some forwarding package book-keeping.
        default:
            // If hodl.AddIncoming is requested, we will not
            // validate the forwarded ADD, nor will we send the
@@ -2340,6 +2387,8 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
                    amount:     addMsg.Amount,
                    htlc:       addMsg,
                    obfuscator: obfuscator,
+                    incomingTimeout: pd.Timeout,
+                    outgoingTimeout: fwdInfo.OutgoingCTLV,
                }

                switchPackets = append(
                    switchPackets, updatePacket,
@@ -2348,80 +2397,8 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
                continue
            }

-            // We'll consult the forwarding policy for this link
-            // when checking time locked related constraints.
-            hopPolicy := l.cfg.FwrdingPolicy
-
-            // We want to avoid forwarding an HTLC which will
-            // expire in the near future, so we'll reject an HTLC
-            // if its expiration time is too close to the current
-            // height.
-            timeDelta := hopPolicy.TimeLockDelta
-            if pd.Timeout-timeDelta <= heightNow {
-                log.Errorf("htlc(%x) has an expiry "+
-                    "that's too soon: outgoing_expiry=%v, "+
-                    "best_height=%v", pd.RHash[:],
-                    pd.Timeout-timeDelta, heightNow)
-
-                var failure lnwire.FailureMessage
-                update, err := l.cfg.FetchLastChannelUpdate(
-                    l.ShortChanID(),
-                )
-                if err != nil {
-                    failure = lnwire.NewTemporaryChannelFailure(
-                        update,
-                    )
-                } else {
-                    failure = lnwire.NewExpiryTooSoon(*update)
-                }
-
-                l.sendHTLCError(
-                    pd.HtlcIndex, failure, obfuscator,
-                    pd.SourceRef,
-                )
-                needUpdate = true
-                continue
-            }
-
-            // Finally, we'll ensure that the time-lock on the
-            // outgoing HTLC meets the following constraint: the
-            // incoming time-lock minus our time-lock delta should
-            // equal the outgoing time lock. Otherwise, whether the
-            // sender messed up, or an intermediate node tampered
-            // with the HTLC.
-            if pd.Timeout-timeDelta < fwdInfo.OutgoingCTLV {
-                log.Errorf("Incoming htlc(%x) has incorrect "+
-                    "time-lock value: expected at least "+
-                    "%v block delta, got %v block delta",
-                    pd.RHash[:], timeDelta,
-                    pd.Timeout-fwdInfo.OutgoingCTLV)
-
-                // Grab the latest routing policy so the
-                // sending node is up to date with our current
-                // policy.
-                update, err := l.cfg.FetchLastChannelUpdate(
-                    l.ShortChanID(),
-                )
-                if err != nil {
-                    l.fail(LinkFailureError{
-                        code: ErrInternalError},
-                        "unable to create channel "+
-                        "update while handling "+
-                        "the error: %v", err)
-                    return false
-                }
-
-                failure := lnwire.NewIncorrectCltvExpiry(
-                    pd.Timeout, *update)
-                l.sendHTLCError(
-                    pd.HtlcIndex, failure, obfuscator, pd.SourceRef,
-                )
-                needUpdate = true
-                continue
-            }
-
-            // TODO(roasbeef): also add max timeout value
+            // TODO(roasbeef): ensure don't accept outrageous
+            // timeout for htlc

            // With all our forwarding constraints met, we'll
            // create the outgoing HTLC using the parameters as
@@ -2478,6 +2455,8 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
                amount:     addMsg.Amount,
                htlc:       addMsg,
                obfuscator: obfuscator,
+                incomingTimeout: pd.Timeout,
+                outgoingTimeout: fwdInfo.OutgoingCTLV,
            }

            fwdPkg.FwdFilter.Set(idx)

@@ -618,6 +618,7 @@ func TestLinkForwardTimelockPolicyMismatch(t *testing.T) {
    _, err = n.makePayment(n.aliceServer, n.carolServer,
        n.bobServer.PubKey(), hops, amount, htlcAmt,
        htlcExpiry).Wait(30 * time.Second)
+
    // We should get an error, and that error should indicate that the HTLC
    // should be rejected due to a policy violation.
    if err == nil {
@@ -637,9 +638,9 @@ func TestLinkForwardTimelockPolicyMismatch(t *testing.T) {
    }
}

-// TestLinkForwardTimelockPolicyMismatch tests that if a node is an
-// intermediate node in a multi-hop payment and receives an HTLC that violates
-// its current fee policy, then the HTLC is rejected with the proper error.
+// TestLinkForwardFeePolicyMismatch tests that if a node is an intermediate
+// node in a multi-hop payment and receives an HTLC that violates its current
+// fee policy, then the HTLC is rejected with the proper error.
func TestLinkForwardFeePolicyMismatch(t *testing.T) {
    t.Parallel()
@@ -4528,3 +4529,55 @@ func TestExpectedFee(t *testing.T) {
        }
    }
}
+
+// TestForwardingAsymmetricTimeLockPolicies tests that each link is able to
+// properly handle forwarding HTLCs when their outgoing channels have
+// asymmetric policies w.r.t what they require for time locks.
+func TestForwardingAsymmetricTimeLockPolicies(t *testing.T) {
+    t.Parallel()
+
+    // First, we'll create our traditional three hop network, with Bob
+    // interacting with and asserting the state of the two end points of
+    // this test.
+    channels, cleanUp, _, err := createClusterChannels(
+        btcutil.SatoshiPerBitcoin*3,
+        btcutil.SatoshiPerBitcoin*5,
+    )
+    if err != nil {
+        t.Fatalf("unable to create channel: %v", err)
+    }
+    defer cleanUp()
+
+    n := newThreeHopNetwork(
+        t, channels.aliceToBob, channels.bobToAlice, channels.bobToCarol,
+        channels.carolToBob, testStartingHeight,
+    )
+    if err := n.start(); err != nil {
+        t.Fatalf("unable to start three hop network: %v", err)
+    }
+    defer n.stop()
+
+    // Now that each of the links are up, we'll modify the link from Alice
+    // -> Bob to have a greater time lock delta than that of the link of
+    // Bob -> Carol.
+    n.firstBobChannelLink.UpdateForwardingPolicy(ForwardingPolicy{
+        TimeLockDelta: 7,
+    })
+
+    // Now that the Alice -> Bob link has been updated, we'll craft and
+    // send a payment from Alice -> Carol. This should succeed as normal,
+    // even though Bob has asymmetric time lock policies.
+    amount := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin)
+    htlcAmt, totalTimelock, hops := generateHops(
+        amount, testStartingHeight, n.firstBobChannelLink,
+        n.carolChannelLink,
+    )
+
+    _, err = n.makePayment(
+        n.aliceServer, n.carolServer, n.bobServer.PubKey(), hops,
+        amount, htlcAmt, totalTimelock,
+    ).Wait(30 * time.Second)
+    if err != nil {
+        t.Fatalf("unable to send payment: %v", err)
+    }
+}

@@ -613,7 +613,7 @@ func (f *mockChannelLink) HandleChannelUpdate(lnwire.Message) {
func (f *mockChannelLink) UpdateForwardingPolicy(_ ForwardingPolicy) {
}

func (f *mockChannelLink) HtlcSatifiesPolicy([32]byte, lnwire.MilliSatoshi,
-    lnwire.MilliSatoshi) lnwire.FailureMessage {
+    lnwire.MilliSatoshi, uint32, uint32, uint32) lnwire.FailureMessage {
    return nil
}

@@ -80,6 +80,15 @@ type htlcPacket struct {
    // circuit holds a reference to an Add's circuit which is persisted in
    // the switch during successful forwarding.
    circuit *PaymentCircuit
+
+    // incomingTimeout is the timeout that the incoming HTLC carried. This
+    // is the timeout of the HTLC applied to the incoming link.
+    incomingTimeout uint32
+
+    // outgoingTimeout is the timeout of the proposed outgoing HTLC. This
+    // will be extracted from the hop payload received by the incoming
+    // link.
+    outgoingTimeout uint32
}

// inKey returns the circuit key used to identify the incoming htlc.

@@ -964,9 +964,11 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error {
            // Before we check the link's bandwidth, we'll ensure
            // that the HTLC satisfies the current forwarding
            // policy of this target link.
+            currentHeight := atomic.LoadUint32(&s.bestHeight)
            err := link.HtlcSatifiesPolicy(
                htlc.PaymentHash, packet.incomingAmount,
-                packet.amount,
+                packet.amount, packet.incomingTimeout,
+                packet.outgoingTimeout, currentHeight,
            )
            if err != nil {
                linkErrs[link.ShortChanID()] = err
@@ -1404,6 +1406,7 @@ out:
            }

            atomic.StoreUint32(&s.bestHeight, uint32(blockEpoch.Height))
+
        // A local close request has arrived, we'll forward this to the
        // relevant link (if it exists) so the channel can be
        // cooperatively closed (if possible).

@@ -601,15 +601,17 @@ func generateHops(payAmt lnwire.MilliSatoshi, startingHeight uint32,
            nextHop = path[i+1].channel.ShortChanID()
        }

-        // If this is the last, hop, then the time lock will be their
-        // specified delta policy plus our starting height.
-        totalTimelock += lastHop.cfg.FwrdingPolicy.TimeLockDelta
-        timeLock := totalTimelock
-
-        // Otherwise, the outgoing time lock should be the incoming
-        // timelock minus their specified delta.
-        if i != len(path)-1 {
-            delta := path[i].cfg.FwrdingPolicy.TimeLockDelta
-            timeLock = totalTimelock - delta
-        }
+        var timeLock uint32
+        // If this is the last, hop, then the time lock will be their
+        // specified delta policy plus our starting height.
+        if i == len(path)-1 {
+            totalTimelock += lastHop.cfg.FwrdingPolicy.TimeLockDelta
+            timeLock = totalTimelock
+        } else {
+            // Otherwise, the outgoing time lock should be the
+            // incoming timelock minus their specified delta.
+            delta := path[i+1].cfg.FwrdingPolicy.TimeLockDelta
+            totalTimelock += delta
+            timeLock = totalTimelock - delta
+        }

@@ -373,14 +373,14 @@ func newRoute(amtToSend, feeLimit lnwire.MilliSatoshi, sourceVertex Vertex,
            // route such that each hops time lock increases as we
            // walk backwards in the route, using the delta of the
            // previous hop.
-            route.TotalTimeLock += uint32(edge.TimeLockDelta)
+            delta := uint32(pathEdges[i+1].TimeLockDelta)
+            route.TotalTimeLock += delta

            // Otherwise, the value of the outgoing time-lock will
            // be the value of the time-lock for the _outgoing_
            // HTLC, so we factor in their specified grace period
            // (time lock delta).
-            nextHop.OutgoingTimeLock = route.TotalTimeLock -
-                uint32(edge.TimeLockDelta)
+            nextHop.OutgoingTimeLock = route.TotalTimeLock - delta
        }

        route.Hops[i] = nextHop
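
On the path-finding side, the newRoute hunk above now takes the delta from pathEdges[i+1], i.e. from the edge that follows the hop being processed, while walking the route backwards. Below is a simplified, hedged sketch of that accumulation; the function and slice layout are illustrative stand-ins, not lnd's routing structures.

package main

import "fmt"

// buildTimeLocks sketches the backwards accumulation newRoute performs: the
// running total grows by the delta of the edge *after* each hop, while that
// hop's own outgoing time lock excludes the delta it charges. edgeDeltas[i]
// is the delta advertised for the channel into hop i; edgeDeltas[0] (the
// sender's own channel) is never consumed by this loop. Assumes at least one
// hop.
func buildTimeLocks(heightNow, finalCLTVDelta uint32,
    edgeDeltas []uint32) (uint32, []uint32) {

    n := len(edgeDeltas)
    outgoing := make([]uint32, n)

    // The final hop only needs the receiver's CLTV delta on top of the
    // current height.
    total := heightNow + finalCLTVDelta
    outgoing[n-1] = total

    // Every earlier hop adds the next edge's delta to the total the sender
    // must lock up, but excludes it from what the hop forwards downstream.
    for i := n - 2; i >= 0; i-- {
        delta := edgeDeltas[i+1]
        total += delta
        outgoing[i] = total - delta
    }
    return total, outgoing
}

func main() {
    // A three hop route at height 500000 with a final delta of 9, where the
    // second and third channels advertise deltas of 20 and 40.
    total, outgoing := buildTimeLocks(500000, 9, []uint32{144, 20, 40})
    fmt.Println(total)    // 500069: CLTV on the HTLC the sender offers
    fmt.Println(outgoing) // [500049 500009 500009]
}

Under these numbers the sender locks its HTLC to height 500069, the first hop forwards at 500049, and the second hop delivers the receiver's required 500009, so each forwarding node keeps exactly the delta advertised on its outgoing channel.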

@@ -30,7 +30,7 @@
    "channel_id": 12345,
    "channel_point": "89dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0",
    "flags": 1,
-    "expiry": 20,
+    "expiry": 10,
    "min_htlc": 1,
    "fee_base_msat": 100,
    "fee_rate": 1000,
@@ -43,7 +43,7 @@
    "channel_id": 12345,
    "channel_point": "89dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0",
    "flags": 0,
-    "expiry": 10,
+    "expiry": 20,
    "min_htlc": 1,
    "fee_base_msat": 200,
    "fee_rate": 2000,
@@ -56,7 +56,7 @@
    "channel_id": 12345839,
    "channel_point": "89dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0",
    "flags": 1,
-    "expiry": 40,
+    "expiry": 10,
    "min_htlc": 1,
    "fee_base_msat": 100,
    "fee_rate": 1000,
@@ -69,7 +69,7 @@
    "channel_id": 12345839,
    "channel_point": "89dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0",
    "flags": 0,
-    "expiry": 10,
+    "expiry": 40,
    "min_htlc": 1,
    "fee_base_msat": 400,
    "fee_rate": 4000,
@@ -95,7 +95,7 @@
    "channel_id": 1234583,
    "channel_point": "89dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0",
    "flags": 1,
-    "expiry": 40,
+    "expiry": 30,
    "min_htlc": 1,
    "fee_base_msat": 300,
    "fee_rate": 3000,
@@ -108,7 +108,7 @@
    "channel_id": 1234589,
    "channel_point": "89dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0",
    "flags": 1,
-    "expiry": 20,
+    "expiry": 30,
    "min_htlc": 1,
    "fee_base_msat": 300,
    "fee_rate": 3000,
@@ -121,7 +121,7 @@
    "channel_id": 1234589,
    "channel_point": "89dc56859c6a082d15ba1a7f6cb6be3fea62e1746e2cb8497b1189155c21a233:0",
    "flags": 0,
-    "expiry": 30,
+    "expiry": 20,
    "min_htlc": 1,
    "fee_base_msat": 200,
    "fee_rate": 2000,