Merge pull request #2887 from joostjager/asym-broadcast-delta

config: lower outgoing htlc broadcast delta
Olaoluwa Osuntokun 2019-05-03 13:28:31 -07:00 committed by GitHub
commit 649991c46f
9 changed files with 210 additions and 94 deletions

@@ -68,11 +68,50 @@ const (
defaultTorV2PrivateKeyFilename = "v2_onion_private_key"
defaultTorV3PrivateKeyFilename = "v3_onion_private_key"
defaultIncomingBroadcastDelta = 20
defaultFinalCltvRejectDelta = 2
// defaultIncomingBroadcastDelta defines the number of blocks before the
// expiry of an incoming htlc at which we force close the channel. We
// only go to chain if we also have the preimage to actually pull in the
// htlc. BOLT #2 suggests 7 blocks. We use a few more for extra safety.
// Within this window we need to get our sweep or 2nd level success tx
// confirmed, because after that the remote party is also able to claim
// the htlc using the timeout path.
defaultIncomingBroadcastDelta = 10
defaultOutgoingBroadcastDelta = 10
defaultOutgoingCltvRejectDelta = 0
// defaultFinalCltvRejectDelta defines the number of blocks before the
// expiry of an incoming exit hop htlc at which we cancel it back
// immediately. It is an extra safety measure over the final cltv
// requirement as it is defined in the invoice. It ensures that we
// cancel back htlcs that, when held on to, may cause us to force close
// the channel because we enter the incoming broadcast window. Bolt #11
// suggests 9 blocks here. We use a few more for additional safety.
//
// There is still a small gap that remains between receiving the
// RevokeAndAck and canceling back. If a new block arrives within that
// window, we may still force close the channel. There is currently no
// way to reject an UpdateAddHtlc that we already know will push us
// into the broadcast window.
defaultFinalCltvRejectDelta = defaultIncomingBroadcastDelta + 3
// defaultOutgoingBroadcastDelta defines the number of blocks before the
// expiry of an outgoing htlc at which we force close the channel. We
// are not in a hurry to force close, because there is nothing to claim
// for us. We do need to time the htlc out, because there may be an
// incoming htlc that will time out too (albeit later). Bolt #2 suggests
// a value of -1 here, but we allow one block less to prevent potential
// confusion around the negative value. It means we force close the
// channel at exactly the htlc expiry height.
defaultOutgoingBroadcastDelta = 0
// defaultOutgoingCltvRejectDelta defines the number of blocks before
// the expiry of an outgoing htlc at which we don't want to offer it to
// the next peer anymore. If that happens, we cancel back the incoming
// htlc. This is to prevent the situation where we have an outstanding
// htlc that brings or will soon bring us inside the outgoing broadcast
// window and trigger us to force close the channel. Bolt #2 suggests a
// value of 0. We pad it a bit so that a slow round trip to the next
// peer, with a block arriving during that round trip, does not trigger
// force closure.
defaultOutgoingCltvRejectDelta = defaultOutgoingBroadcastDelta + 3
// minTimeLockDelta is the minimum timelock we require for incoming
// HTLCs on our channels.

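Taken together, the four constants carve two windows out of each htlc's absolute expiry height: a reject window, in which we refuse to take on (or pass on) the htlc, and a broadcast window, in which we go to chain. Below is a minimal sketch, not part of the commit, that plugs the new defaults into a hypothetical expiry height to show where each threshold lands.

package main

import "fmt"

// New defaults from the hunk above.
const (
	defaultIncomingBroadcastDelta  = 10
	defaultFinalCltvRejectDelta    = defaultIncomingBroadcastDelta + 3
	defaultOutgoingBroadcastDelta  = 0
	defaultOutgoingCltvRejectDelta = defaultOutgoingBroadcastDelta + 3
)

func main() {
	// Hypothetical absolute expiry height of an htlc, for illustration.
	expiry := uint32(600000)

	// Incoming htlc with a known preimage: force close early enough to
	// confirm our sweep or 2nd level success tx before the remote's
	// timeout path opens.
	fmt.Println(expiry - defaultIncomingBroadcastDelta) // 599990

	// An exit hop htlc arriving once we are past this height is
	// cancelled back immediately instead of being held.
	fmt.Println(expiry - defaultFinalCltvRejectDelta) // 599987

	// Outgoing htlc: nothing to claim early, so with a delta of zero we
	// broadcast at exactly the expiry height.
	fmt.Println(expiry - defaultOutgoingBroadcastDelta) // 600000

	// Past this height we no longer offer the htlc to the next peer and
	// cancel back the incoming htlc instead.
	fmt.Println(expiry - defaultOutgoingCltvRejectDelta) // 599997
}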
@@ -107,6 +107,14 @@ type ChannelLink interface {
incomingTimeout, outgoingTimeout uint32,
heightNow uint32) lnwire.FailureMessage
// HtlcSatifiesPolicyLocal should return a nil error if the passed HTLC
// details satisfy the current channel policy. Otherwise, a valid
// protocol failure message should be returned in order to signal the
// violation. This call is intended to be used for locally initiated
// payments for which there is no corresponding incoming htlc.
HtlcSatifiesPolicyLocal(payHash [32]byte, amt lnwire.MilliSatoshi,
timeout uint32, heightNow uint32) lnwire.FailureMessage
// Bandwidth returns the amount of milli-satoshis which current link
// might pass through channel link. The value returned from this method
// represents the up to date available flow through the channel. This

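The split mirrors the two dispatch paths in the switch: forwarded htlcs validate both legs, while locally initiated payments only have an outgoing leg to check. The following compilable sketch of that shape is illustrative only; it uses a stub link and a plain error where the real interface returns an lnwire.FailureMessage.

package main

import (
	"errors"
	"fmt"
)

// policyChecker mirrors the two ChannelLink policy methods, with error
// standing in for lnwire.FailureMessage and uint64 for MilliSatoshi.
type policyChecker interface {
	// Forward path: both the incoming and outgoing leg are validated.
	HtlcSatifiesPolicy(payHash [32]byte, incomingAmt, amtToForward uint64,
		incomingTimeout, outgoingTimeout, heightNow uint32) error

	// Local path: there is no incoming htlc, so only the outgoing
	// constraints apply.
	HtlcSatifiesPolicyLocal(payHash [32]byte, amt uint64,
		timeout, heightNow uint32) error
}

// stubLink rejects any outgoing leg expiring within 3 blocks, matching
// the new defaultOutgoingCltvRejectDelta.
type stubLink struct{}

func (stubLink) HtlcSatifiesPolicy(_ [32]byte, _, _ uint64,
	_, outgoingTimeout, heightNow uint32) error {

	return checkOutgoing(outgoingTimeout, heightNow)
}

func (stubLink) HtlcSatifiesPolicyLocal(_ [32]byte, _ uint64,
	timeout, heightNow uint32) error {

	return checkOutgoing(timeout, heightNow)
}

func checkOutgoing(timeout, heightNow uint32) error {
	if timeout <= heightNow+3 {
		return errors.New("expiry too soon")
	}
	return nil
}

func main() {
	var l policyChecker = stubLink{}
	fmt.Println(l.HtlcSatifiesPolicyLocal([32]byte{}, 1000, 103, 100)) // expiry too soon
	fmt.Println(l.HtlcSatifiesPolicyLocal([32]byte{}, 1000, 104, 100)) // <nil>
}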
@@ -2093,46 +2093,12 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
policy := l.cfg.FwrdingPolicy
l.RUnlock()
// As our first sanity check, we'll ensure that the passed HTLC isn't
// too small for the next hop. If so, then we'll cancel the HTLC
// directly.
if amtToForward < policy.MinHTLC {
l.errorf("outgoing htlc(%x) is too small: min_htlc=%v, "+
"htlc_value=%v", payHash[:], policy.MinHTLC,
amtToForward)
// As part of the returned error, we'll send our latest routing
// policy so the sending node obtains the most up to date data.
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID())
if err != nil {
failure = &lnwire.FailTemporaryNodeFailure{}
} else {
failure = lnwire.NewAmountBelowMinimum(
amtToForward, *update,
)
}
return failure
}
// Next, ensure that the passed HTLC isn't too large. If so, we'll cancel
// the HTLC directly.
if policy.MaxHTLC != 0 && amtToForward > policy.MaxHTLC {
l.errorf("outgoing htlc(%x) is too large: max_htlc=%v, "+
"htlc_value=%v", payHash[:], policy.MaxHTLC, amtToForward)
// As part of the returned error, we'll send our latest routing policy
// so the sending node obtains the most up-to-date data.
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID())
if err != nil {
failure = &lnwire.FailTemporaryNodeFailure{}
} else {
failure = lnwire.NewTemporaryChannelFailure(update)
}
return failure
// First check whether the outgoing htlc satisfies the channel policy.
err := l.htlcSatifiesPolicyOutgoing(
policy, payHash, amtToForward, outgoingTimeout, heightNow,
)
if err != nil {
return err
}
// Next, using the amount of the incoming HTLC, we'll calculate the
@@ -2165,36 +2131,6 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
return failure
}
// We want to avoid offering an HTLC which will expire in the near
// future, so we'll reject an HTLC if the outgoing expiration time is
// too close to the current height.
if outgoingTimeout <= heightNow+l.cfg.OutgoingCltvRejectDelta {
l.errorf("htlc(%x) has an expiry that's too soon: "+
"outgoing_expiry=%v, best_height=%v", payHash[:],
outgoingTimeout, heightNow)
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(
l.ShortChanID(),
)
if err != nil {
failure = lnwire.NewTemporaryChannelFailure(update)
} else {
failure = lnwire.NewExpiryTooSoon(*update)
}
return failure
}
// Check absolute max delta.
if outgoingTimeout > maxCltvExpiry+heightNow {
l.errorf("outgoing htlc(%x) has a time lock too far in the "+
"future: got %v, but maximum is %v", payHash[:],
outgoingTimeout-heightNow, maxCltvExpiry)
return &lnwire.FailExpiryTooFar{}
}
// Finally, we'll ensure that the time-lock on the outgoing HTLC meets
// the following constraint: the incoming time-lock minus our time-lock
// delta should equal the outgoing time lock. Otherwise, whether the
@@ -2225,6 +2161,105 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
return nil
}
// HtlcSatifiesPolicyLocal should return a nil error if the passed HTLC details
// satisfy the current channel policy. Otherwise, a valid protocol failure
// message should be returned in order to signal the violation. This call is
// intended to be used for locally initiated payments for which there is no
// corresponding incoming htlc.
func (l *channelLink) HtlcSatifiesPolicyLocal(payHash [32]byte,
amt lnwire.MilliSatoshi, timeout uint32,
heightNow uint32) lnwire.FailureMessage {
l.RLock()
policy := l.cfg.FwrdingPolicy
l.RUnlock()
return l.htlcSatifiesPolicyOutgoing(
policy, payHash, amt, timeout, heightNow,
)
}
// htlcSatifiesPolicyOutgoing checks whether the given htlc parameters satisfy
// the channel's amount and time lock constraints.
func (l *channelLink) htlcSatifiesPolicyOutgoing(policy ForwardingPolicy,
payHash [32]byte, amt lnwire.MilliSatoshi, timeout uint32,
heightNow uint32) lnwire.FailureMessage {
// As our first sanity check, we'll ensure that the passed HTLC isn't
// too small for the next hop. If so, then we'll cancel the HTLC
// directly.
if amt < policy.MinHTLC {
l.errorf("outgoing htlc(%x) is too small: min_htlc=%v, "+
"htlc_value=%v", payHash[:], policy.MinHTLC,
amt)
// As part of the returned error, we'll send our latest routing
// policy so the sending node obtains the most up to date data.
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID())
if err != nil {
failure = &lnwire.FailTemporaryNodeFailure{}
} else {
failure = lnwire.NewAmountBelowMinimum(
amt, *update,
)
}
return failure
}
// Next, ensure that the passed HTLC isn't too large. If so, we'll cancel
// the HTLC directly.
if policy.MaxHTLC != 0 && amt > policy.MaxHTLC {
l.errorf("outgoing htlc(%x) is too large: max_htlc=%v, "+
"htlc_value=%v", payHash[:], policy.MaxHTLC, amt)
// As part of the returned error, we'll send our latest routing policy
// so the sending node obtains the most up-to-date data.
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(l.ShortChanID())
if err != nil {
failure = &lnwire.FailTemporaryNodeFailure{}
} else {
failure = lnwire.NewTemporaryChannelFailure(update)
}
return failure
}
// We want to avoid offering an HTLC which will expire in the near
// future, so we'll reject an HTLC if the outgoing expiration time is
// too close to the current height.
if timeout <= heightNow+l.cfg.OutgoingCltvRejectDelta {
l.errorf("htlc(%x) has an expiry that's too soon: "+
"outgoing_expiry=%v, best_height=%v", payHash[:],
timeout, heightNow)
var failure lnwire.FailureMessage
update, err := l.cfg.FetchLastChannelUpdate(
l.ShortChanID(),
)
if err != nil {
failure = lnwire.NewTemporaryChannelFailure(update)
} else {
failure = lnwire.NewExpiryTooSoon(*update)
}
return failure
}
// Check absolute max delta.
if timeout > maxCltvExpiry+heightNow {
l.errorf("outgoing htlc(%x) has a time lock too far in the "+
"future: got %v, but maximum is %v", payHash[:],
timeout-heightNow, maxCltvExpiry)
return &lnwire.FailExpiryTooFar{}
}
return nil
}
// Stats returns the statistics of channel link.
//
// NOTE: Part of the ChannelLink interface.

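The extracted htlcSatifiesPolicyOutgoing applies its checks in a fixed order: amount floor, amount ceiling (a MaxHTLC of zero disables the cap), expiry too soon, expiry too far. The simplified, self-contained version below reproduces that ordering with plain errors instead of wire failures; the absolute cltv cap is a placeholder value, not lnd's real constant.

package main

import (
	"errors"
	"fmt"
)

// Placeholder for the absolute cltv limit; illustrative only.
const maxCltvExpiry = 5000

type forwardingPolicy struct {
	MinHTLC, MaxHTLC        uint64 // msat; MaxHTLC == 0 means no cap
	OutgoingCltvRejectDelta uint32
}

// checkOutgoingPolicy reproduces the order of the four outgoing checks.
func checkOutgoingPolicy(p forwardingPolicy, amt uint64,
	timeout, heightNow uint32) error {

	switch {
	case amt < p.MinHTLC:
		return errors.New("amount below minimum")
	case p.MaxHTLC != 0 && amt > p.MaxHTLC:
		return errors.New("temporary channel failure: amount too large")
	case timeout <= heightNow+p.OutgoingCltvRejectDelta:
		return errors.New("expiry too soon")
	case timeout > heightNow+maxCltvExpiry:
		return errors.New("expiry too far")
	}
	return nil
}

func main() {
	p := forwardingPolicy{MinHTLC: 1000, OutgoingCltvRejectDelta: 3}

	// Boundary case: an expiry of exactly heightNow+delta is still
	// rejected, because the comparison is <=.
	fmt.Println(checkOutgoingPolicy(p, 5000, 103, 100)) // expiry too soon
	fmt.Println(checkOutgoingPolicy(p, 5000, 104, 100)) // <nil>
	fmt.Println(checkOutgoingPolicy(p, 500, 110, 100))  // amount below minimum
}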
@@ -1365,13 +1365,12 @@ func TestChannelLinkExpiryTooSoonExitNode(t *testing.T) {
amount := lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin)
// We'll craft an HTLC packet, but set the final hop CLTV to 3 blocks
// after the current true height. This is less or equal to the expiry
// grace delta of 3, so we expect the incoming htlc to be failed by the
// We'll craft an HTLC packet, but set the final hop CLTV to 5 blocks
// after the current true height. This is less than the test invoice
// cltv delta of 6, so we expect the incoming htlc to be failed by the
// exit hop.
lastHopDelta := n.firstBobChannelLink.cfg.FwrdingPolicy.TimeLockDelta
htlcAmt, totalTimelock, hops := generateHops(amount,
startingHeight+3-lastHopDelta, n.firstBobChannelLink)
startingHeight-1, n.firstBobChannelLink)
// Now we'll send out the payment from Alice to Bob.
firstHop := n.firstBobChannelLink.ShortChanID()

@@ -612,6 +612,8 @@ type mockChannelLink struct {
eligible bool
htlcID uint64
htlcSatifiesPolicyLocalResult lnwire.FailureMessage
}
// completeCircuit is a helper method for adding the finalized payment circuit
@@ -675,6 +677,13 @@ func (f *mockChannelLink) HtlcSatifiesPolicy([32]byte, lnwire.MilliSatoshi,
return nil
}
func (f *mockChannelLink) HtlcSatifiesPolicyLocal(payHash [32]byte,
amt lnwire.MilliSatoshi, timeout uint32,
heightNow uint32) lnwire.FailureMessage {
return f.htlcSatifiesPolicyLocalResult
}
func (f *mockChannelLink) Stats() (uint64, lnwire.MilliSatoshi, lnwire.MilliSatoshi) {
return 0, 0, 0
}
@@ -728,7 +737,7 @@ func newDB() (*channeldb.DB, func(), error) {
return cdb, cleanUp, nil
}
const testInvoiceCltvExpiry = 4
const testInvoiceCltvExpiry = 6
type mockInvoiceRegistry struct {
settleChan chan lntypes.Hash

@@ -799,6 +799,23 @@ func (s *Switch) handleLocalDispatch(pkt *htlcPacket) error {
}
}
// Ensure that the htlc satisfies the outgoing channel policy.
currentHeight := atomic.LoadUint32(&s.bestHeight)
htlcErr := link.HtlcSatifiesPolicyLocal(
htlc.PaymentHash,
htlc.Amount,
htlc.Expiry, currentHeight,
)
if htlcErr != nil {
log.Errorf("Link %v policy for local forward not "+
"satisfied", pkt.outgoingChanID)
return &ForwardingError{
ErrorSource: s.cfg.SelfKey,
FailureMessage: htlcErr,
}
}
if link.Bandwidth() < htlc.Amount {
err := fmt.Errorf("Link %v has insufficient capacity: "+
"need %v, has %v", pkt.outgoingChanID,

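Note the ordering: the policy check runs before the bandwidth check, so a payment that violates both now fails with the policy error, attributed to our own node via ErrorSource. A small illustrative sketch of that ordering (hypothetical names, not the actual switch API):

package main

import "fmt"

// dispatch sketches handleLocalDispatch's check order after this commit:
// outgoing policy first, then bandwidth. All names are illustrative.
func dispatch(amt, bandwidth uint64, expiry, bestHeight uint32) string {
	const outgoingCltvRejectDelta = 3
	if expiry <= bestHeight+outgoingCltvRejectDelta {
		return "ForwardingError from self: expiry too soon"
	}
	if bandwidth < amt {
		return "insufficient capacity"
	}
	return "handed to link"
}

func main() {
	// Both constraints violated: the policy failure wins because it is
	// evaluated first.
	fmt.Println(dispatch(5000, 1000, 103, 100)) // ForwardingError from self: expiry too soon
	fmt.Println(dispatch(5000, 9000, 200, 100)) // handed to link
}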
@@ -1363,6 +1363,21 @@ func TestSkipIneligibleLinksMultiHopForward(t *testing.T) {
func TestSkipIneligibleLinksLocalForward(t *testing.T) {
t.Parallel()
testSkipLinkLocalForward(t, false, nil)
}
// TestSkipPolicyUnsatisfiedLinkLocalForward ensures that the switch will not
// attempt to send locally initiated HTLCs that would violate the channel policy
// down a link.
func TestSkipPolicyUnsatisfiedLinkLocalForward(t *testing.T) {
t.Parallel()
testSkipLinkLocalForward(t, true, lnwire.NewTemporaryChannelFailure(nil))
}
func testSkipLinkLocalForward(t *testing.T, eligible bool,
policyResult lnwire.FailureMessage) {
// We'll create a single link for this test, marking it as being unable
// to forward from the get-go.
alicePeer, err := newMockServer(t, "alice", testStartingHeight, nil, 6)
@@ -1382,8 +1397,9 @@ func TestSkipIneligibleLinksLocalForward(t *testing.T) {
chanID1, _, aliceChanID, _ := genIDs()
aliceChannelLink := newMockChannelLink(
s, chanID1, aliceChanID, alicePeer, false,
s, chanID1, aliceChanID, alicePeer, eligible,
)
aliceChannelLink.htlcSatifiesPolicyLocalResult = policyResult
if err := s.AddLink(aliceChannelLink); err != nil {
t.Fatalf("unable to add alice link: %v", err)
}

@@ -1052,7 +1052,7 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer,
MinFeeUpdateTimeout: minFeeUpdateTimeout,
MaxFeeUpdateTimeout: maxFeeUpdateTimeout,
OnChannelFailure: func(lnwire.ChannelID, lnwire.ShortChannelID, LinkFailureError) {},
FinalCltvRejectDelta: 3,
FinalCltvRejectDelta: 5,
OutgoingCltvRejectDelta: 3,
},
channel,

@@ -9466,7 +9466,8 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
// We'll now mine enough blocks to trigger Bob's broadcast of his
// commitment transaction due to the fact that the HTLC is about to
// timeout.
// timeout. With the default outgoing broadcast delta of zero, this will
// be the same height as the htlc expiry height.
numBlocks := uint32(finalCltvDelta - defaultOutgoingBroadcastDelta)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
@@ -9503,8 +9504,9 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
t.Fatalf("htlc mismatch: %v", predErr)
}
// We'll mine defaultCSV blocks in order to generate the sweep transaction
// of Bob's funding output.
// We'll mine defaultCSV blocks in order to generate the sweep
// transaction of Bob's funding output. This will also bring us to the
// maturity height of the htlc tx output.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
@@ -9514,15 +9516,6 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
t.Fatalf("unable to find bob's funding output sweep tx: %v", err)
}
// We'll now mine the remaining blocks to cause the HTLC itself to
// timeout.
_, err = net.Miner.Node.Generate(
defaultOutgoingBroadcastDelta - defaultCSV,
)
if err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// The second layer HTLC timeout transaction should now have been
// broadcast on-chain.
secondLayerHash, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)