Merge pull request #5355 from carlaKC/4656-maxhtlcnumber

multi: set zero bandwidth hint for channels that do not have any available slots
Olaoluwa Osuntokun 2021-06-24 15:18:20 -07:00 committed by GitHub
commit 6dd7321a3f
7 changed files with 300 additions and 6 deletions

@ -145,6 +145,10 @@ type ChannelLink interface {
// will use this function in forwarding decisions accordingly.
EligibleToForward() bool
// MayAddOutgoingHtlc returns an error if we may not add an outgoing
// htlc to the channel.
MayAddOutgoingHtlc() error
// AttachMailBox delivers an active MailBox to the link. The MailBox may
// have buffered messages.
AttachMailBox(MailBox)

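A consumer of the interface now has two independent gates to pass before routing over a link. A minimal sketch of how a caller might combine them (linkUsable is an illustrative helper, not part of this change):

func linkUsable(l ChannelLink) bool {
	// An ineligible link (e.g. one still waiting on channel
	// confirmation) is never usable.
	if !l.EligibleToForward() {
		return false
	}
	// A non-nil error means there is no free slot for another outgoing
	// htlc, so the link should be skipped entirely.
	return l.MayAddOutgoingHtlc() == nil
}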
@ -2156,6 +2156,12 @@ func (l *channelLink) Bandwidth() lnwire.MilliSatoshi {
return l.channel.AvailableBalance()
}
// MayAddOutgoingHtlc indicates whether we may add any more outgoing htlcs to
// this channel.
func (l *channelLink) MayAddOutgoingHtlc() error {
return l.channel.MayAddOutgoingHtlc()
}
// AttachMailBox updates the current mailbox used by this link, and hooks up
// the mailbox's message and packet outboxes to the link's upstream and
// downstream chans, respectively.

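Note that Bandwidth() reports available balance while MayAddOutgoingHtlc() reports slot availability; the two can disagree, which is the gap this PR closes. A hedged diagnostic sketch within the htlcswitch package (linkStatus is illustrative only, not part of the diff):

func linkStatus(l ChannelLink) string {
	// A link can hold plenty of balance yet be unusable because every
	// htlc slot on the commitment is already occupied.
	if err := l.MayAddOutgoingHtlc(); err != nil {
		return fmt.Sprintf("no free htlc slots: %v", err)
	}
	return fmt.Sprintf("usable, bandwidth: %v", l.Bandwidth())
}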
@ -756,6 +756,7 @@ func (f *mockChannelLink) Peer() lnpeer.Peer { return
func (f *mockChannelLink) ChannelPoint() *wire.OutPoint { return &wire.OutPoint{} }
func (f *mockChannelLink) Stop() {}
func (f *mockChannelLink) EligibleToForward() bool { return f.eligible }
func (f *mockChannelLink) MayAddOutgoingHtlc() error { return nil }
func (f *mockChannelLink) setLiveShortChanID(sid lnwire.ShortChannelID) { f.shortChanID = sid }
func (f *mockChannelLink) UpdateShortChanID() (lnwire.ShortChannelID, error) {
f.eligible = true

@ -0,0 +1,229 @@
package itest
import (
"context"
"testing"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// testMaxHtlcPathfind tests the case where we try to send a payment over a
// channel where we have already reached the limit of the number of htlcs that
// we may add to the remote party's commitment. This test asserts that we do
// not attempt to use the full channel at all in our pathfinding.
func testMaxHtlcPathfind(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// Setup a channel between Alice and Bob where Alice will only allow
// Bob to add a maximum of 5 htlcs to her commitment.
maxHtlcs := 5
ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
chanPoint := openChannelAndAssert(
ctxt, t, net, net.Alice, net.Bob,
lntest.OpenChannelParams{
Amt: 1000000,
PushAmt: 800000,
RemoteMaxHtlcs: uint16(maxHtlcs),
},
)
// Wait for Alice and Bob to receive the channel edge from the
// funding manager.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err := net.Alice.WaitForNetworkChannelOpen(ctxt, chanPoint)
require.NoError(t.t, err, "alice does not have open channel")
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = net.Bob.WaitForNetworkChannelOpen(ctxt, chanPoint)
require.NoError(t.t, err, "bob does not have open channel")
// Alice and Bob should have one channel open with each other now.
assertNodeNumChannels(t, net.Alice, 1)
assertNodeNumChannels(t, net.Bob, 1)
// Send our maximum number of htlcs from Bob -> Alice so that we get
// to a point where Alice won't accept any more htlcs on the channel.
subscriptions := make([]*holdSubscription, maxHtlcs)
cancelCtxs := make([]func(), maxHtlcs)
for i := 0; i < maxHtlcs; i++ {
subCtx, cancel := context.WithTimeout(ctxb, defaultTimeout)
cancelCtxs[i] = cancel
subscriptions[i] = acceptHoldInvoice(
subCtx, t.t, i, net.Bob, net.Alice,
)
}
// Cancel all of our subscriptions on exit.
defer func() {
for _, cancel := range cancelCtxs {
cancel()
}
}()
err = assertNumActiveHtlcs([]*lntest.HarnessNode{
net.Alice, net.Bob,
}, maxHtlcs)
require.NoError(t.t, err, "htlcs not active")
// Now we send a payment from Alice -> Bob to sanity check that our
// commitment limit is not applied in the opposite direction.
subCtx, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
aliceBobSub := acceptHoldInvoice(
subCtx, t.t, maxHtlcs, net.Alice, net.Bob,
)
err = assertNumActiveHtlcs([]*lntest.HarnessNode{
net.Alice, net.Bob,
}, maxHtlcs+1)
require.NoError(t.t, err, "htlcs not active")
// Now, we're going to try to send another payment from Bob -> Alice.
// We've hit our max remote htlcs, so we expect pathfinding to find no
// usable route and the payment to fail without dispatching any htlcs.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
payment, err := net.Bob.RouterClient.SendPaymentV2(
ctxt, &routerrpc.SendPaymentRequest{
Amt: 1000,
Dest: net.Alice.PubKey[:],
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
MaxParts: 10,
Amp: true,
},
)
require.NoError(t.t, err, "send payment failed")
update, err := payment.Recv()
require.NoError(t.t, err, "no payment in flight update")
require.Equal(t.t, lnrpc.Payment_IN_FLIGHT, update.Status,
"payment not inflight")
update, err = payment.Recv()
require.NoError(t.t, err, "no payment failed update")
require.Equal(t.t, lnrpc.Payment_FAILED, update.Status)
require.Len(t.t, update.Htlcs, 0, "expected no htlcs dispatched")
// Now that we're done, we cancel all our pending htlcs so that we
// can cleanup the channel with a coop close.
for _, sub := range subscriptions {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
sub.cancel(ctxt, t.t)
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
aliceBobSub.cancel(ctxt, t.t)
err = assertNumActiveHtlcs([]*lntest.HarnessNode{
net.Alice, net.Bob,
}, 0)
require.NoError(t.t, err, "expected all htlcs canceled")
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
}
type holdSubscription struct {
recipient invoicesrpc.InvoicesClient
hash lntypes.Hash
invSubscription invoicesrpc.Invoices_SubscribeSingleInvoiceClient
paymentSubscription routerrpc.Router_SendPaymentV2Client
}
// cancel cancels the hold invoice from the recipient's side and consumes
// updates from the payer until the payment has reached a final, failed state.
func (h *holdSubscription) cancel(ctx context.Context, t *testing.T) {
_, err := h.recipient.CancelInvoice(ctx, &invoicesrpc.CancelInvoiceMsg{
PaymentHash: h.hash[:],
})
require.NoError(t, err, "invoice cancel failed")
invUpdate, err := h.invSubscription.Recv()
require.NoError(t, err, "cancel invoice subscribe failed")
require.Equal(t, lnrpc.Invoice_CANCELED, invUpdate.State,
"expected invoice canceled")
// We expect one in flight update when our htlc is canceled back, and
// another when we fail the payment as a whole.
payUpdate, err := h.paymentSubscription.Recv()
require.NoError(t, err, "cancel payment subscribe failed")
require.Len(t, payUpdate.Htlcs, 1)
require.Equal(t, lnrpc.Payment_IN_FLIGHT, payUpdate.Status)
payUpdate, err = h.paymentSubscription.Recv()
require.NoError(t, err, "cancel payment subscribe failed")
require.Equal(t, lnrpc.Payment_FAILED, payUpdate.Status,
"expected payment failed")
require.Equal(t, lnrpc.PaymentFailureReason_FAILURE_REASON_INCORRECT_PAYMENT_DETAILS,
payUpdate.FailureReason, "expected unknown details")
}
// acceptHoldInvoice adds a hold invoice to the recipient node, pays it from
// the sender and asserts that we have reached the accepted state where htlcs
// are locked in for the payment.
func acceptHoldInvoice(ctx context.Context, t *testing.T, idx int, sender,
receiver *lntest.HarnessNode) *holdSubscription {
hash := [lntypes.HashSize]byte{byte(idx + 1)}
invoice, err := receiver.AddHoldInvoice(
ctx, &invoicesrpc.AddHoldInvoiceRequest{
ValueMsat: 10000,
Hash: hash[:],
},
)
require.NoError(t, err, "couldn't add invoice")
invStream, err := receiver.InvoicesClient.SubscribeSingleInvoice(
ctx, &invoicesrpc.SubscribeSingleInvoiceRequest{
RHash: hash[:],
},
)
require.NoError(t, err, "could not subscribe to invoice")
inv, err := invStream.Recv()
require.NoError(t, err, "invoice open stream failed")
require.Equal(t, lnrpc.Invoice_OPEN, inv.State,
"expected open")
payStream, err := sender.RouterClient.SendPaymentV2(
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: invoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
},
)
require.NoError(t, err, "send payment failed")
// Finally, assert that we progress to an accepted state. We expect
// the payer to get one update for the creation of the payment, and
// another when a htlc is dispatched.
payment, err := payStream.Recv()
require.NoError(t, err, "payment in flight stream failed")
require.Equal(t, lnrpc.Payment_IN_FLIGHT, payment.Status)
require.Len(t, payment.Htlcs, 0)
payment, err = payStream.Recv()
require.NoError(t, err, "payment in flight stream failed")
require.Equal(t, lnrpc.Payment_IN_FLIGHT, payment.Status)
require.Len(t, payment.Htlcs, 1)
inv, err = invStream.Recv()
require.NoError(t, err, "invoice accepted stream failed")
require.Equal(t, lnrpc.Invoice_ACCEPTED, inv.State,
"expected accepted invoice")
return &holdSubscription{
recipient: receiver.InvoicesClient,
hash: hash,
invSubscription: invStream,
paymentSubscription: payStream,
}
}

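The htlc counts asserted in the test follow from the limit being per-direction: Bob's five held htlcs saturate his outgoing slots toward Alice, while Alice's single htlc toward Bob is unaffected. A standalone worked check of that accounting, with the test's parameters hard-coded:

package main

import "fmt"

func main() {
	const maxHtlcs = 5 // RemoteMaxHtlcs chosen by Alice at channel open.

	bobToAlice := maxHtlcs // Bob saturates his direction.
	aliceToBob := 1        // The cap is per-direction; Alice may still add.

	// Matches the assertNumActiveHtlcs(..., maxHtlcs+1) call above.
	fmt.Println(bobToAlice + aliceToBob) // 6
}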
@ -322,4 +322,8 @@ var allTestCases = []*testCase{
name: "etcd_failover", name: "etcd_failover",
test: testEtcdFailover, test: testEtcdFailover,
}, },
{
name: "max htlc pathfind",
test: testMaxHtlcPathfind,
},
}

@ -4924,7 +4924,49 @@ func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC,
lc.Lock()
defer lc.Unlock()
pd := lc.htlcAddDescriptor(htlc, openKey)
if err := lc.validateAddHtlc(pd); err != nil {
return 0, err
}
lc.localUpdateLog.appendHtlc(pd)
return pd.HtlcIndex, nil
}
// MayAddOutgoingHtlc validates whether we can add an outgoing htlc to this
// channel. We don't have a value or circuit for this htlc, because we just
// want to test that we have slots for a potential htlc, so we use a "mock"
// htlc to validate a potential commitment state with one more outgoing htlc.
func (lc *LightningChannel) MayAddOutgoingHtlc() error {
lc.Lock()
defer lc.Unlock()
// Create a "mock" outgoing htlc, using the smallest amount we can add
// to the commitment so that we validate commitment slots rather than
// available balance, since our actual htlc amount is unknown at this
// stage.
pd := lc.htlcAddDescriptor(
&lnwire.UpdateAddHTLC{
Amount: lc.channelState.LocalChanCfg.MinHTLC,
},
&channeldb.CircuitKey{},
)
if err := lc.validateAddHtlc(pd); err != nil {
lc.log.Debugf("May add outgoing htlc rejected: %v", err)
return err
}
return nil
}
// htlcAddDescriptor returns a payment descriptor for the htlc and open key
// provided to add to our local update log.
func (lc *LightningChannel) htlcAddDescriptor(htlc *lnwire.UpdateAddHTLC,
openKey *channeldb.CircuitKey) *PaymentDescriptor {
return &PaymentDescriptor{
EntryType: Add,
RHash: PaymentHash(htlc.PaymentHash),
Timeout: htlc.Expiry,
@ -4934,7 +4976,11 @@ func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC,
OnionBlob: htlc.OnionBlob[:],
OpenCircuitKey: openKey,
}
}
// validateAddHtlc validates the addition of an outgoing htlc to our local and
// remote commitments.
func (lc *LightningChannel) validateAddHtlc(pd *PaymentDescriptor) error {
// Make sure adding this HTLC won't violate any of the constraints we
// must keep on the commitment transactions.
remoteACKedIndex := lc.localCommitChain.tail().theirMessageIndex
@ -4945,7 +4991,7 @@ func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC,
remoteACKedIndex, lc.localUpdateLog.logIndex, true, pd, nil,
)
if err != nil {
return err
}
// We must also check whether it can be added to our own commitment
@ -4958,12 +5004,10 @@ func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC,
false, pd, nil,
)
if err != nil {
return err
}
return nil
}
// ReceiveHTLC adds an HTLC to the state machine's remote update log. This

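Splitting AddHTLC into htlcAddDescriptor and validateAddHtlc is what lets MayAddOutgoingHtlc probe with a throwaway htlc without mutating the update log. A self-contained sketch of that probe pattern, using illustrative types that stand in for the real commitment state:

package main

import (
	"errors"
	"fmt"
)

// commitState is an illustrative stand-in for a commitment's htlc bookkeeping.
type commitState struct {
	pendingHtlcs     int
	maxAcceptedHtlcs int
	minHtlcMsat      int64
}

var errNoSlots = errors.New("commitment has no free htlc slot")

// validateAdd mirrors the slot check: would one more htlc exceed the cap?
func (c *commitState) validateAdd(amtMsat int64) error {
	if c.pendingHtlcs+1 > c.maxAcceptedHtlcs {
		return errNoSlots
	}
	return nil
}

// mayAddOutgoingHtlc probes with the smallest htlc that could be added, so
// the check exercises slot limits rather than balance, just as the real
// method builds its mock htlc with MinHTLC.
func (c *commitState) mayAddOutgoingHtlc() error {
	return c.validateAdd(c.minHtlcMsat)
}

func main() {
	c := &commitState{pendingHtlcs: 5, maxAcceptedHtlcs: 5, minHtlcMsat: 1000}
	fmt.Println(c.mayAddOutgoingHtlc()) // commitment has no free htlc slot
}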
@ -718,6 +718,12 @@ func newServer(cfg *Config, listenAddrs []net.Addr,
return 0
}
// If our link isn't currently in a state where it can
// add another outgoing htlc, treat the link as unusable.
if err := link.MayAddOutgoingHtlc(); err != nil {
return 0
}
// Otherwise, we'll return the current best estimate
// for the available bandwidth for the link.
return link.Bandwidth()
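Returning a zero hint is what keeps pathfinding off the saturated channel: an edge whose reported bandwidth cannot cover the payment amount is pruned from route selection. A rough standalone sketch of that pruning decision (edgeUsable is illustrative, not lnd's pathfinding API):

package main

import "fmt"

// edgeUsable prunes any edge whose bandwidth hint cannot carry the amount,
// so the zero hint returned above excludes the channel from every route.
func edgeUsable(bandwidthHintMsat, amtMsat uint64) bool {
	return bandwidthHintMsat >= amtMsat
}

func main() {
	fmt.Println(edgeUsable(0, 1000)) // false: the saturated channel is pruned.
}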