htlcswitch+test: send switch back error on lnwallet.ErrInsufficientBalance

This commit fixes a bug in which an error that should be sent back to
the switch was swallowed when an insufficient balance error occurred
while attempting to add a new HTLC to the channel state machine. In
this case, no error was ever returned to the client/switch, and the
internal processing within the channelLink would loop forever,
repeatedly attempting to add to the state machine an HTLC that could
never be added due to insufficient balance.

We fix this issue by treating lnwallet.ErrMaxHTLCNumber as the only
error that prompts adding an HTLC to the overflow queue; all other
errors are now sent directly back to the switch.
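
To illustrate the intended behavior, here is a minimal, self-contained
sketch (not the actual lnd code; the error values and helper names are
stand-ins) of the error dispatch after the fix: only the "commitment is
full" error parks the HTLC in an overflow queue, while every other
error, including insufficient balance, is returned to the caller (the
switch) so the payment fails promptly instead of looping:

    package main

    import (
        "errors"
        "fmt"
    )

    // Stand-ins for lnwallet.ErrMaxHTLCNumber and lnwallet.ErrInsufficientBalance.
    var (
        errMaxHTLCNumber       = errors.New("commitment transaction is full")
        errInsufficientBalance = errors.New("insufficient local balance")
    )

    // addHTLC stands in for the channel state machine's attempt to add an HTLC.
    func addHTLC(amt, available int64, numHTLCs, maxHTLCs int) error {
        if numHTLCs >= maxHTLCs {
            return errMaxHTLCNumber
        }
        if amt > available {
            return errInsufficientBalance
        }
        return nil
    }

    // handleAdd mirrors the post-fix dispatch: only a full commitment queues the
    // HTLC for a later retry; any other error is handed back to the caller.
    func handleAdd(amt, available int64, numHTLCs, maxHTLCs int, overflow chan int64) error {
        switch err := addHTLC(amt, available, numHTLCs, maxHTLCs); err {
        case errMaxHTLCNumber:
            overflow <- amt // parked in the overflow queue, retried later
            return nil
        default:
            return err // nil on success, otherwise sent back to the switch
        }
    }

    func main() {
        overflow := make(chan int64, 1)

        // Commitment is full: the add is queued, not failed.
        fmt.Println(handleAdd(1000, 5000, 483, 483, overflow)) // <nil>

        // Insufficient balance: the error now propagates back immediately.
        fmt.Println(handleAdd(10000, 5000, 0, 483, overflow)) // insufficient local balance
    }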
Olaoluwa Osuntokun 2017-09-25 16:09:48 -07:00
parent 97e730cf51
commit 7ae436e30e
3 changed files with 12 additions and 14 deletions

@@ -483,6 +483,8 @@ out:
 // Switch. Possible messages sent by the switch include requests to forward new
 // HTLCs, timeout previously cleared HTLCs, and finally to settle currently
 // cleared HTLCs with the upstream peer.
+//
+// TODO(roasbeef): add sync ntfn to ensure switch always has consistent view?
 func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
     var isSettle bool
     switch htlc := pkt.htlc.(type) {
@@ -497,8 +499,6 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
             // The channels spare bandwidth is fully allocated, so
             // we'll put this HTLC into the overflow queue.
-            case lnwallet.ErrInsufficientBalance:
-                fallthrough
             case lnwallet.ErrMaxHTLCNumber:
                 log.Infof("Downstream htlc add update with "+
                     "payment hash(%x) have been added to "+

@@ -389,6 +389,8 @@ func (s *Switch) handleLocalDispatch(payment *pendingPayment, packet *htlcPacket
     // we're unable to then we'll bail early.
     failure, err := payment.deobfuscator.Deobfuscate(htlc.Reason)
     if err != nil {
+        // TODO(roasbeef): can happen in case of local error in
+        // link pkt handling
         userErr = errors.Errorf("unable to de-obfuscate "+
             "onion failure, htlc with hash(%x): %v",
             payment.paymentHash[:], err)

@@ -3133,25 +3133,21 @@ func testAsyncPayments(net *networkHarness, t *harnessTest) {
         }
     }
-    // We should receive one insufficient capacity error, because we are
-    // sending on one invoice bigger.
+    // We should receive one insufficient capacity error, because we sent
+    // one more payment than we can actually handle with the current
+    // channel capacity.
     errorReceived := false
     for i := 0; i < numInvoices; i++ {
         if resp, err := alicePayStream.Recv(); err != nil {
             t.Fatalf("payment stream have been closed: %v", err)
         } else if resp.PaymentError != "" {
-            if strings.Contains(resp.PaymentError,
-                lnwire.CodeTemporaryChannelFailure.String()) {
-                if errorReceived {
-                    t.Fatalf("redundant payment "+
-                        "error: %v", resp.PaymentError)
-                }
-                errorReceived = true
-                continue
-            }
-            t.Fatalf("unable to send payment: %v", resp.PaymentError)
+            if errorReceived {
+                t.Fatalf("redundant payment error: %v",
+                    resp.PaymentError)
+            }
+            errorReceived = true
+            continue
         }
     }