htlcswitch: synchronously send the error to the peer on commitment verify fail

In this commit, we fix a slight bug in lnd. Before this commit, we would
send the error to the remote peer, but in an async manner. As a result,
it was possible for the connection to be closed _before_ the error
actually reached the remote party. The fix is simple: wait for the send
to complete when writing the message. This ensures that the error
reaches the remote party before we kill the connection.
Olaoluwa Osuntokun 2018-04-04 17:38:23 -07:00
parent f53a99e18e
commit 0dbd325fd0

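The mechanical change repeated throughout the diff below is a new boolean on the peer's SendMessage method. The following is a sketch of the shape of the API after this commit; the interface subset and the parameter name `sync` are inferred from the call sites in this diff, not copied from lnd's source.

    package htlcswitch

    import "github.com/lightningnetwork/lnd/lnwire"

    // Peer is a sketch of the subset of the peer contract the channel
    // link relies on. Prior to this commit the method was
    // SendMessage(msg lnwire.Message) error.
    type Peer interface {
        // SendMessage writes msg to the remote peer. When sync is true,
        // the call blocks until the message has been written to the wire
        // (or the write fails), rather than merely queueing it; callers
        // about to tear down the connection use this to guarantee
        // delivery.
        SendMessage(msg lnwire.Message, sync bool) error
    }
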
@@ -483,7 +483,7 @@ func (l *channelLink) syncChanStates() error {
         return fmt.Errorf("unable to generate chan sync message for "+
             "ChannelPoint(%v)", l.channel.ChannelPoint())
     }
-    if err := l.cfg.Peer.SendMessage(localChanSyncMsg); err != nil {
+    if err := l.cfg.Peer.SendMessage(localChanSyncMsg, false); err != nil {
         return fmt.Errorf("Unable to send chan sync message for "+
             "ChannelPoint(%v)", l.channel.ChannelPoint())
     }
@@ -525,7 +525,7 @@ func (l *channelLink) syncChanStates() error {
         fundingLockedMsg := lnwire.NewFundingLocked(
             l.ChanID(), nextRevocation,
         )
-        err = l.cfg.Peer.SendMessage(fundingLockedMsg)
+        err = l.cfg.Peer.SendMessage(fundingLockedMsg, false)
         if err != nil {
             return fmt.Errorf("unable to re-send "+
                 "FundingLocked: %v", err)
@@ -575,7 +575,7 @@ func (l *channelLink) syncChanStates() error {
         // immediately so we return to a synchronized state as soon as
         // possible.
         for _, msg := range msgsToReSend {
-            l.cfg.Peer.SendMessage(msg)
+            l.cfg.Peer.SendMessage(msg, false)
         }

     case <-l.quit:
@@ -1058,7 +1058,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
         l.openedCircuits = append(l.openedCircuits, pkt.inKey())
         l.keystoneBatch = append(l.keystoneBatch, pkt.keystone())

-        l.cfg.Peer.SendMessage(htlc)
+        l.cfg.Peer.SendMessage(htlc, false)

     case *lnwire.UpdateFulfillHTLC:
         // An HTLC we forward to the switch has just settled somewhere
@@ -1090,7 +1090,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
         // Then we send the HTLC settle message to the connected peer
         // so we can continue the propagation of the settle message.
-        l.cfg.Peer.SendMessage(htlc)
+        l.cfg.Peer.SendMessage(htlc, false)
         isSettle = true

     case *lnwire.UpdateFailHTLC:
@@ -1122,7 +1122,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
         // Finally, we send the HTLC message to the peer which
         // initially created the HTLC.
-        l.cfg.Peer.SendMessage(htlc)
+        l.cfg.Peer.SendMessage(htlc, false)
         isSettle = true
     }
@@ -1241,10 +1241,14 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
             //
             // TODO(roasbeef): force close chan
             if _, ok := err.(*lnwallet.InvalidCommitSigError); ok {
-                l.cfg.Peer.SendMessage(&lnwire.Error{
+                err := l.cfg.Peer.SendMessage(&lnwire.Error{
                     ChanID: l.ChanID(),
                     Data:   []byte(err.Error()),
-                })
+                }, true)
+                if err != nil {
+                    l.errorf("unable to send msg to "+
+                        "remote peer: %v", err)
+                }
             }

             l.fail("ChannelPoint(%v): unable to accept new "+
@@ -1260,7 +1264,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
             log.Errorf("unable to revoke commitment: %v", err)
             return
         }
-        l.cfg.Peer.SendMessage(nextRevocation)
+        l.cfg.Peer.SendMessage(nextRevocation, false)

         // Since we just revoked our commitment, we may have a new set
         // of HTLC's on our commitment, so we'll send them over our
@@ -1288,7 +1292,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
         // If both commitment chains are fully synced from our PoV,
         // then we don't need to reply with a signature as both sides
-        // already have a commitment with the latest accepted l.
+        // already have a commitment with the latest accepted.
         if l.channel.FullySynced() {
             return
         }
@@ -1457,7 +1461,7 @@ func (l *channelLink) updateCommitTx() error {
         CommitSig: theirCommitSig,
         HtlcSigs:  htlcSigs,
     }
-    l.cfg.Peer.SendMessage(commitSig)
+    l.cfg.Peer.SendMessage(commitSig, false)

     // We've just initiated a state transition, attempt to stop the
     // logCommitTimer. If the timer already ticked, then we'll consume the
@@ -1665,7 +1669,7 @@ func (l *channelLink) updateChannelFee(feePerKw lnwallet.SatPerKWeight) error {
     // We'll then attempt to send a new UpdateFee message, and also lock it
     // in immediately by triggering a commitment update.
     msg := lnwire.NewUpdateFee(l.ChanID(), uint32(feePerKw))
-    if err := l.cfg.Peer.SendMessage(msg); err != nil {
+    if err := l.cfg.Peer.SendMessage(msg, false); err != nil {
        return err
     }
     return l.updateCommitTx()
@@ -2043,7 +2047,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
                 ChanID:          l.ChanID(),
                 ID:              pd.HtlcIndex,
                 PaymentPreimage: preimage,
-            })
+            }, false)
             needUpdate = true

         // There are additional channels left within this route. So
@@ -2364,7 +2368,7 @@ func (l *channelLink) sendHTLCError(htlcIndex uint64, failure lnwire.FailureMessage,
         ChanID: l.ChanID(),
         ID:     htlcIndex,
         Reason: reason,
-    })
+    }, false)
 }

 // sendMalformedHTLCError helper function which sends the malformed HTLC update
@@ -2384,7 +2388,7 @@ func (l *channelLink) sendMalformedHTLCError(htlcIndex uint64,
         ID:           htlcIndex,
         ShaOnionBlob: shaOnionBlob,
         FailureCode:  code,
-    })
+    }, false)
 }

 // fail helper function which is used to encapsulate the action necessary for