peer: eliminate possibility of concurrent map writes in htlcManager

This commit fixes an issue in the htlcManager goroutine, which manages
channel state updates: no mutex protected the two maps that are also
written in the goroutine launched to forward HTLCs to the switch, so
concurrent map writes were possible.

The issue was caught by the Go runtime, which is able to detect invalid
concurrent map writes.
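
For context, here is a minimal, standalone sketch (not lnd code; names are
hypothetical) of the failure mode: two goroutines writing to the same map
with no synchronization, which the Go runtime will typically abort with
"fatal error: concurrent map writes".

package main

import "sync"

func main() {
	m := make(map[int]int)

	var wg sync.WaitGroup
	for g := 0; g < 2; g++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 1000; i++ {
				// Unsynchronized write reached from two goroutines at once;
				// the runtime's map-write check usually crashes here.
				m[i] = i
			}
		}()
	}
	wg.Wait()
}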
Author: Olaoluwa Osuntokun 2017-05-18 20:13:33 -07:00
parent a5616a3c84
commit 041f3f1e98

 peer.go | 15 +++++++++++++++
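
The fix in the diff below embeds a sync.RWMutex in commitmentState and takes
it around accesses to the two shared maps. As an illustrative sketch only
(hypothetical type name, value types, and methods; not the lnd API), the
pattern is: writers take the exclusive lock, readers take the shared lock.

package main

import "sync"

// trackedState mirrors the embedded-RWMutex pattern applied to
// commitmentState in the diff below.
type trackedState struct {
	pendingCircuits map[uint64]string
	cancelReasons   map[uint64]string

	sync.RWMutex
}

// addCircuit writes under the exclusive lock so a write from another
// goroutine can never race with it.
func (s *trackedState) addCircuit(idx uint64, pkt string) {
	s.Lock()
	s.pendingCircuits[idx] = pkt
	s.Unlock()
}

// reason reads under the shared lock, allowing concurrent readers while
// excluding writers.
func (s *trackedState) reason(idx uint64) (string, bool) {
	s.RLock()
	defer s.RUnlock()
	r, ok := s.cancelReasons[idx]
	return r, ok
}

func main() {
	s := &trackedState{
		pendingCircuits: make(map[uint64]string),
		cancelReasons:   make(map[uint64]string),
	}
	s.addCircuit(7, "sphinx-packet")
	_, _ = s.reason(7)
}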

@@ -1203,6 +1203,8 @@ type commitmentState struct {
 	channel   *lnwallet.LightningChannel
 	chanPoint *wire.OutPoint
 	chanID    lnwire.ChannelID
+
+	sync.RWMutex
 }
 
 // htlcManager is the primary goroutine which drives a channel's commitment
@@ -1564,7 +1566,9 @@ func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
 		// switch, we'll attach the routing information so the switch
 		// can finalize the circuit.
 		case sphinx.MoreHops:
+			state.Lock()
 			state.pendingCircuits[index] = sphinxPacket
+			state.Unlock()
 		default:
 			peerLog.Errorf("mal formed onion packet")
 			state.htlcsToCancel[index] = lnwire.SphinxParseError
@@ -1590,7 +1594,9 @@ func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
 			return
 		}
 
+		state.Lock()
 		state.cancelReasons[idx] = lnwire.FailCode(htlcPkt.Reason[0])
+		state.Unlock()
 
 	case *lnwire.CommitSig:
 		// We just received a new update to our local commitment chain,
@@ -1671,7 +1677,10 @@ func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
 			// Otherwise, the HTLC failed, so we propagate
 			// the error back to the potential caller.
 			case lnwallet.Fail:
+				state.Lock()
 				errMsg := state.cancelReasons[parentIndex]
+				state.Unlock()
+
 				p.preImage <- [32]byte{}
 				p.err <- errors.New(errMsg.String())
 			}
@@ -1746,18 +1755,24 @@ func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
 			// We don't need to forward any HTLCs that we
 			// just settled or cancelled above.
 			// TODO(roasbeef): key by index instead?
+			state.RLock()
 			if _, ok := settledPayments[htlc.RHash]; ok {
+				state.RUnlock()
 				continue
 			}
 			if _, ok := cancelledHtlcs[htlc.Index]; ok {
+				state.RUnlock()
 				continue
 			}
+			state.RUnlock()
 
+			state.Lock()
 			onionPkt := state.pendingCircuits[htlc.Index]
 			delete(state.pendingCircuits, htlc.Index)
 			reason := state.cancelReasons[htlc.ParentIndex]
 			delete(state.cancelReasons, htlc.ParentIndex)
+			state.Unlock()
 
 			// Send this fully activated HTLC to the htlc
 			// switch to continue the chained clear/settle.