peer: eliminate excessive queueHandler spinning w/ blocked writeHandler
This commit fixes an existing bug in the interaction between the queueHandler and the writeHandler. Under certain scenarios, if the writeHandler was blocked for a non-negligible period of time, the queueHandler would enter a very tight spinning loop. This was because the break statement in the inner select of the queueHandler didn't actually break out of the enclosing for loop; it only exited the select, so execution immediately re-entered that same select, causing a very tight spin. In this commit, we fix the issue by making two changes: first, we now label the inner for loop so we can break out of it when we detect that the writeHandler is blocked. Second, we introduce a new channel between the queueHandler and the writeHandler that signals the queueHandler once the writeHandler has finished processing the last message.
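For illustration, here is a minimal, self-contained sketch (not the lnd code itself) of the pattern this commit introduces: the queueHandler uses a labeled drain loop so that a blocked writeHandler causes it to stop and wait rather than spin, and the writeHandler performs a non-blocking send on sendQueueSync once a write completes. The field names mirror those in the diff below; the string message type, the simulated slow write, and the main function are stand-ins added only for this example.

	package main

	import (
		"container/list"
		"fmt"
		"time"
	)

	type peer struct {
		// sendQueue carries the next message for the writeHandler. It is
		// unbuffered, so a send blocks until the writeHandler is ready.
		sendQueue chan string

		// sendQueueSync lets the writeHandler tell the queueHandler that
		// the last (possibly slow) write has finished.
		sendQueueSync chan struct{}

		// outgoingQueue is where other sub-systems queue messages.
		outgoingQueue chan string

		quit chan struct{}
	}

	// writeHandler drains sendQueue, simulating a slow wire write, then
	// signals the queueHandler (without blocking) that it can hand over
	// the next message.
	func (p *peer) writeHandler() {
		for {
			select {
			case msg := <-p.sendQueue:
				time.Sleep(50 * time.Millisecond) // simulated blocked write
				fmt.Println("wrote:", msg)

				// If the queueHandler is waiting on us, wake it up.
				select {
				case p.sendQueueSync <- struct{}{}:
				default:
				}
			case <-p.quit:
				return
			}
		}
	}

	// queueHandler buffers incoming messages and feeds them to the
	// writeHandler. The labeled drain loop is exited, not re-entered,
	// when the writeHandler can't accept the next message, so the
	// goroutine never spins.
	func (p *peer) queueHandler() {
		pendingMsgs := list.New()
		for {
		drain:
			for {
				elem := pendingMsgs.Front()
				if elem == nil {
					break drain
				}
				select {
				case p.sendQueue <- elem.Value.(string):
					pendingMsgs.Remove(elem)
				case <-p.quit:
					return
				default:
					// writeHandler is blocked; stop draining.
					break drain
				}
			}

			// Block until a new message arrives or the writeHandler
			// signals that it has caught up, then drain again.
			select {
			case msg := <-p.outgoingQueue:
				pendingMsgs.PushBack(msg)
			case <-p.sendQueueSync:
			case <-p.quit:
				return
			}
		}
	}

	func main() {
		p := &peer{
			sendQueue:     make(chan string),
			sendQueueSync: make(chan struct{}),
			outgoingQueue: make(chan string, 16),
			quit:          make(chan struct{}),
		}
		go p.writeHandler()
		go p.queueHandler()

		for i := 0; i < 5; i++ {
			p.outgoingQueue <- fmt.Sprintf("msg-%d", i)
		}
		time.Sleep(time.Second)
		close(p.quit)
	}

Note that, as in the commit, the signal on sendQueueSync is a non-blocking send: the writeHandler never waits on the queueHandler, it only nudges it if it happens to be listening.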
parent cbdf139696
commit f4e7c36c80

peer.go (39 lines changed)
@@ -111,6 +111,14 @@ type peer struct {
 	// written onto the wire. Note that this channel is unbuffered.
 	sendQueue chan outgoinMsg
 
+	// sendQueueSync is a channel that's used to synchronize sends between
+	// the queueHandler and the writeHandler. At times the writeHandler may
+	// get blocked on sending messages. As a result we require a
+	// synchronization mechanism between the two otherwise the queueHandler
+	// would need to continually spin checking to see if the writeHandler
+	// is ready for an additional message.
+	sendQueueSync chan struct{}
+
 	// outgoingQueue is a buffered channel which allows second/third party
 	// objects to queue messages to be sent out on the wire.
 	outgoingQueue chan outgoinMsg
@@ -172,6 +180,7 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
 		server: server,
 
 		sendQueue:     make(chan outgoinMsg),
+		sendQueueSync: make(chan struct{}),
 		outgoingQueue: make(chan outgoinMsg),
 
 		activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
@@ -810,6 +819,8 @@ func (p *peer) writeMessage(msg lnwire.Message) error {
 	n, err := lnwire.WriteMessage(b, msg, 0)
 	atomic.AddUint64(&p.bytesSent, uint64(n))
 
+	// TODO(roasbeef): add write deadline?
+
 	// Finally, write the message itself in a single swoop.
 	_, err = p.conn.Write(b.Bytes())
 	return err
@@ -817,7 +828,8 @@ func (p *peer) writeMessage(msg lnwire.Message) error {
 
 // writeHandler is a goroutine dedicated to reading messages off of an incoming
 // queue, and writing them out to the wire. This goroutine coordinates with the
-// queueHandler in order to ensure the incoming message queue is quickly drained.
+// queueHandler in order to ensure the incoming message queue is quickly
+// drained.
 //
 // NOTE: This method MUST be run as a goroutine.
 func (p *peer) writeHandler() {
@@ -852,6 +864,14 @@ out:
 				break out
 			}
 
+			// If the queueHandler was waiting for us to complete
+			// the last write, then we'll send it a sginal that
+			// we're done and are awaiting additional messages.
+			select {
+			case p.sendQueueSync <- struct{}{}:
+			default:
+			}
+
 		case <-p.quit:
 			exitErr = errors.Errorf("peer exiting")
 			break out
@@ -877,6 +897,7 @@ func (p *peer) queueHandler() {
 		// Before add a queue'd message our pending message queue,
 		// we'll first try to aggressively empty out our pending list of
 		// messaging.
+	drain:
 		for {
 			// Examine the front of the queue. If this message is
 			// nil, then we've emptied out the queue and can accept
@@ -892,21 +913,27 @@ func (p *peer) queueHandler() {
 			case <-p.quit:
 				return
 			default:
-				break
+				// If the write handler is currently blocked,
+				// then we'll break out of this loop, to avoid
+				// tightly spinning waiting for a blocked write
+				// handler.
+				break drain
 			}
 		}
 
 		// If there weren't any messages to send, or the writehandler
 		// is still blocked, then we'll accept a new message into the
-		// queue from outside sub-systems.
-		//
-		// TODO(roasbeef): need send clause here as well to account for
-		// writeHandler blocking?
+		// queue from outside sub-systems. We'll also attempt to send
+		// to the writeHandler again, as if this succeeds we'll once
+		// again try to aggressively drain the pending message queue.
 		select {
 		case <-p.quit:
 			return
 		case msg := <-p.outgoingQueue:
 			pendingMsgs.PushBack(msg)
+		case <-p.sendQueueSync:
+			// Fall through so we can go back to the top of the
+			// drain loop.
 		}
 
 	}