From 7cbe78eeee434491598b65a169eb066e29f74327 Mon Sep 17 00:00:00 2001
From: Olaoluwa Osuntokun
Date: Fri, 6 Apr 2018 12:55:11 -0700
Subject: [PATCH] peer: re-use a static writeBuf within writeMessage to
 optimize memory usage

In this commit, we make a very small change to the way writing messages
works in the peer, which should have a large impact w.r.t. reducing memory
usage amongst chatty nodes.

When profiling the heap on one of my nodes earlier, I noticed this fragment:

```
Showing top 20 nodes out of 68
      flat  flat%   sum%        cum   cum%
         0     0%     0%    75.53MB 54.61%  main.(*peer).writeHandler
   75.53MB 54.61% 54.61%    75.53MB 54.61%  main.(*peer).writeMessage
```

This points to an inefficiency in the way we handle allocations when writing
new messages. Drilling down further, we see:

```
(pprof) list writeMessage
Total: 138.31MB
ROUTINE ======================== main.(*peer).writeMessage in /root/go/src/github.com/lightningnetwork/lnd/peer.go
   75.53MB    75.53MB (flat, cum) 54.61% of Total
         .          .   1104:	p.logWireMessage(msg, false)
         .          .   1105:
         .          .   1106:	// As the Lightning wire protocol is fully message oriented, we only
         .          .   1107:	// allows one wire message per outer encapsulated crypto message. So
         .          .   1108:	// we'll create a temporary buffer to write the message directly to.
   75.53MB    75.53MB   1109:	var msgPayload [lnwire.MaxMessagePayload]byte
         .          .   1110:	b := bytes.NewBuffer(msgPayload[0:0:len(msgPayload)])
         .          .   1111:
         .          .   1112:	// With the temp buffer created and sliced properly (length zero, full
         .          .   1113:	// capacity), we'll now encode the message directly into this buffer.
         .          .   1114:	n, err := lnwire.WriteMessage(b, msg, 0)

(pprof) list writeHandler
Total: 138.31MB
ROUTINE ======================== main.(*peer).writeHandler in /root/go/src/github.com/lightningnetwork/lnd/peer.go
         0    75.53MB (flat, cum) 54.61% of Total
         .          .   1148:
         .          .   1149:		// Write out the message to the socket, closing the
         .          .   1150:		// 'sentChan' if it's non-nil, The 'sentChan' allows
         .          .   1151:		// callers to optionally synchronize sends with the
         .          .   1152:		// writeHandler.
         .    75.53MB   1153:		err := p.writeMessage(outMsg.msg)
         .          .   1154:		if outMsg.errChan != nil {
         .          .   1155:			outMsg.errChan <- err
         .          .   1156:		}
         .          .   1157:
         .          .   1158:		if err != nil {
```

Ah hah! We create a _new_ buffer each time we want to write a message out.
This is unnecessary and _very_ wasteful (as seen by the profile). The fix is
simple: re-use a buffer unique to each peer when writing out messages. Since
we know what the max message size is, we just allocate one of these 65KB
buffers for each peer, and keep it around until the peer is removed.
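The pattern in isolation looks roughly like the sketch below. The `writer`
type and `maxPayload` constant are illustrative stand-ins rather than lnd's
actual types: a fixed-size array lives on the long-lived struct, and each
write re-slices it to length zero with its capacity capped, so the
bytes.Buffer fills the existing backing array instead of allocating a fresh
~65KB array per message.

```
package main

import (
	"bytes"
	"fmt"
)

// maxPayload stands in for lnwire.MaxMessagePayload: the largest message
// this writer will ever need to serialize.
const maxPayload = 65535

// writer is an illustrative stand-in for the peer struct: it owns a single
// scratch buffer for the lifetime of the connection.
type writer struct {
	writeBuf [maxPayload]byte
}

// writeMessage re-slices the scratch array to length zero while capping its
// capacity, so the bytes.Buffer appends into the existing backing array
// rather than allocating a new one on every call.
func (w *writer) writeMessage(payload []byte) ([]byte, error) {
	b := bytes.NewBuffer(w.writeBuf[0:0:len(w.writeBuf)])
	if _, err := b.Write(payload); err != nil {
		return nil, err
	}

	// The returned slice aliases writeBuf, so it must be consumed before
	// the next call to writeMessage.
	return b.Bytes(), nil
}

func main() {
	w := &writer{}
	out, err := w.writeMessage([]byte("ping"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes into the re-used buffer\n", len(out))
}
```

The trade-off is roughly 65KB of resident memory held per peer, and the fact
that the encoded bytes alias the scratch buffer (so each message must be
flushed to the socket before the next one is encoded), in exchange for
avoiding a large per-message allocation.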
---
 peer.go | 16 +++++++++++-----
 1 file changed, 11 insertions(+), 5 deletions(-)

diff --git a/peer.go b/peer.go
index b76c099d..6f9be4a4 100644
--- a/peer.go
+++ b/peer.go
@@ -169,6 +169,12 @@ type peer struct {
 	// TODO(halseth): remove when link failure is properly handled.
 	failedChannels map[lnwire.ChannelID]struct{}
 
+	// writeBuf is a buffer that we'll re-use in order to encode wire
+	// messages to write out directly on the socket. By re-using this
+	// buffer, we avoid needing to allocate more memory each time a new
+	// message is to be sent to a peer.
+	writeBuf [lnwire.MaxMessagePayload]byte
+
 	queueQuit chan struct{}
 	quit      chan struct{}
 	wg        sync.WaitGroup
@@ -1103,11 +1109,11 @@ func (p *peer) writeMessage(msg lnwire.Message) error {
 	// TODO(roasbeef): add message summaries
 	p.logWireMessage(msg, false)
 
-	// As the Lightning wire protocol is fully message oriented, we only
-	// allows one wire message per outer encapsulated crypto message. So
-	// we'll create a temporary buffer to write the message directly to.
-	var msgPayload [lnwire.MaxMessagePayload]byte
-	b := bytes.NewBuffer(msgPayload[0:0:len(msgPayload)])
+	// We'll re-slice our static write buffer to allow this new message to
+	// utilize all available space. We also ensure we cap the capacity of
+	// this new buffer to the static buffer which is sized for the largest
+	// possible protocol message.
+	b := bytes.NewBuffer(p.writeBuf[0:0:len(p.writeBuf)])
 
 	// With the temp buffer created and sliced properly (length zero, full
 	// capacity), we'll now encode the message directly into this buffer.
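For reference, heap listings like the ones quoted in the commit message come
from Go's standard pprof tooling. Below is a minimal sketch of exposing a
heap profile over HTTP; the listen address is illustrative, and how this
particular node exposed its profiling endpoint is not shown in the patch.

```
package main

import (
	"log"
	"net/http"

	// The blank import registers the /debug/pprof/* handlers on the
	// default HTTP mux.
	_ "net/http/pprof"
)

func main() {
	// Illustrative address; any free local port works.
	log.Println(http.ListenAndServe("localhost:6060", nil))
}
```

With the endpoint up, `go tool pprof http://localhost:6060/debug/pprof/heap`
followed by `top 20` and `list writeMessage` in the interactive shell yields
output of the same shape as the listings above.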