discovery: make batch size distinct from chunk size, reduce to 500
This commit reduces the number of channels a syncer will request from the remote node in a single QueryShortChanIDs message.

The current size is derived from chunkSize, which signals the maximum number of short channel IDs that can fit in a single ReplyChannelRange message. For EncodingSortedPlain this number is 8000, and we currently use the same number to dictate the size of the batch requested from the remote peer.

We modify this by introducing a separately configurable batchSize, so that both can be tuned independently. The value is chosen to reduce the amount of buffering the remote party must perform, requiring it to queue only 500 responses instead of 8000. In turn, this reduces large spikes in allocation on the remote node, at the expense of a few extra round trips for the control messages. This overhead should be negligible, since the control messages are much smaller than the messages being returned.
parent 73791e15a1
commit a4b4fe666a
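As a rough illustration of the trade-off described in the commit message (this sketch is not part of the change), the standalone Go program below shows how dropping the batch size from 8000 to 500 trades one QueryShortChanIDs round trip for sixteen, while capping how much the remote peer has to queue per query. numQueries is a made-up helper, not an lnd function:

    package main

    import "fmt"

    // numQueries returns how many QueryShortChanIDs messages are needed to
    // request n short channel IDs when at most batch IDs are sent per query.
    func numQueries(n, batch int) int {
        return (n + batch - 1) / batch
    }

    func main() {
        const newChans = 8000

        // Old behavior: the batch size equals the chunk size for
        // EncodingSortedPlain (8000), so the whole backlog fits in one
        // query, but the remote peer must buffer replies for 8000 channels.
        fmt.Println(numQueries(newChans, 8000)) // 1

        // New behavior: requestBatchSize = 500 costs a few extra control
        // messages, but the remote peer only queues replies for 500
        // channels at a time.
        fmt.Println(numQueries(newChans, 500)) // 16
    }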
@@ -628,6 +628,7 @@ func (m *SyncManager) InitSyncState(peer lnpeer.Peer) {
 	channelSeries: m.cfg.ChanSeries,
 	encodingType:  encoding,
 	chunkSize:     encodingTypeToChunkSize[encoding],
+	batchSize:     requestBatchSize,
 	sendToPeer: func(msgs ...lnwire.Message) error {
 		return peer.SendMessage(false, msgs...)
 	},
@@ -128,6 +128,10 @@ const (
 	// syncTransitionTimeout is the default timeout in which we'll wait up
 	// to when attempting to perform a sync transition.
 	syncTransitionTimeout = 5 * time.Second
+
+	// requestBatchSize is the maximum number of channels we will query the
+	// remote peer for in a QueryShortChanIDs message.
+	requestBatchSize = 500
 )

 var (
@@ -183,6 +187,10 @@ type gossipSyncerCfg struct {
 	// encoding type that we can fit into a single message safely.
 	chunkSize int32

+	// batchSize is the max number of channels the syncer will query from
+	// the remote node in a single QueryShortChanIDs request.
+	batchSize int32
+
 	// sendToPeer is a function closure that should send the set of
 	// targeted messages to the peer we've been assigned to sync the graph
 	// state from.
@@ -570,7 +578,7 @@ func (g *GossipSyncer) synchronizeChanIDs() (bool, error) {

 	// If the number of channels to query for is less than the chunk size,
 	// then we can issue a single query.
-	if int32(len(g.newChansToQuery)) < g.cfg.chunkSize {
+	if int32(len(g.newChansToQuery)) < g.cfg.batchSize {
 		queryChunk = g.newChansToQuery
 		g.newChansToQuery = nil

@@ -578,8 +586,8 @@ func (g *GossipSyncer) synchronizeChanIDs() (bool, error) {
 		// Otherwise, we'll need to only query for the next chunk.
 		// We'll slice into our query chunk, then slide down our main
 		// pointer down by the chunk size.
-		queryChunk = g.newChansToQuery[:g.cfg.chunkSize]
-		g.newChansToQuery = g.newChansToQuery[g.cfg.chunkSize:]
+		queryChunk = g.newChansToQuery[:g.cfg.batchSize]
+		g.newChansToQuery = g.newChansToQuery[g.cfg.batchSize:]
 	}

 	log.Infof("GossipSyncer(%x): querying for %v new channels",
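To see the slicing above in isolation, here is a hypothetical, self-contained sketch (not from the codebase): short channel IDs are simplified to uint64, and the syncer's one-query-per-call state machine is collapsed into a loop that drains the pending backlog batchSize IDs at a time:

    package main

    import "fmt"

    // drainInBatches mimics how the syncer consumes newChansToQuery: each pass
    // takes at most batchSize IDs off the front of the pending slice, which is
    // what a single QueryShortChanIDs message would carry.
    func drainInBatches(pending []uint64, batchSize int32) [][]uint64 {
        var batches [][]uint64
        for len(pending) > 0 {
            // If fewer IDs remain than the batch size, one final query
            // covers the rest.
            if int32(len(pending)) < batchSize {
                batches = append(batches, pending)
                pending = nil
                continue
            }

            // Otherwise, slice off the next batch and slide the pending
            // pointer down by the batch size.
            batches = append(batches, pending[:batchSize])
            pending = pending[batchSize:]
        }
        return batches
    }

    func main() {
        pending := make([]uint64, 1200)
        fmt.Println(len(drainInBatches(pending, 500))) // 3 queries: 500, 500, 200
    }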
@@ -125,6 +125,7 @@ func newTestSyncer(hID lnwire.ShortChannelID,
 	channelSeries: newMockChannelGraphTimeSeries(hID),
 	encodingType:  encodingType,
 	chunkSize:     chunkSize,
+	batchSize:     chunkSize,
 	sendToPeer: func(msgs ...lnwire.Message) error {
 		msgChan <- msgs
 		return nil