server: in peerBootstrapper check for enough conns before attempting backoff
In this commit, we modify the main loop within the peerBootstrapper slightly to check for a sufficient number of connections _before_ checking whether we need to back off the main loop. With this, we avoid unnecessarily backing off unless an actual error occurs.
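For context, a minimal self-contained sketch of the loop shape after this change follows. The server struct, ticker interval, and the elided connection logic are illustrative stand-ins, not the real lnd implementation; only the field and variable names (peersByPub, numTargetPeers, epochAttempts, epochErrors) come from the diff below.

package main

import (
	"log"
	"sync"
	"sync/atomic"
	"time"
)

// Illustrative stand-in for the real server type; only the fields the
// loop touches are included here.
type server struct {
	mu         sync.Mutex
	peersByPub map[string]struct{}
}

func (s *server) peerBootstrapper(numTargetPeers uint32) {
	var epochErrors uint32 // written atomically by connection attempts
	var epochAttempts uint32

	sampleTicker := time.NewTicker(time.Second)
	defer sampleTicker.Stop()

	for range sampleTicker.C {
		// Check for a sufficient number of connections first, so we
		// only reach the backoff logic when more peers are actually
		// needed.
		s.mu.Lock()
		numActivePeers := uint32(len(s.peersByPub))
		s.mu.Unlock()
		if numActivePeers >= numTargetPeers {
			continue
		}

		// Only back off if every attempt in the last epoch failed.
		if epochAttempts > 0 &&
			atomic.LoadUint32(&epochErrors) >= epochAttempts {

			log.Println("all attempts failed, backing off")
			// ... double the backoff up to a ceiling, then reset
			// the counters, as the real loop does ...
			atomic.StoreUint32(&epochErrors, 0)
			epochAttempts = 0
		}

		// ... sample and connect to numTargetPeers - numActivePeers
		// more peers, incrementing epochAttempts/epochErrors ...
	}
}

func main() {
	s := &server{peersByPub: make(map[string]struct{})}
	s.peerBootstrapper(8) // runs until the process is killed
}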
parent 4aeaf5aaa9
commit e5c303375b

server.go | 25 +++++++++++++------------
1 file changed, 13 insertions(+), 12 deletions(-)
@@ -550,12 +550,25 @@ func (s *server) peerBootstrapper(numTargetPeers uint32,
 		// The ticker has just woken us up, so we'll need to check if
 		// we need to attempt to connect our to any more peers.
 		case <-sampleTicker.C:
+			// Obtain the current number of peers, so we can gauge
+			// if we need to sample more peers or not.
+			s.mu.Lock()
+			numActivePeers := uint32(len(s.peersByPub))
+			s.mu.Unlock()
+
+			// If we have enough peers, then we can loop back
+			// around to the next round as we're done here.
+			if numActivePeers >= numTargetPeers {
+				continue
+			}
+
 			// If all of our attempts failed during this last back
 			// off period, then will increase our backoff to 5
 			// minute ceiling to avoid an excessive number of
 			// queries
 			//
 			// TODO(roasbeef): add reverse policy too?
+
 			if epochAttempts > 0 &&
 				atomic.LoadUint32(&epochErrors) >= epochAttempts {
 
@@ -575,18 +588,6 @@ func (s *server) peerBootstrapper(numTargetPeers uint32,
 			atomic.StoreUint32(&epochErrors, 0)
 			epochAttempts = 0
 
-			// Obtain the current number of peers, so we can gauge
-			// if we need to sample more peers or not.
-			s.mu.Lock()
-			numActivePeers := uint32(len(s.peersByPub))
-			s.mu.Unlock()
-
-			// If we have enough peers, then we can loop back
-			// around to the next round as we're done here.
-			if numActivePeers >= numTargetPeers {
-				continue
-			}
-
 			// Since we know need more peers, we'll compute the
 			// exact number we need to reach our threshold.
 			numNeeded := numTargetPeers - numActivePeers