
discovery: don't historical sync when NumActiveSyncers == 0

Currently when numgraphsyncpeers=0, lnd will still attempt to perform
an initial historical sync. We change this behavior here to forgo
historical sync entirely when numgraphsyncpeers is zero, since the
routing table isn't being updated anyway while the node is active.

This permits a no-graph lnd mode where no syncing occurs at all.
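For reference, numgraphsyncpeers is the lnd configuration flag that feeds
NumActiveSyncers. A minimal lnd.conf sketch of the no-graph mode this commit
enables (the flag name is taken from the itest diff below; the section header
is standard lnd.conf layout, everything else is illustrative):

    ; lnd.conf sketch: run with zero active gossip syncers. After this
    ; commit this also skips the initial historical graph sync, so no
    ; graph syncing happens at all.
    [Application Options]
    numgraphsyncpeers=0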
Conner Fromknecht committed 3 years ago · commit 58e924ad1c · branch: master
Changed files:
  1. discovery/sync_manager.go (15 lines changed)
  2. discovery/sync_manager_test.go (48 lines changed)
  3. lntest/itest/lnd_test.go (2 lines changed)

discovery/sync_manager.go (15 lines changed)

@@ -279,7 +279,8 @@ func (m *SyncManager) syncerHandler() {
 		case len(m.activeSyncers) == 0 &&
 			len(m.inactiveSyncers) == 0:
-			attemptHistoricalSync = true
+			attemptHistoricalSync =
+				m.cfg.NumActiveSyncers > 0
 			fallthrough

 		// If we've exceeded our total number of active syncers,
@@ -353,6 +354,8 @@ func (m *SyncManager) syncerHandler() {
 			case initialHistoricalSyncer == nil:
 				fallthrough
 			case staleSyncer.peer != initialHistoricalSyncer.cfg.peerPub:
+				fallthrough
+			case m.cfg.NumActiveSyncers == 0:
 				continue
 			}
@@ -414,6 +417,16 @@ func (m *SyncManager) syncerHandler() {
 		// Our HistoricalSyncTicker has ticked, so we'll randomly select
 		// a peer and force a historical sync with them.
 		case <-m.cfg.HistoricalSyncTicker.Ticks():
+			// To be extra cautious, gate the forceHistoricalSync
+			// call such that it can only execute if we are
+			// configured to have a non-zero number of sync peers.
+			// This way even if the historical sync ticker manages
+			// to tick we can be sure that a historical sync won't
+			// accidentally begin.
+			if m.cfg.NumActiveSyncers == 0 {
+				continue
+			}
+
 			// If we don't have a syncer available we have nothing
 			// to do.
 			s := m.forceHistoricalSync()
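Taken together, the three hunks gate every path into a historical sync on
NumActiveSyncers: the startup path computes attemptHistoricalSync from it, the
stale-syncer path bails out when it is zero, and the ticker path is
short-circuited before forceHistoricalSync can run. A self-contained Go sketch
of that ticker-gating pattern (the names NumActiveSyncers and syncerHandler
mirror the diff; the scaffolding around them is illustrative, not lnd code):

    package main

    import (
    	"fmt"
    	"time"
    )

    type config struct {
    	// NumActiveSyncers mirrors the field checked in the diff above.
    	NumActiveSyncers int
    }

    func syncerHandler(cfg config, ticks <-chan time.Time, quit <-chan struct{}) {
    	for {
    		select {
    		case <-ticks:
    			// Mirrors the third hunk: even if the ticker fires,
    			// never start a (mock) historical sync with zero
    			// configured syncers.
    			if cfg.NumActiveSyncers == 0 {
    				continue
    			}
    			fmt.Println("forcing historical sync")

    		case <-quit:
    			return
    		}
    	}
    }

    func main() {
    	ticks := make(chan time.Time)
    	quit := make(chan struct{})
    	go syncerHandler(config{NumActiveSyncers: 0}, ticks, quit)

    	ticks <- time.Time{} // the tick is swallowed by the guard
    	close(quit)
    }

The same shape explains why the guard lives inside the select case rather than
around the whole loop: the handler must keep draining its other channels even
when syncing is disabled.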

discovery/sync_manager_test.go (48 lines changed)

@@ -3,6 +3,7 @@ package discovery
 import (
 	"fmt"
 	"reflect"
+	"sync"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -226,6 +227,31 @@ func TestSyncManagerRotateActiveSyncerCandidate(t *testing.T) {
 	assertPassiveSyncerTransition(t, passiveSyncer, passiveSyncPeer)
 }

+// TestSyncManagerNoInitialHistoricalSync ensures no initial sync is attempted
+// when NumActiveSyncers is set to 0.
+func TestSyncManagerNoInitialHistoricalSync(t *testing.T) {
+	t.Parallel()
+
+	syncMgr := newTestSyncManager(0)
+	syncMgr.Start()
+	defer syncMgr.Stop()
+
+	// We should not expect any messages from the peer.
+	peer := randPeer(t, syncMgr.quit)
+	err := syncMgr.InitSyncState(peer)
+	require.NoError(t, err)
+	assertNoMsgSent(t, peer)
+
+	// Force the historical syncer to tick. This shouldn't happen normally
+	// since the ticker is never started. However, we will test that even
+	// if this were to occur that a historical sync does not progress.
+	syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{}
+
+	assertNoMsgSent(t, peer)
+	s := assertSyncerExistence(t, syncMgr, peer)
+	assertSyncerStatus(t, s, chansSynced, PassiveSync)
+}
+
 // TestSyncManagerInitialHistoricalSync ensures that we only attempt a single
 // historical sync during the SyncManager's startup. If the peer corresponding
 // to the initial historical syncer disconnects, we should attempt to find a
@@ -233,7 +259,7 @@ func TestSyncManagerRotateActiveSyncerCandidate(t *testing.T) {
 func TestSyncManagerInitialHistoricalSync(t *testing.T) {
 	t.Parallel()

-	syncMgr := newTestSyncManager(0)
+	syncMgr := newTestSyncManager(1)

 	// The graph should not be considered as synced since the sync manager
 	// has yet to start.
@@ -278,13 +304,27 @@ func TestSyncManagerInitialHistoricalSync(t *testing.T) {
 	if !syncMgr.IsGraphSynced() {
 		t.Fatal("expected graph to be considered as synced")
 	}

+	// The historical syncer should be active after the sync completes.
+	assertActiveGossipTimestampRange(t, finalHistoricalPeer)
+
 	// Once the initial historical sync has succeeded, another one should
 	// not be attempted by disconnecting the peer who performed it.
 	extraPeer := randPeer(t, syncMgr.quit)
 	syncMgr.InitSyncState(extraPeer)
 	assertNoMsgSent(t, extraPeer)
+
+	// Pruning the first peer will cause the passive peer to send an active
+	// gossip timestamp msg, which we must consume asynchronously for the
+	// call to return.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		assertActiveGossipTimestampRange(t, extraPeer)
+	}()
 	syncMgr.PruneSyncState(finalHistoricalPeer.PubKey())
+	wg.Wait()
+
+	// No further messages should be sent.
 	assertNoMsgSent(t, extraPeer)
 }
@@ -328,7 +368,7 @@ func TestSyncManagerHistoricalSyncOnReconnect(t *testing.T) {
 func TestSyncManagerForceHistoricalSync(t *testing.T) {
 	t.Parallel()

-	syncMgr := newTestSyncManager(0)
+	syncMgr := newTestSyncManager(1)
 	syncMgr.Start()
 	defer syncMgr.Stop()
@@ -364,7 +404,7 @@ func TestSyncManagerForceHistoricalSync(t *testing.T) {
 func TestSyncManagerGraphSyncedAfterHistoricalSyncReplacement(t *testing.T) {
 	t.Parallel()

-	syncMgr := newTestSyncManager(0)
+	syncMgr := newTestSyncManager(1)
 	syncMgr.Start()
 	defer syncMgr.Stop()
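The new test drives the historical sync ticker by hand through lnd's
ticker.Force mock, whose exported Force channel lets a test inject ticks
deterministically instead of waiting on a real timer. A self-contained sketch
of that pattern (forceTicker here is a stand-in for ticker.Force; only the
shape of the API is taken from the diff above):

    package main

    import (
    	"fmt"
    	"time"
    )

    // forceTicker exposes its tick channel so tests can fire it manually,
    // the way the test above does via HistoricalSyncTicker.(*ticker.Force).
    type forceTicker struct {
    	Force chan time.Time
    }

    func newForceTicker() *forceTicker {
    	return &forceTicker{Force: make(chan time.Time)}
    }

    func (f *forceTicker) Ticks() <-chan time.Time { return f.Force }

    func main() {
    	tk := newForceTicker()

    	done := make(chan struct{})
    	go func() {
    		defer close(done)
    		// Component under test: blocks until a tick arrives.
    		<-tk.Ticks()
    		fmt.Println("tick observed")
    	}()

    	// The test forces a tick deterministically, no timers involved.
    	tk.Force <- time.Time{}
    	<-done
    }

This is also why the test can assert a negative: since the real ticker is
never started, the only tick that can ever arrive is the one the test sends,
so assertNoMsgSent afterwards proves the guard swallowed it.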

lntest/itest/lnd_test.go (2 lines changed)

@@ -10398,7 +10398,7 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b
 	var aliceArgs []string
 	if pinned {
 		aliceArgs = []string{
-			"--numgraphsyncpeers=1",
+			"--numgraphsyncpeers=0",
 			fmt.Sprintf("--gossip.pinned-syncers=%s", bobPubkey),
 		}
 	}
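The itest tweak doubles as a regression check that pinned syncers keep
flowing gossip even with zero active syncers, since a pinned syncer is
configured per-peer rather than drawn from the numgraphsyncpeers rotation. A
standalone sketch of assembling those startup flags (bobPubkey is a
hypothetical placeholder; the flags themselves are the ones in the hunk
above):

    package main

    import "fmt"

    func main() {
    	bobPubkey := "02abc...def" // hypothetical placeholder, not a real key

    	aliceArgs := []string{
    		// Zero active syncers: no rotating graph sync and, after
    		// this commit, no initial historical sync either.
    		"--numgraphsyncpeers=0",
    		// The pinned syncer is configured per-peer, so Bob still
    		// receives gossip from Alice despite the zero above.
    		fmt.Sprintf("--gossip.pinned-syncers=%s", bobPubkey),
    	}

    	fmt.Println(aliceArgs)
    }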
