discovery: skip historical sync when NumActiveSyncers == 0

Currently when numgraphsyncpeers=0, lnd will still attempt to perform
an initial historical sync. We change this behavior here to forgo the
historical sync entirely when numgraphsyncpeers is zero, since the
routing table isn't being updated anyway while the node is active.

This permits a no-graph lnd mode where no syncing occurs at all.
Conner Fromknecht 2021-01-22 13:01:29 -08:00
parent 5cec468ff4
commit 58e924ad1c
3 changed files with 59 additions and 6 deletions
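
For context, this no-graph mode is driven by the existing numgraphsyncpeers option, the same flag exercised by the itest change below. A minimal lnd.conf sketch (assuming the option sits in the default section like other top-level flags):

    [Application Options]
    numgraphsyncpeers=0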

discovery/sync_manager.go

@@ -279,7 +279,8 @@ func (m *SyncManager) syncerHandler() {
 		case len(m.activeSyncers) == 0 &&
 			len(m.inactiveSyncers) == 0:
-			attemptHistoricalSync = true
+			attemptHistoricalSync =
+				m.cfg.NumActiveSyncers > 0
 			fallthrough
 
 		// If we've exceeded our total number of active syncers,
@@ -353,6 +354,8 @@ func (m *SyncManager) syncerHandler() {
 			case initialHistoricalSyncer == nil:
 				fallthrough
 			case staleSyncer.peer != initialHistoricalSyncer.cfg.peerPub:
+				fallthrough
+			case m.cfg.NumActiveSyncers == 0:
 				continue
 			}
 
@@ -414,6 +417,16 @@ func (m *SyncManager) syncerHandler() {
 		// Our HistoricalSyncTicker has ticked, so we'll randomly select
 		// a peer and force a historical sync with them.
 		case <-m.cfg.HistoricalSyncTicker.Ticks():
+			// To be extra cautious, gate the forceHistoricalSync
+			// call such that it can only execute if we are
+			// configured to have a non-zero number of sync peers.
+			// This way even if the historical sync ticker manages
+			// to tick we can be sure that a historical sync won't
+			// accidentally begin.
+			if m.cfg.NumActiveSyncers == 0 {
+				continue
+			}
+
 			// If we don't have a syncer available we have nothing
 			// to do.
 			s := m.forceHistoricalSync()
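
Taken together, the three hunks above suppress historical syncs at each entry point when NumActiveSyncers is zero: the initial sync at startup, the replacement sync when a stale syncer disconnects, and the periodic ticker-driven sync. A condensed, runnable sketch of the ticker guard follows; it is not lnd's actual syncerHandler, and the syncCfg type and channel wiring here are invented for illustration:

package main

import (
	"fmt"
	"time"
)

// syncCfg stands in for the SyncManager config; NumActiveSyncers is the
// only knob borrowed from the diff above.
type syncCfg struct {
	NumActiveSyncers int
}

func main() {
	cfg := syncCfg{NumActiveSyncers: 0}

	ticks := time.NewTicker(50 * time.Millisecond)
	defer ticks.Stop()
	quit := time.After(200 * time.Millisecond)

	for {
		select {
		case <-ticks.C:
			// The guard from the diff: even if the historical
			// sync ticker fires, never begin a sync when no
			// active syncers are configured.
			if cfg.NumActiveSyncers == 0 {
				fmt.Println("tick ignored: no active syncers")
				continue
			}
			fmt.Println("would force a historical sync")
		case <-quit:
			return
		}
	}
}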

discovery/sync_manager_test.go

@@ -3,6 +3,7 @@ package discovery
 import (
 	"fmt"
 	"reflect"
+	"sync"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -226,6 +227,31 @@ func TestSyncManagerRotateActiveSyncerCandidate(t *testing.T) {
 	assertPassiveSyncerTransition(t, passiveSyncer, passiveSyncPeer)
 }
 
+// TestSyncManagerNoInitialHistoricalSync ensures no initial sync is attempted
+// when NumActiveSyncers is set to 0.
+func TestSyncManagerNoInitialHistoricalSync(t *testing.T) {
+	t.Parallel()
+
+	syncMgr := newTestSyncManager(0)
+	syncMgr.Start()
+	defer syncMgr.Stop()
+
+	// We should not expect any messages from the peer.
+	peer := randPeer(t, syncMgr.quit)
+	err := syncMgr.InitSyncState(peer)
+	require.NoError(t, err)
+	assertNoMsgSent(t, peer)
+
+	// Force the historical syncer to tick. This shouldn't happen normally
+	// since the ticker is never started. However, we will test that even
+	// if this were to occur, a historical sync does not progress.
+	syncMgr.cfg.HistoricalSyncTicker.(*ticker.Force).Force <- time.Time{}
+	assertNoMsgSent(t, peer)
+
+	s := assertSyncerExistence(t, syncMgr, peer)
+	assertSyncerStatus(t, s, chansSynced, PassiveSync)
+}
+
 // TestSyncManagerInitialHistoricalSync ensures that we only attempt a single
 // historical sync during the SyncManager's startup. If the peer corresponding
 // to the initial historical syncer disconnects, we should attempt to find a
@@ -233,7 +259,7 @@ func TestSyncManagerRotateActiveSyncerCandidate(t *testing.T) {
 func TestSyncManagerInitialHistoricalSync(t *testing.T) {
 	t.Parallel()
 
-	syncMgr := newTestSyncManager(0)
+	syncMgr := newTestSyncManager(1)
 
 	// The graph should not be considered as synced since the sync manager
 	// has yet to start.
@@ -278,13 +304,27 @@ func TestSyncManagerInitialHistoricalSync(t *testing.T) {
 	if !syncMgr.IsGraphSynced() {
 		t.Fatal("expected graph to be considered as synced")
 	}
 
+	// The historical syncer should be active after the sync completes.
+	assertActiveGossipTimestampRange(t, finalHistoricalPeer)
+
 	// Once the initial historical sync has succeeded, another one should
 	// not be attempted by disconnecting the peer who performed it.
 	extraPeer := randPeer(t, syncMgr.quit)
 	syncMgr.InitSyncState(extraPeer)
-	assertNoMsgSent(t, extraPeer)
+
+	// Pruning the first peer will cause the passive peer to send an active
+	// gossip timestamp msg, which we must consume asynchronously for the
+	// call to return.
+	var wg sync.WaitGroup
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		assertActiveGossipTimestampRange(t, extraPeer)
+	}()
 	syncMgr.PruneSyncState(finalHistoricalPeer.PubKey())
+	wg.Wait()
+
+	// No further messages should be sent.
 	assertNoMsgSent(t, extraPeer)
 }
@@ -328,7 +368,7 @@ func TestSyncManagerHistoricalSyncOnReconnect(t *testing.T) {
 func TestSyncManagerForceHistoricalSync(t *testing.T) {
 	t.Parallel()
 
-	syncMgr := newTestSyncManager(0)
+	syncMgr := newTestSyncManager(1)
 	syncMgr.Start()
 	defer syncMgr.Stop()
@@ -364,7 +404,7 @@ func TestSyncManagerForceHistoricalSync(t *testing.T) {
 func TestSyncManagerGraphSyncedAfterHistoricalSyncReplacement(t *testing.T) {
 	t.Parallel()
 
-	syncMgr := newTestSyncManager(0)
+	syncMgr := newTestSyncManager(1)
 	syncMgr.Start()
 	defer syncMgr.Stop()
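
The reworked TestSyncManagerInitialHistoricalSync depends on a detail worth noting: pruning the historical sync peer synchronously promotes the remaining passive peer, which emits a gossip timestamp message over the mock peer's unbuffered pipe, so the test must consume that message in a goroutine or PruneSyncState would block. A minimal sketch of this consume-while-calling pattern; the promote helper and message string are invented stand-ins, not lnd APIs:

package main

import (
	"fmt"
	"sync"
)

// promote mimics a synchronous call that, as a side effect, sends a
// message on an unbuffered channel and therefore cannot return until
// someone receives it.
func promote(msgs chan<- string) {
	msgs <- "GossipTimestampRange"
}

func main() {
	msgs := make(chan string) // unbuffered, like the test peer's pipe

	// Consume the expected message concurrently so promote can return,
	// then wait for the consumer before making further assertions.
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		fmt.Println("received:", <-msgs)
	}()

	promote(msgs)
	wg.Wait()
}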

lntest/itest/lnd_test.go

@@ -10398,7 +10398,7 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b
 	var aliceArgs []string
 	if pinned {
 		aliceArgs = []string{
-			"--numgraphsyncpeers=1",
+			"--numgraphsyncpeers=0",
 			fmt.Sprintf("--gossip.pinned-syncers=%s", bobPubkey),
 		}
 	}
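
The itest flip above works because pinned syncers are configured separately from the rotating active-syncer budget, so Alice still receives gossip from her pinned peer Bob even with numgraphsyncpeers=0. A sketch of the equivalent command line, with the pubkey reduced to a placeholder:

    lnd --numgraphsyncpeers=0 \
        --gossip.pinned-syncers=<bob_pubkey>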