From ea51ec34b1426b7dba0e0b57158f8c58bccae2e9 Mon Sep 17 00:00:00 2001
From: Wilmer Paulino
Date: Wed, 5 Sep 2018 18:22:29 -0700
Subject: [PATCH] peer: prune persistent peer connection on zero on-disk
 channels

In this commit, we fix a small bug in the persistent peer connection
pruning logic. Before this commit, we would prune the persistent
connection to a peer if all of its links happened to be inactive. This
isn't ideal, as the channels are still open, so we should always be
attempting to reconnect to the peer. We fix this by instead looking at
the set of channels on disk and only pruning the persistent connection
if there aren't any.
---
 peer.go       | 26 ++++++++------------------
 test_utils.go | 13 +++++++++----
 2 files changed, 17 insertions(+), 22 deletions(-)

diff --git a/peer.go b/peer.go
index de8e54b8..d438a470 100644
--- a/peer.go
+++ b/peer.go
@@ -299,11 +299,16 @@ func (p *peer) Start() error {
 		return err
 	}
 
+	if len(activeChans) == 0 {
+		p.server.prunePersistentPeerConnection(p.pubKeyBytes)
+	}
+
 	// Next, load all the active channels we have with this peer,
 	// registering them with the switch and launching the necessary
 	// goroutines required to operate them.
 	peerLog.Debugf("Loaded %v active channels from database with "+
 		"NodeKey(%x)", len(activeChans), p.PubKey())
+
 	if err := p.loadActiveChannels(activeChans); err != nil {
 		return fmt.Errorf("unable to load channels: %v", err)
 	}
@@ -1993,6 +1998,7 @@ func (p *peer) finalizeChanClosure(chanCloser *channelCloser) {
 
 	go waitForChanToClose(chanCloser.negotiationHeight, notifier, errChan,
 		chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() {
+
 			// Respond to the local subsystem which requested the
 			// channel closure.
 			if closeReq != nil {
@@ -2005,18 +2011,6 @@ func (p *peer) finalizeChanClosure(chanCloser *channelCloser) {
 					},
 				}
 			}
-
-			// Remove the persistent connection to this peer if we
-			// no longer have open channels with them.
-			p.activeChanMtx.Lock()
-			numActiveChans := len(p.activeChannels)
-			p.activeChanMtx.Unlock()
-
-			if numActiveChans == 0 {
-				p.server.prunePersistentPeerConnection(
-					p.pubKeyBytes,
-				)
-			}
 		})
 }
 
@@ -2061,19 +2055,15 @@ func waitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
 	cb()
 }
 
-// WipeChannel removes the passed channel point from all indexes associated
-// with the peer, and the switch.
+// WipeChannel removes the passed channel point from all indexes associated with
+// the peer, and the switch.
 func (p *peer) WipeChannel(chanPoint *wire.OutPoint) error {
-
 	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
 
 	p.activeChanMtx.Lock()
 	if channel, ok := p.activeChannels[chanID]; ok {
 		channel.Stop()
 		delete(p.activeChannels, chanID)
-		if len(p.activeChannels) == 0 {
-			p.server.prunePersistentPeerConnection(p.pubKeyBytes)
-		}
 	}
 	p.activeChanMtx.Unlock()
 
diff --git a/test_utils.go b/test_utils.go
index 6f4b1383..2f954ff2 100644
--- a/test_utils.go
+++ b/test_utils.go
@@ -272,21 +272,21 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
 		Packager:   channeldb.NewChannelPackager(shortChanID),
 	}
 
-	addr := &net.TCPAddr{
+	aliceAddr := &net.TCPAddr{
 		IP:   net.ParseIP("127.0.0.1"),
 		Port: 18555,
 	}
 
-	if err := aliceChannelState.SyncPending(addr, 0); err != nil {
+	if err := aliceChannelState.SyncPending(aliceAddr, 0); err != nil {
 		return nil, nil, nil, nil, err
 	}
 
-	addr = &net.TCPAddr{
+	bobAddr := &net.TCPAddr{
 		IP:   net.ParseIP("127.0.0.1"),
 		Port: 18556,
 	}
 
-	if err := bobChannelState.SyncPending(addr, 0); err != nil {
+	if err := bobChannelState.SyncPending(bobAddr, 0); err != nil {
 		return nil, nil, nil, nil, err
 	}
 
@@ -363,6 +363,11 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
 	s.htlcSwitch.Start()
 
 	alicePeer := &peer{
+		addr: &lnwire.NetAddress{
+			IdentityKey: aliceKeyPub,
+			Address:     aliceAddr,
+		},
+
 		server:        s,
 		sendQueue:     make(chan outgoingMsg, 1),
 		outgoingQueue: make(chan outgoingMsg, outgoingQueueLen),