From dcf841c33b18a2bdd985ca02f33d975486ade0ab Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 8 Jun 2018 20:30:17 -0700 Subject: [PATCH 01/12] server: look up latest addr for node in peerTerminationWatcher In this commit, we modify the look up for inbound peers to ensure that we connect to the "freshest" address until we need to execute the peerTerminationWatcher. We do this as it's possible for a channel to be created by the remote peer during our session. If we don't query for the node's address at the latest point, then we'll miss this new node announcement for the node. --- server.go | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/server.go b/server.go index def6c6a7..fd13cb9e 100644 --- a/server.go +++ b/server.go @@ -1384,9 +1384,11 @@ func (s *server) peerTerminationWatcher(p *peer) { // available for use. s.fundingMgr.CancelPeerReservations(p.PubKey()) + pubKey := p.addr.IdentityKey + // We'll also inform the gossiper that this peer is no longer active, // so we don't need to maintain sync state for it any longer. - s.authGossiper.PruneSyncState(p.addr.IdentityKey) + s.authGossiper.PruneSyncState(pubKey) // Tell the switch to remove all links associated with this peer. // Passing nil as the target link indicates that all links associated @@ -1435,7 +1437,7 @@ func (s *server) peerTerminationWatcher(p *peer) { s.removePeer(p) // Next, check to see if this is a persistent peer or not. - pubStr := string(p.addr.IdentityKey.SerializeCompressed()) + pubStr := string(pubKey.SerializeCompressed()) _, ok := s.persistentPeers[pubStr] if ok { // We'll only need to re-launch a connection request if one @@ -1444,6 +1446,23 @@ func (s *server) peerTerminationWatcher(p *peer) { return } + // We'll ensure that we locate an advertised address to use + // within the peer's address for reconnection purposes. + // + // TODO(roasbeef): use them all? 
+ if p.inbound { + advertisedAddr, err := s.fetchNodeAdvertisedAddr( + pubKey, + ) + if err != nil { + srvrLog.Errorf("Unable to retrieve advertised "+ + "address for node %x: %v", + pubKey.SerializeCompressed(), err) + } else { + p.addr.Address = advertisedAddr + } + } + // Otherwise, we'll launch a new connection request in order to // attempt to maintain a persistent connection with this peer. connReq := &connmgr.ConnReq{ @@ -1526,21 +1545,6 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, addr := conn.RemoteAddr() pubKey := brontideConn.RemotePub() - // We'll ensure that we locate an advertised address to use within the - // peer's address for reconnection purposes. - // - // TODO: leave the address field empty if there aren't any? - if inbound { - advertisedAddr, err := s.fetchNodeAdvertisedAddr(pubKey) - if err != nil { - srvrLog.Errorf("Unable to retrieve advertised address "+ - "for node %x: %v", pubKey.SerializeCompressed(), - err) - } else { - addr = advertisedAddr - } - } - peerAddr := &lnwire.NetAddress{ IdentityKey: pubKey, Address: addr, From c975753f1e3a3bb3deb4d9897838c9b889ff4043 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 8 Jun 2018 20:36:41 -0700 Subject: [PATCH 02/12] server: remove pending conn request if we recv an outbound conn after a scheduled callback In this commit, we ensure that if we're already ignoring a connection, then we also ignore the pending persistent connection request. Otherwise, we'll move to accept the replaced connection, but then continue to attempt connection requests. --- server.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/server.go b/server.go index fd13cb9e..322e5011 100644 --- a/server.go +++ b/server.go @@ -1727,6 +1727,11 @@ func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) // ignore this connection. 
if _, ok := s.scheduledPeerConnection[pubStr]; ok { srvrLog.Debugf("Ignoring connection, peer already scheduled") + + if connReq != nil { + s.connMgr.Remove(connReq.ID()) + } + conn.Close() return } From 7f16e99a80419cd05911e443d2e8fab4d79a7e84 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 8 Jun 2018 20:37:04 -0700 Subject: [PATCH 03/12] lntest: add new StopNode method --- lntest/harness.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lntest/harness.go b/lntest/harness.go index 600c7b02..da462e94 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -566,6 +566,13 @@ func (n *NetworkHarness) ShutdownNode(node *HarnessNode) error { return nil } +// StopNode stops the target node, but doesn't yet clean up its directories. +// This can be used to temporarily bring a node down during a test, to be later +// started up again. +func (n *NetworkHarness) StopNode(node *HarnessNode) error { + return node.stop() +} + // TODO(roasbeef): add a WithChannel higher-order function? // * python-like context manager w.r.t using a channel within a test // * possibly adds more funds to the target wallet if the funds are not From 179b25c580d107854555c258e92141d3a3a5eb53 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 8 Jun 2018 20:42:54 -0700 Subject: [PATCH 04/12] test: update switch persistence tests to account for bug fix in reconnection logic With the recent bug fixes in the peer connection, it's no longer the case that just disconnecting a certain peer causes it to no longer connect to the other. As a result, we now shutdown Alice to ensure no reconnection occurs. We'll then later restart alice when we restart dave. 
--- lnd_test.go | 54 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/lnd_test.go b/lnd_test.go index 96ca8998..7444e18f 100644 --- a/lnd_test.go +++ b/lnd_test.go @@ -6787,6 +6787,27 @@ func assertActiveHtlcs(nodes []*lntest.HarnessNode, payHashes ...[]byte) error { return nil } +func assertNumActiveHtlcsChanPoint(node *lntest.HarnessNode, + chanPoint wire.OutPoint, numHtlcs int) bool { + + req := &lnrpc.ListChannelsRequest{} + ctxb := context.Background() + nodeChans, err := node.ListChannels(ctxb, req) + if err != nil { + return false + } + + for _, channel := range nodeChans.Channels { + if channel.ChannelPoint != chanPoint.String() { + continue + } + + return len(channel.PendingHtlcs) == numHtlcs + } + + return false +} + func assertNumActiveHtlcs(nodes []*lntest.HarnessNode, numHtlcs int) bool { req := &lnrpc.ListChannelsRequest{} ctxb := context.Background() @@ -9079,11 +9100,11 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness t.Fatalf("htlc mismatch: %v", err) } - // Disconnect the two intermediaries, Alice and Dave, so that when carol - // restarts, the response will be held by Dave. + // Disconnect the two intermediaries, Alice and Dave, by shutting down + // Alice. ctxt, _ = context.WithTimeout(ctxb, timeout) - if err := net.DisconnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to disconnect alice from dave: %v", err) + if err := net.StopNode(net.Alice); err != nil { + t.Fatalf("unable to shutdown alice: %v", err) } // Now restart carol without hodl mode, to settle back the outstanding @@ -9101,10 +9122,12 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness t.Fatalf("unable to reconnect dave and carol: %v", err) } - // Wait for Carol to report no outstanding htlcs. + // Wait for Carol to report no outstanding htlcs, and also for Dave to + // receive all the settles from Carol. 
carolNode := []*lntest.HarnessNode{carol} err = lntest.WaitPredicate(func() bool { - return assertNumActiveHtlcs(carolNode, 0) + return assertNumActiveHtlcs(carolNode, 0) && + assertNumActiveHtlcsChanPoint(dave, carolFundPoint, 0) }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", err) @@ -9113,7 +9136,10 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness // Finally, restart dave who received the settles, but was unable to // deliver them to Alice since they were disconnected. if err := net.RestartNode(dave, nil); err != nil { - t.Fatalf("unable to reconnect alice to dave: %v", err) + t.Fatalf("unable to restart dave: %v", err) + } + if err = net.RestartNode(net.Alice, nil); err != nil { + t.Fatalf("unable to restart alice: %v", err) } // Force Dave and Alice to reconnect before waiting for the htlcs to @@ -9124,8 +9150,8 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness t.Fatalf("unable to reconnect dave and carol: %v", err) } - // After reconnection succeeds, the settles should be propagated all the - // way back to the sender. All nodes should report no active htlcs. + // After reconnection succeeds, the settles should be propagated all + // the way back to the sender. All nodes should report no active htlcs. err = lntest.WaitPredicate(func() bool { return assertNumActiveHtlcs(nodes, 0) }, time.Second*15) @@ -9410,8 +9436,8 @@ func testSwitchOfflineDeliveryOutgoingOffline( // Disconnect the two intermediaries, Alice and Dave, so that when carol // restarts, the response will be held by Dave. 
ctxt, _ = context.WithTimeout(ctxb, timeout) - if err := net.DisconnectNodes(ctxt, dave, net.Alice); err != nil { - t.Fatalf("unable to disconnect alice from dave: %v", err) + if err := net.StopNode(net.Alice); err != nil { + t.Fatalf("unable to shutdown alice: %v", err) } // Now restart carol without hodl mode, to settle back the outstanding @@ -9424,7 +9450,8 @@ func testSwitchOfflineDeliveryOutgoingOffline( // Wait for Carol to report no outstanding htlcs. carolNode := []*lntest.HarnessNode{carol} err = lntest.WaitPredicate(func() bool { - return assertNumActiveHtlcs(carolNode, 0) + return assertNumActiveHtlcs(carolNode, 0) && + assertNumActiveHtlcsChanPoint(dave, carolFundPoint, 0) }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", err) @@ -9451,6 +9478,9 @@ func testSwitchOfflineDeliveryOutgoingOffline( if err := net.RestartNode(dave, nil); err != nil { t.Fatalf("unable to restart dave: %v", err) } + if err = net.RestartNode(net.Alice, nil); err != nil { + t.Fatalf("unable to restart alice: %v", err) + } // Ensure that Dave is reconnected to Alice before waiting for the htlcs // to clear. From 8885c3de8dddc2d2bf56411dda96ef48087f0050 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 8 Jun 2018 21:16:21 -0700 Subject: [PATCH 05/12] server: defer cancelling the outbound connection until the tie-breaker --- lnwallet/channel.go | 3 +-- server.go | 12 ++++++------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/lnwallet/channel.go b/lnwallet/channel.go index 7cb3ce62..50bd723b 100644 --- a/lnwallet/channel.go +++ b/lnwallet/channel.go @@ -1332,7 +1332,7 @@ type LightningChannel struct { cowg sync.WaitGroup wg sync.WaitGroup - quit chan struct{} + quit chan struct{} } // NewLightningChannel creates a new, active payment channel given an @@ -3184,7 +3184,6 @@ func (lc *LightningChannel) ProcessChanSyncMsg( // revocation, but also initiate a state transition to re-sync // them. 
if !lc.FullySynced() { - commitSig, htlcSigs, err := lc.SignNextCommitment() switch { diff --git a/server.go b/server.go index 322e5011..80357229 100644 --- a/server.go +++ b/server.go @@ -1545,6 +1545,9 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, addr := conn.RemoteAddr() pubKey := brontideConn.RemotePub() + srvrLog.Infof("finalizing connection to %x, inbound=%v", + pubKey.SerializeCompressed(), inbound) + peerAddr := &lnwire.NetAddress{ IdentityKey: pubKey, Address: addr, @@ -1641,12 +1644,6 @@ func (s *server) InboundPeerConnected(conn net.Conn) { srvrLog.Infof("New inbound connection from %v", conn.RemoteAddr()) - // Cancel all pending connection requests, we either already have an - // outbound connection, or this incoming connection will become our - // primary connection. The incoming connection will not have an - // associated connection request, so we pass nil. - s.cancelConnReqs(pubStr, nil) - // Check to see if we already have a connection with this peer. If so, // we may need to drop our existing connection. This prevents us from // having duplicate connections to the same peer. We forgo adding a @@ -1657,6 +1654,7 @@ func (s *server) InboundPeerConnected(conn net.Conn) { case ErrPeerNotConnected: // We were unable to locate an existing connection with the // target peer, proceed to connect. + s.cancelConnReqs(pubStr, nil) s.peerConnected(conn, nil, true) case nil: @@ -1678,6 +1676,8 @@ func (s *server) InboundPeerConnected(conn net.Conn) { srvrLog.Debugf("Disconnecting stale connection to %v", connectedPeer) + s.cancelConnReqs(pubStr, nil) + // Remove the current peer from the server's internal state and // signal that the peer termination watcher does not need to // execute for this peer. 
From 15da55effb1c85a9f3e03b1b2a238c7054e22f6e Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Fri, 8 Jun 2018 20:34:31 -0700 Subject: [PATCH 06/12] server: finish correction of inbound/outbound within the server In this commit, we finish the fix for the inbound/outbound peer bool in the server. The prior commit forgot to also flip the inbound/output maps in Inbound/Outbound peer connected. As a result, the checks were incorrect and could cause lnd to refuse to accept any more inbound connections in the case of a concurrent connection attempt. --- server.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/server.go b/server.go index 80357229..5f0becd3 100644 --- a/server.go +++ b/server.go @@ -1625,10 +1625,12 @@ func (s *server) InboundPeerConnected(conn net.Conn) { s.mu.Lock() defer s.mu.Unlock() - // If we already have an inbound connection to this peer, then ignore + // If we already have an outbound connection to this peer, then ignore // this new connection. - if _, ok := s.inboundPeers[pubStr]; ok { - srvrLog.Debugf("Ignoring duplicate inbound connection") + if _, ok := s.outboundPeers[pubStr]; ok { + srvrLog.Debugf("Already have outbound connection for %v, "+ + "ignoring inbound connection", nodePub.SerializeCompressed()) + conn.Close() return } @@ -1705,10 +1707,13 @@ func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) s.mu.Lock() defer s.mu.Unlock() - // If we already have an outbound connection to this peer, then ignore + // If we already have an inbound connection to this peer, then ignore // this new connection. 
- if _, ok := s.outboundPeers[pubStr]; ok { - srvrLog.Debugf("Ignoring duplicate outbound connection") + if _, ok := s.inboundPeers[pubStr]; ok { + srvrLog.Debugf("Already have inbound connection for %v, "+ + "ignoring outbound connection", + nodePub.SerializeCompressed()) + if connReq != nil { s.connMgr.Remove(connReq.ID()) } From 418ecbaa1525a9ac8f39d5b2785fd4d3c5b40fc7 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Sun, 10 Jun 2018 22:32:25 -0700 Subject: [PATCH 07/12] test: fix flake by allowing channel to load before close --- lnd_test.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/lnd_test.go b/lnd_test.go index 7444e18f..d9e4f2a2 100644 --- a/lnd_test.go +++ b/lnd_test.go @@ -5105,11 +5105,20 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness // broadcasting his current channel state. This is actually the // commitment transaction of a prior *revoked* state, so he'll soon // feel the wrath of Alice's retribution. - force := true - closeUpdates, closeTxId, err := net.CloseChannel(ctxb, carol, - chanPoint, force) + var ( + closeUpdates lnrpc.Lightning_CloseChannelClient + closeTxId *chainhash.Hash + closeErr error + force bool = true + ) + err = lntest.WaitPredicate(func() bool { + closeUpdates, closeTxId, closeErr = net.CloseChannel( + ctxb, carol, chanPoint, force, + ) + return closeErr == nil + }, time.Second*15) if err != nil { - t.Fatalf("unable to close channel: %v", err) + t.Fatalf("unable to close channel: %v", closeErr) } // Query the mempool for the breaching closing transaction, this should From 1a15924d65f27f1fce41cc3efe1710a14267070d Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 11 Jun 2018 22:58:24 -0700 Subject: [PATCH 08/12] discovery: fix log for adding new gossip syncers In this commit, we fix the logging when adding new gossip syncers. The old log would log the byte array, rather than the byte slice. We fix this by slicing before logging. 
--- discovery/gossiper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/discovery/gossiper.go b/discovery/gossiper.go index 56bd7929..3819b589 100644 --- a/discovery/gossiper.go +++ b/discovery/gossiper.go @@ -1233,7 +1233,7 @@ func (d *AuthenticatedGossiper) InitSyncState(syncPeer lnpeer.Peer, recvUpdates } log.Infof("Creating new gossipSyncer for peer=%x", - nodeID) + nodeID[:]) syncer := newGossiperSyncer(gossipSyncerCfg{ chainHash: d.cfg.ChainHash, From 03810603ee1926ec23d8c4984a213dedfa03ed34 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 11 Jun 2018 23:02:07 -0700 Subject: [PATCH 09/12] htlcswitch: modify interfaceIndex to no longer key 2nd lvl by ChannelLink In this commit, we modify the interfaceIndex to no longer key the second level of the index by the ChannelLink. Instead, we'll use the chan ID as it's a stable identifier, unlike a reference to an interface. --- htlcswitch/switch.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/htlcswitch/switch.go b/htlcswitch/switch.go index dcb6714d..d99d0182 100644 --- a/htlcswitch/switch.go +++ b/htlcswitch/switch.go @@ -202,8 +202,8 @@ type Switch struct { forwardingIndex map[lnwire.ShortChannelID]ChannelLink // interfaceIndex maps the compressed public key of a peer to all the - // channels that the switch maintains iwht that peer. - interfaceIndex map[[33]byte]map[ChannelLink]struct{} + // channels that the switch maintains with that peer. + interfaceIndex map[[33]byte]map[lnwire.ChannelID]ChannelLink // htlcPlex is the channel which all connected links use to coordinate // the setup/teardown of Sphinx (onion routing) payment circuits. 
@@ -253,7 +253,7 @@ func New(cfg Config) (*Switch, error) { linkIndex: make(map[lnwire.ChannelID]ChannelLink), mailOrchestrator: newMailOrchestrator(), forwardingIndex: make(map[lnwire.ShortChannelID]ChannelLink), - interfaceIndex: make(map[[33]byte]map[ChannelLink]struct{}), + interfaceIndex: make(map[[33]byte]map[lnwire.ChannelID]ChannelLink), pendingLinkIndex: make(map[lnwire.ChannelID]ChannelLink), pendingPayments: make(map[uint64]*pendingPayment), htlcPlex: make(chan *plexPacket), @@ -1774,9 +1774,9 @@ func (s *Switch) addLiveLink(link ChannelLink) { // quickly look up all the channels for a particular node. peerPub := link.Peer().PubKey() if _, ok := s.interfaceIndex[peerPub]; !ok { - s.interfaceIndex[peerPub] = make(map[ChannelLink]struct{}) + s.interfaceIndex[peerPub] = make(map[lnwire.ChannelID]ChannelLink) } - s.interfaceIndex[peerPub][link] = struct{}{} + s.interfaceIndex[peerPub][link.ChanID()] = link } // GetLink is used to initiate the handling of the get link command. The @@ -1918,7 +1918,7 @@ func (s *Switch) getLinks(destination [33]byte) ([]ChannelLink, error) { } channelLinks := make([]ChannelLink, 0, len(links)) - for link := range links { + for _, link := range links { channelLinks = append(channelLinks, link) } From 3db06cf7d5722df7b38139bae25bb73e8f11b76b Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 11 Jun 2018 23:06:08 -0700 Subject: [PATCH 10/12] htlcswitch: in removeLink properly remove items from the interfaceIndex In this commit, we fix a bug in the way we handle removing items from the interfaceIndex. Before this commit, we would delete all items with the target public key of the peer that owns the link being removed. However, this is incorrect as the peer may have other links still active. In this commit, we fix this by first only deleting the link from the peer's index, and then checking to see if the index is empty after this deletion. Only if so do we delete the index for the peer altogether. 
--- htlcswitch/switch.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/htlcswitch/switch.go b/htlcswitch/switch.go index d99d0182..b953e807 100644 --- a/htlcswitch/switch.go +++ b/htlcswitch/switch.go @@ -1840,9 +1840,18 @@ func (s *Switch) removeLink(chanID lnwire.ChannelID) error { delete(s.linkIndex, link.ChanID()) delete(s.forwardingIndex, link.ShortChanID()) - // Remove the channel from channel index. + // If the link has been added to the peer index, then we'll move to + // delete the entry within the index. peerPub := link.Peer().PubKey() - delete(s.interfaceIndex, peerPub) + if peerIndex, ok := s.interfaceIndex[peerPub]; ok { + delete(peerIndex, link.ChanID()) + + // If after deletion, there are no longer any links, then we'll + // remove the interface map altogether. + if len(peerIndex) == 0 { + delete(s.interfaceIndex, peerPub) + } + } link.Stop() From e1d8c37708a8e2723f9ea53a21018ed861691629 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Mon, 11 Jun 2018 23:52:38 -0700 Subject: [PATCH 11/12] peer: only add an active link to the channelManager if addPeer succeeds In this commit we fix an existing bug which could cause internal state inconsistency between the switch, funding manager, and the peer. Before this commit, we would _always_ add a new channel to the channelManager. However, due to recent logic, it may be the case that this isn't the channel that will ultimately reside in the link. As a result, we would be unable to process incoming FundingLocked messages properly, as we would mutate the incorrect channel in memory. We remedy this by moving the insertion of the new channel into the activeChannels map to the end of the loadActiveChannels method, where we know that this will be the link that persists. 
--- peer.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/peer.go b/peer.go index ba9642ea..b5996499 100644 --- a/peer.go +++ b/peer.go @@ -324,10 +324,6 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error { chanID := lnwire.NewChanIDFromOutPoint(chanPoint) - p.activeChanMtx.Lock() - p.activeChannels[chanID] = lnChan - p.activeChanMtx.Unlock() - peerLog.Infof("NodeKey(%x) loading ChannelPoint(%v)", p.PubKey(), chanPoint) @@ -415,12 +411,18 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error { } // Create the link and add it to the switch. - err = p.addLink(chanPoint, lnChan, forwardingPolicy, blockEpoch, - chainEvents, currentHeight, true) + err = p.addLink( + chanPoint, lnChan, forwardingPolicy, blockEpoch, + chainEvents, currentHeight, true, + ) if err != nil { lnChan.Stop() return err } + + p.activeChanMtx.Lock() + p.activeChannels[chanID] = lnChan + p.activeChanMtx.Unlock() } return nil From e60d2b774a35e446081f9be2d4901925cac05379 Mon Sep 17 00:00:00 2001 From: Olaoluwa Osuntokun Date: Tue, 12 Jun 2018 00:43:04 -0700 Subject: [PATCH 12/12] htlcswitch: in event of duplicate link add, prefer newer link --- htlcswitch/switch.go | 6 ++-- htlcswitch/switch_test.go | 59 --------------------------------------- peer.go | 4 +-- 3 files changed, 5 insertions(+), 64 deletions(-) diff --git a/htlcswitch/switch.go b/htlcswitch/switch.go index b953e807..91d6b0e1 100644 --- a/htlcswitch/switch.go +++ b/htlcswitch/switch.go @@ -1723,11 +1723,11 @@ func (s *Switch) AddLink(link ChannelLink) error { chanID := link.ChanID() - // First, ensure that this link is not already active in the switch. + // If a link already exists, then remove the prior one so we can + // replace it with this fresh instance. 
_, err := s.getLink(chanID) if err == nil { - return fmt.Errorf("unable to add ChannelLink(%v), already "+ - "active", chanID) + s.removeLink(chanID) } // Get and attach the mailbox for this link, which buffers packets in diff --git a/htlcswitch/switch_test.go b/htlcswitch/switch_test.go index 852aa654..82932d3c 100644 --- a/htlcswitch/switch_test.go +++ b/htlcswitch/switch_test.go @@ -25,65 +25,6 @@ func genPreimage() ([32]byte, error) { return preimage, nil } -// TestSwitchAddDuplicateLink tests that the switch will reject duplicate links -// for both pending and live links. It also tests that we can successfully -// add a link after having removed it. -func TestSwitchAddDuplicateLink(t *testing.T) { - t.Parallel() - - alicePeer, err := newMockServer(t, "alice", nil) - if err != nil { - t.Fatalf("unable to create alice server: %v", err) - } - - s, err := initSwitchWithDB(nil) - if err != nil { - t.Fatalf("unable to init switch: %v", err) - } - if err := s.Start(); err != nil { - t.Fatalf("unable to start switch: %v", err) - } - defer s.Stop() - - chanID1, _, aliceChanID, _ := genIDs() - - pendingChanID := lnwire.ShortChannelID{} - - aliceChannelLink := newMockChannelLink( - s, chanID1, pendingChanID, alicePeer, false, - ) - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } - - // Alice should have a pending link, adding again should fail. - if err := s.AddLink(aliceChannelLink); err == nil { - t.Fatalf("adding duplicate link should have failed") - } - - // Update the short chan id of the channel, so that the link goes live. - aliceChannelLink.setLiveShortChanID(aliceChanID) - err = s.UpdateShortChanID(chanID1) - if err != nil { - t.Fatalf("unable to update alice short_chan_id: %v", err) - } - - // Alice should have a live link, adding again should fail. 
- if err := s.AddLink(aliceChannelLink); err == nil { - t.Fatalf("adding duplicate link should have failed") - } - - // Remove the live link to ensure the indexes are cleared. - if err := s.RemoveLink(chanID1); err != nil { - t.Fatalf("unable to remove alice link: %v", err) - } - - // Alice has no links, adding should succeed. - if err := s.AddLink(aliceChannelLink); err != nil { - t.Fatalf("unable to add alice link: %v", err) - } -} - // TestSwitchSendPending checks the inability of htlc switch to forward adds // over pending links, and the UpdateShortChanID makes a pending link live. func TestSwitchSendPending(t *testing.T) { diff --git a/peer.go b/peer.go index b5996499..f21ece2e 100644 --- a/peer.go +++ b/peer.go @@ -1529,8 +1529,8 @@ out: chainEvents, currentHeight, false) if err != nil { peerLog.Errorf("can't register new channel "+ - "link(%v) with NodeKey(%x)", chanPoint, - p.PubKey()) + "link(%v) with NodeKey(%x): %v", chanPoint, + p.PubKey(), err) } close(newChanReq.done)