diff --git a/channeldb/graph.go b/channeldb/graph.go
index f042dcc4..fac89017 100644
--- a/channeldb/graph.go
+++ b/channeldb/graph.go
@@ -188,6 +188,7 @@ type ChannelGraph struct {
 // returned instance has its own unique reject cache and channel cache.
 func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int,
 	batchCommitInterval time.Duration) *ChannelGraph {
+
 	g := &ChannelGraph{
 		db:          db,
 		rejectCache: newRejectCache(rejectCacheSize),
@@ -199,6 +200,7 @@ func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int,
 	g.nodeScheduler = batch.NewTimeScheduler(
 		db.Backend, nil, batchCommitInterval,
 	)
+
 	return g
 }
 
@@ -953,7 +955,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
 			// was successfully pruned.
 			err = delChannelEdge(
 				edges, edgeIndex, chanIndex, zombieIndex, nodes,
-				chanID, false,
+				chanID, false, false,
 			)
 			if err != nil && err != ErrEdgeNotFound {
 				return err
@@ -1202,7 +1204,7 @@ func (c *ChannelGraph) DisconnectBlockAtHeight(height uint32) ([]*ChannelEdgeInf
 		for _, k := range keys {
 			err = delChannelEdge(
 				edges, edgeIndex, chanIndex, zombieIndex, nodes,
-				k, false,
+				k, false, false,
 			)
 			if err != nil && err != ErrEdgeNotFound {
 				return err
@@ -1301,11 +1303,14 @@ func (c *ChannelGraph) PruneTip() (*chainhash.Hash, uint32, error) {
 	return &tipHash, tipHeight, nil
 }
 
-// DeleteChannelEdges removes edges with the given channel IDs from the database
-// and marks them as zombies. This ensures that we're unable to re-add it to our
-// database once again. If an edge does not exist within the database, then
-// ErrEdgeNotFound will be returned.
-func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) error {
+// DeleteChannelEdges removes edges with the given channel IDs from the
+// database and marks them as zombies. This ensures that we're unable to
+// re-add them to our database once again. If an edge does not exist within
+// the database, then ErrEdgeNotFound will be returned. If strictZombiePruning
+// is true, then when we mark these edges as zombies, we'll set up the keys
+// such that we require the node that failed to send the fresh update to be
+// the one that resurrects the channel from its zombie state.
+func (c *ChannelGraph) DeleteChannelEdges(strictZombiePruning bool, chanIDs ...uint64) error {
 	// TODO(roasbeef): possibly delete from node bucket if node has no more
 	// channels
 	// TODO(roasbeef): don't delete both edges?
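To make the new flag concrete, here is a minimal sketch (not part of the patch) of how a caller drives the two modes. The `removeChannel` helper, `graph`, and `chanID` are hypothetical stand-ins; only the `DeleteChannelEdges` signature comes from the diff above.

```go
import "github.com/lightningnetwork/lnd/channeldb"

// removeChannel is a hypothetical helper, assuming an open
// *channeldb.ChannelGraph and a known channel ID.
func removeChannel(graph *channeldb.ChannelGraph, chanID uint64,
	strict bool) error {

	// strict=false: either node's fresh update may later resurrect the
	// zombie. strict=true: only the lagging node, i.e. the one whose
	// update was missing or stale at deletion time, may resurrect it.
	return graph.DeleteChannelEdges(strict, chanID)
}
```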
@@ -1340,7 +1345,7 @@ func (c *ChannelGraph) DeleteChannelEdges(chanIDs ...uint64) error {
 		byteOrder.PutUint64(rawChanID[:], chanID)
 		err := delChannelEdge(
 			edges, edgeIndex, chanIndex, zombieIndex, nodes,
-			rawChanID[:], true,
+			rawChanID[:], true, strictZombiePruning,
 		)
 		if err != nil {
 			return err
@@ -1929,7 +1934,7 @@ func delEdgeUpdateIndexEntry(edgesBucket kvdb.RwBucket, chanID uint64,
 }
 
 func delChannelEdge(edges, edgeIndex, chanIndex, zombieIndex,
-	nodes kvdb.RwBucket, chanID []byte, isZombie bool) error {
+	nodes kvdb.RwBucket, chanID []byte, isZombie, strictZombie bool) error {
 
 	edgeInfo, err := fetchChanEdgeInfo(edgeIndex, chanID)
 	if err != nil {
@@ -1997,12 +2002,57 @@ func delChannelEdge(edges, edgeIndex, chanIndex, zombieIndex,
 		return nil
 	}
 
+	nodeKey1, nodeKey2 := edgeInfo.NodeKey1Bytes, edgeInfo.NodeKey2Bytes
+	if strictZombie {
+		nodeKey1, nodeKey2 = makeZombiePubkeys(&edgeInfo, edge1, edge2)
+	}
+
 	return markEdgeZombie(
-		zombieIndex, byteOrder.Uint64(chanID), edgeInfo.NodeKey1Bytes,
-		edgeInfo.NodeKey2Bytes,
+		zombieIndex, byteOrder.Uint64(chanID), nodeKey1, nodeKey2,
 	)
 }
 
+// makeZombiePubkeys derives the node pubkeys to store in the zombie index for
+// a particular pair of channel policies. The return values are one of:
+//  1. (pubkey1, pubkey2)
+//  2. (pubkey1, blank)
+//  3. (blank, pubkey2)
+//
+// A blank pubkey means that the corresponding node will be unable to resurrect
+// the channel on its own. For example, node1 may continue to publish recent
+// updates, but node2 has fallen way behind. After marking an edge as a zombie,
+// we don't want another fresh update from node1 to resurrect it, as the edge
+// can only become live once node2 finally sends something recent.
+//
+// In the case where we have neither update, we allow either party to resurrect
+// the channel. If the channel were to be marked zombie again, it would be
+// marked with the correct lagging node, since by then we'd have received an
+// update from only one side.
+func makeZombiePubkeys(info *ChannelEdgeInfo,
+	e1, e2 *ChannelEdgePolicy) ([33]byte, [33]byte) {
+
+	switch {
+
+	// If we don't have either edge policy, we'll return both pubkeys so
+	// that the channel can be resurrected by either party.
+	case e1 == nil && e2 == nil:
+		return info.NodeKey1Bytes, info.NodeKey2Bytes
+
+	// If we're missing edge1, or if both edges are present but edge1 is
+	// older, we'll return edge1's pubkey and a blank pubkey for edge2.
+	// This means that only an update from edge1 will be able to resurrect
+	// the channel.
+	case e1 == nil || (e2 != nil && e1.LastUpdate.Before(e2.LastUpdate)):
+		return info.NodeKey1Bytes, [33]byte{}
+
+	// Otherwise, we're missing edge2 or edge2 is the older side, so we
+	// return a blank pubkey for edge1. In this case, only an update from
+	// edge2 can resurrect the channel.
+	default:
+		return [33]byte{}, info.NodeKey2Bytes
+	}
+}
+
 // UpdateEdgePolicy updates the edge routing policy for a single directed edge
 // within the database for the referenced channel. The `flags` attribute within
 // the ChannelEdgePolicy determines which of the directed edges are being
diff --git a/channeldb/graph_test.go b/channeldb/graph_test.go
index 331d1769..0708c3ad 100644
--- a/channeldb/graph_test.go
+++ b/channeldb/graph_test.go
@@ -368,7 +368,7 @@ func TestEdgeInsertionDeletion(t *testing.T) {
 
 	// Next, attempt to delete the edge from the database, again this
 	// should proceed without any issues.
-	if err := graph.DeleteChannelEdges(chanID); err != nil {
+	if err := graph.DeleteChannelEdges(false, chanID); err != nil {
 		t.Fatalf("unable to delete edge: %v", err)
 	}
 
@@ -387,7 +387,7 @@ func TestEdgeInsertionDeletion(t *testing.T) {
 
 	// Finally, attempt to delete a (now) non-existent edge within the
 	// database, this should result in an error.
-	err = graph.DeleteChannelEdges(chanID)
+	err = graph.DeleteChannelEdges(false, chanID)
 	if err != ErrEdgeNotFound {
 		t.Fatalf("deleting a non-existent edge should fail!")
 	}
@@ -1756,7 +1756,7 @@ func TestFilterKnownChanIDs(t *testing.T) {
 		if err := graph.AddChannelEdge(&channel); err != nil {
 			t.Fatalf("unable to create channel edge: %v", err)
 		}
-		err := graph.DeleteChannelEdges(channel.ChannelID)
+		err := graph.DeleteChannelEdges(false, channel.ChannelID)
 		if err != nil {
 			t.Fatalf("unable to mark edge zombie: %v", err)
 		}
@@ -2038,7 +2038,7 @@ func TestFetchChanInfos(t *testing.T) {
 	if err := graph.AddChannelEdge(&zombieChan); err != nil {
 		t.Fatalf("unable to create channel edge: %v", err)
 	}
-	err = graph.DeleteChannelEdges(zombieChan.ChannelID)
+	err = graph.DeleteChannelEdges(false, zombieChan.ChannelID)
 	if err != nil {
 		t.Fatalf("unable to delete and mark edge zombie: %v", err)
 	}
@@ -2654,7 +2654,7 @@ func TestNodeIsPublic(t *testing.T) {
 	// graph. This will make Alice be seen as a private node as it no longer
 	// has any advertised edges.
 	for _, graph := range graphs {
-		err := graph.DeleteChannelEdges(aliceBobEdge.ChannelID)
+		err := graph.DeleteChannelEdges(false, aliceBobEdge.ChannelID)
 		if err != nil {
 			t.Fatalf("unable to remove edge: %v", err)
 		}
@@ -2671,7 +2671,7 @@ func TestNodeIsPublic(t *testing.T) {
 	// completely remove the edge as it is not possible for her to know of
 	// it without it being advertised.
 	for i, graph := range graphs {
-		err := graph.DeleteChannelEdges(bobCarolEdge.ChannelID)
+		err := graph.DeleteChannelEdges(false, bobCarolEdge.ChannelID)
 		if err != nil {
 			t.Fatalf("unable to remove edge: %v", err)
 		}
@@ -2779,7 +2779,7 @@ func TestDisabledChannelIDs(t *testing.T) {
 	}
 
 	// Delete the channel edge and ensure it is removed from the disabled list.
-	if err = graph.DeleteChannelEdges(edgeInfo.ChannelID); err != nil {
+	if err = graph.DeleteChannelEdges(false, edgeInfo.ChannelID); err != nil {
 		t.Fatalf("unable to delete channel edge: %v", err)
 	}
 	disabledChanIds, err = graph.DisabledChannelIDs()
@@ -3017,7 +3017,7 @@ func TestGraphZombieIndex(t *testing.T) {
 
 	// If we delete the edge and mark it as a zombie, then we should expect
 	// to see it within the index.
-	err = graph.DeleteChannelEdges(edge.ChannelID)
+	err = graph.DeleteChannelEdges(false, edge.ChannelID)
 	if err != nil {
 		t.Fatalf("unable to mark edge as zombie: %v", err)
 	}
diff --git a/discovery/gossiper.go b/discovery/gossiper.go
index 8bb3b96c..5eb5d689 100644
--- a/discovery/gossiper.go
+++ b/discovery/gossiper.go
@@ -47,6 +47,10 @@ var (
 	// gossip syncer corresponding to a gossip query message received from
 	// the remote peer.
 	ErrGossipSyncerNotFound = errors.New("gossip syncer not found")
+
+	// emptyPubkey is used to compare compressed pubkeys against an empty
+	// byte array.
+	emptyPubkey [33]byte
 )
 
 // optionalMsgFields is a set of optional message fields that external callers
@@ -1881,44 +1885,13 @@ func (d *AuthenticatedGossiper) processNetworkAnnouncement(
 			break
 
 		case channeldb.ErrZombieEdge:
-			// Since we've deemed the update as not stale above,
-			// before marking it live, we'll make sure it has been
-			// signed by the correct party. The least-significant
-			// bit in the flag on the channel update tells us which
-			// edge is being updated.
-			var pubKey *btcec.PublicKey
-			switch {
-			case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0:
-				pubKey, _ = chanInfo.NodeKey1()
-			case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1:
-				pubKey, _ = chanInfo.NodeKey2()
-			}
-
-			err := routing.VerifyChannelUpdateSignature(msg, pubKey)
+			err = d.processZombieUpdate(chanInfo, msg)
 			if err != nil {
-				err := fmt.Errorf("unable to verify channel "+
-					"update signature: %v", err)
-				log.Error(err)
+				log.Warn(err)
 				nMsg.err <- err
 				return nil, false
 			}
 
-			// With the signature valid, we'll proceed to mark the
-			// edge as live and wait for the channel announcement to
-			// come through again.
-			err = d.cfg.Router.MarkEdgeLive(msg.ShortChannelID)
-			if err != nil {
-				err := fmt.Errorf("unable to remove edge with "+
-					"chan_id=%v from zombie index: %v",
-					msg.ShortChannelID, err)
-				log.Error(err)
-				nMsg.err <- err
-				return nil, false
-			}
-
-			log.Debugf("Removed edge with chan_id=%v from zombie "+
-				"index", msg.ShortChannelID)
-
 			// We'll fallthrough to ensure we stash the update until
 			// we receive its corresponding ChannelAnnouncement.
 			// This is needed to ensure the edge exists in the graph
@@ -2447,6 +2420,54 @@ func (d *AuthenticatedGossiper) processNetworkAnnouncement(
 	}
 }
 
+// processZombieUpdate determines whether the provided channel update should
+// resurrect a given zombie edge.
+func (d *AuthenticatedGossiper) processZombieUpdate(
+	chanInfo *channeldb.ChannelEdgeInfo, msg *lnwire.ChannelUpdate) error {
+
+	// The least-significant bit in the flag on the channel update tells us
+	// which edge is being updated.
+	isNode1 := msg.ChannelFlags&lnwire.ChanUpdateDirection == 0
+
+	// Since we've deemed the update as not stale above, before marking it
+	// live, we'll make sure it has been signed by the correct party. If we
+	// have both pubkeys, either party can resurrect the channel. If we've
+	// already marked this with the stricter, single-sided resurrection, we
+	// will only have the pubkey of the node with the oldest timestamp.
+	var pubKey *btcec.PublicKey
+	switch {
+	case isNode1 && chanInfo.NodeKey1Bytes != emptyPubkey:
+		pubKey, _ = chanInfo.NodeKey1()
+	case !isNode1 && chanInfo.NodeKey2Bytes != emptyPubkey:
+		pubKey, _ = chanInfo.NodeKey2()
+	}
+	if pubKey == nil {
+		return fmt.Errorf("incorrect pubkey to resurrect zombie "+
+			"with chan_id=%v", msg.ShortChannelID)
+	}
+
+	err := routing.VerifyChannelUpdateSignature(msg, pubKey)
+	if err != nil {
+		return fmt.Errorf("unable to verify channel "+
+			"update signature: %v", err)
+	}
+
+	// With the signature valid, we'll proceed to mark the edge as live
+	// and wait for the channel announcement to come through again.
+	err = d.cfg.Router.MarkEdgeLive(msg.ShortChannelID)
+	if err != nil {
+		return fmt.Errorf("unable to remove edge with "+
+			"chan_id=%v from zombie index: %v",
+			msg.ShortChannelID, err)
+	}
+
+	log.Debugf("Removed edge with chan_id=%v from zombie "+
+		"index", msg.ShortChannelID)
+
+	return nil
+}
+
 // fetchNodeAnn fetches the latest signed node announcement from our point of
 // view for the node with the given public key.
 func (d *AuthenticatedGossiper) fetchNodeAnn(
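The direction-bit plus blank-key gate above is the heart of the resurrection rule, so here is a self-contained illustration of just those two checks. Only the least-significant flag bit mirrors `lnwire.ChanUpdateDirection`; `resurrectionKey` and the constant are local stand-ins, not lnd's real API.

```go
package main

import "fmt"

// chanUpdateDirection mirrors the least-significant bit of
// lnwire.ChannelUpdate.ChannelFlags.
const chanUpdateDirection byte = 1 << 0

// resurrectionKey returns the pubkey that must have signed the update for it
// to resurrect the zombie, or false if the updating side was blanked out at
// zombie-marking time.
func resurrectionKey(flags byte, key1, key2 [33]byte) ([33]byte, bool) {
	var empty [33]byte
	isNode1 := flags&chanUpdateDirection == 0
	switch {
	case isNode1 && key1 != empty:
		return key1, true
	case !isNode1 && key2 != empty:
		return key2, true
	}
	return empty, false
}

func main() {
	var node2 [33]byte
	node2[0] = 0x03

	// Strict marking blanked node1's key: node1's update (direction bit
	// clear) is rejected, while node2's update (bit set) is accepted.
	_, ok1 := resurrectionKey(0, [33]byte{}, node2)
	_, ok2 := resurrectionKey(chanUpdateDirection, [33]byte{}, node2)
	fmt.Println(ok1, ok2) // false true
}
```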
diff --git a/discovery/gossiper_test.go b/discovery/gossiper_test.go
index a8b3154a..c12b9b53 100644
--- a/discovery/gossiper_test.go
+++ b/discovery/gossiper_test.go
@@ -397,7 +397,9 @@ func (r *mockGraphSource) MarkEdgeZombie(chanID lnwire.ShortChannelID, pubKey1,
 	r.mu.Lock()
 	defer r.mu.Unlock()
+
 	r.zombies[chanID.ToUint64()] = [][33]byte{pubKey1, pubKey2}
+
 	return nil
 }
 
@@ -2317,15 +2319,34 @@ func TestProcessZombieEdgeNowLive(t *testing.T) {
 		t.Fatalf("unable to sign update with new timestamp: %v", err)
 	}
 
-	// We'll also add the edge to our zombie index.
+	// We'll also add the edge to our zombie index, providing a blank
+	// pubkey for the first node as we're simulating the situation where
+	// the first node is updating but the second node isn't. In this case,
+	// we only want a fresh update from the second node to be able to
+	// resurrect the entire edge.
 	chanID := batch.chanAnn.ShortChannelID
 	err = ctx.router.MarkEdgeZombie(
-		chanID, batch.chanAnn.NodeID1, batch.chanAnn.NodeID2,
+		chanID, [33]byte{}, batch.chanAnn.NodeID2,
 	)
 	if err != nil {
 		t.Fatalf("unable mark channel %v as zombie: %v", chanID, err)
 	}
 
+	// If we send a new update but for the other direction of the channel,
+	// then it should still be rejected as we want a fresh update from the
+	// one that was considered stale.
+	batch.chanUpdAnn1.Timestamp = uint32(time.Now().Unix())
+	if err := signUpdate(remoteKeyPriv1, batch.chanUpdAnn1); err != nil {
+		t.Fatalf("unable to sign update with new timestamp: %v", err)
+	}
+	processAnnouncement(batch.chanUpdAnn1, true, true)
+
+	// At this point, the channel should still be considered a zombie.
+	_, _, _, err = ctx.router.GetChannelByID(chanID)
+	if err != channeldb.ErrZombieEdge {
+		t.Fatalf("channel should still be a zombie")
+	}
+
 	// Attempting to process the current channel update should fail due to
 	// its edge being considered a zombie and its timestamp not being within
 	// the live horizon. We should not expect an error here since it is just
diff --git a/lncfg/routing.go b/lncfg/routing.go
index 8f62745f..3e6cabc7 100644
--- a/lncfg/routing.go
+++ b/lncfg/routing.go
@@ -2,5 +2,7 @@ package lncfg
 
 // Routing holds the configuration options for routing.
 type Routing struct {
-	AssumeChannelValid bool `long:"assumechanvalid" description:"DEPRECATED: This is now turned on by default for Neutrino (use neutrino.validatechannels=true to turn off) and shouldn't be used for any other backend! (default: false)"`
+	AssumeChannelValid bool `long:"assumechanvalid" description:"Skip checking channel spentness during graph validation. This speedup comes at the risk of using an unvalidated view of the network for routing. (default: false)"`
+
+	StrictZombiePruning bool `long:"strictgraphpruning" description:"If true, then the graph will be pruned more aggressively for zombies. In practice, this means that channels with a single stale edge will be considered zombies."`
 }
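For reference, a minimal sketch of how a `long`-tagged field like the one above surfaces on the command line through go-flags. The `config`/`routing` types and the `group`/`namespace` tags are illustrative stand-ins for lnd's real config plumbing, not code from the patch.

```go
package main

import (
	"fmt"

	flags "github.com/jessevdk/go-flags"
)

type routing struct {
	StrictZombiePruning bool `long:"strictgraphpruning" description:"prune single-stale-edge channels"`
}

type config struct {
	Routing routing `group:"routing" namespace:"routing"`
}

func main() {
	var cfg config
	// Simulates: lnd --routing.strictgraphpruning
	if _, err := flags.ParseArgs(&cfg, []string{"--routing.strictgraphpruning"}); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Routing.StrictZombiePruning) // true
}
```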
diff --git a/routing/router.go b/routing/router.go
index 243bde49..a1f50215 100644
--- a/routing/router.go
+++ b/routing/router.go
@@ -342,6 +342,13 @@ type Config struct {
 
 	// Clock is mockable time provider.
 	Clock clock.Clock
+
+	// StrictZombiePruning determines if we attempt to prune zombie
+	// channels according to stricter criteria. If true, then we'll prune
+	// a channel if only *one* of the edges is considered a zombie.
+	// Otherwise, we'll only prune the channel when both edges have a very
+	// dated last update.
+	StrictZombiePruning bool
 }
 
 // EdgeLocator is a struct used to identify a specific edge.
@@ -824,30 +831,39 @@ func (r *ChannelRouter) pruneZombieChans() error {
 			return nil
 		}
 
-		// If *both* edges haven't been updated for a period of
+		// If either edge hasn't been updated for a period of
 		// chanExpiry, then we'll mark the channel itself as eligible
 		// for graph pruning.
-		var e1Zombie, e2Zombie bool
-		if e1 != nil {
-			e1Zombie = time.Since(e1.LastUpdate) >= chanExpiry
-			if e1Zombie {
-				log.Tracef("Edge #1 of ChannelID(%v) last "+
-					"update: %v", info.ChannelID,
-					e1.LastUpdate)
-			}
+		e1Zombie := e1 == nil || time.Since(e1.LastUpdate) >= chanExpiry
+		e2Zombie := e2 == nil || time.Since(e2.LastUpdate) >= chanExpiry
+
+		if e1Zombie {
+			log.Tracef("Node1 pubkey=%x of chan_id=%v is zombie",
+				info.NodeKey1Bytes, info.ChannelID)
 		}
-		if e2 != nil {
-			e2Zombie = time.Since(e2.LastUpdate) >= chanExpiry
-			if e2Zombie {
-				log.Tracef("Edge #2 of ChannelID(%v) last "+
-					"update: %v", info.ChannelID,
-					e2.LastUpdate)
-			}
+		if e2Zombie {
+			log.Tracef("Node2 pubkey=%x of chan_id=%v is zombie",
+				info.NodeKey2Bytes, info.ChannelID)
 		}
 
-		// If the channel is not considered zombie, we can move on to
-		// the next.
-		if !e1Zombie || !e2Zombie {
+		// If we're using strict zombie pruning, then a channel is only
+		// considered live if both edges have a recent update we know
+		// of.
+		var channelIsLive bool
+		switch {
+		case r.cfg.StrictZombiePruning:
+			channelIsLive = !e1Zombie && !e2Zombie
+
+		// Otherwise, if we're using the less strict variant, then a
+		// channel is considered live if either of the edges has a
+		// recent update.
+		default:
+			channelIsLive = !e1Zombie || !e2Zombie
+		}
+
+		// Return early if the channel is still considered to be live
+		// with the current set of configuration parameters.
+		if channelIsLive {
 			return nil
 		}
 
@@ -908,7 +924,8 @@ func (r *ChannelRouter) pruneZombieChans() error {
 		toPrune = append(toPrune, chanID)
 		log.Tracef("Pruning zombie channel with ChannelID(%v)", chanID)
 	}
-	if err := r.cfg.Graph.DeleteChannelEdges(toPrune...); err != nil {
+	err = r.cfg.Graph.DeleteChannelEdges(r.cfg.StrictZombiePruning, toPrune...)
+	if err != nil {
 		return fmt.Errorf("unable to delete zombie channels: %v", err)
 	}
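The liveness rule above reduces to a two-row truth table per channel; here is a self-contained restatement of just that decision (names are local stand-ins for the router's internals):

```go
package main

import "fmt"

// channelIsLive mirrors the switch in pruneZombieChans: default mode keeps a
// channel if either edge is fresh, strict mode only if both are.
func channelIsLive(e1Zombie, e2Zombie, strict bool) bool {
	if strict {
		return !e1Zombie && !e2Zombie
	}
	return !e1Zombie || !e2Zombie
}

func main() {
	// A channel with one stale edge survives default pruning but not
	// strict pruning.
	fmt.Println(channelIsLive(true, false, false)) // true
	fmt.Println(channelIsLive(true, false, true))  // false
}
```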
diff --git a/routing/router_test.go b/routing/router_test.go
index a7c082e9..c530daec 100644
--- a/routing/router_test.go
+++ b/routing/router_test.go
@@ -69,16 +69,17 @@ func (c *testCtx) RestartRouter() error {
 	return nil
 }
 
-func createTestCtxFromGraphInstance(startingHeight uint32, graphInstance *testGraphInstance) (
-	*testCtx, func(), error) {
+func createTestCtxFromGraphInstance(startingHeight uint32, graphInstance *testGraphInstance,
+	strictPruning bool) (*testCtx, func(), error) {
 
 	return createTestCtxFromGraphInstanceAssumeValid(
-		startingHeight, graphInstance, false,
+		startingHeight, graphInstance, false, strictPruning,
 	)
 }
 
 func createTestCtxFromGraphInstanceAssumeValid(startingHeight uint32,
-	graphInstance *testGraphInstance, assumeValid bool) (*testCtx, func(), error) {
+	graphInstance *testGraphInstance, assumeValid bool,
+	strictPruning bool) (*testCtx, func(), error) {
 
 	// We'll initialize an instance of the channel router with mock
 	// versions of the chain and channel notifier. As we don't need to test
@@ -134,9 +135,10 @@ func createTestCtxFromGraphInstanceAssumeValid(startingHeight uint32,
 			next := atomic.AddUint64(&uniquePaymentID, 1)
 			return next, nil
 		},
-		PathFindingConfig:  pathFindingConfig,
-		Clock:              clock.NewTestClock(time.Unix(1, 0)),
-		AssumeChannelValid: assumeValid,
+		PathFindingConfig:   pathFindingConfig,
+		Clock:               clock.NewTestClock(time.Unix(1, 0)),
+		AssumeChannelValid:  assumeValid,
+		StrictZombiePruning: strictPruning,
 	})
 	if err != nil {
 		return nil, nil, fmt.Errorf("unable to create router %v", err)
@@ -187,7 +189,7 @@ func createTestCtxSingleNode(startingHeight uint32) (*testCtx, func(), error) {
 		cleanUp: cleanup,
 	}
 
-	return createTestCtxFromGraphInstance(startingHeight, graphInstance)
+	return createTestCtxFromGraphInstance(startingHeight, graphInstance, false)
 }
 
 func createTestCtxFromFile(startingHeight uint32, testGraph string) (*testCtx, func(), error) {
@@ -198,7 +200,7 @@ func createTestCtxFromFile(startingHeight uint32, testGraph string) (*testCtx, f
 		return nil, nil, fmt.Errorf("unable to create test graph: %v", err)
 	}
 
-	return createTestCtxFromGraphInstance(startingHeight, graphInstance)
+	return createTestCtxFromGraphInstance(startingHeight, graphInstance, false)
 }
 
 // TestFindRoutesWithFeeLimit asserts that routes found by the FindRoutes method
@@ -365,9 +367,9 @@ func TestChannelUpdateValidation(t *testing.T) {
 
 	const startingBlockHeight = 101
 
-	ctx, cleanUp, err := createTestCtxFromGraphInstance(startingBlockHeight,
-		testGraph)
-
+	ctx, cleanUp, err := createTestCtxFromGraphInstance(
+		startingBlockHeight, testGraph, true,
+	)
 	defer cleanUp()
 	if err != nil {
 		t.Fatalf("unable to create router: %v", err)
@@ -1028,7 +1030,7 @@ func TestIgnoreChannelEdgePolicyForUnknownChannel(t *testing.T) {
 	defer testGraph.cleanUp()
 
 	ctx, cleanUp, err := createTestCtxFromGraphInstance(
-		startingBlockHeight, testGraph,
+		startingBlockHeight, testGraph, false,
 	)
 	if err != nil {
 		t.Fatalf("unable to create router: %v", err)
@@ -1946,8 +1948,8 @@ func TestPruneChannelGraphStaleEdges(t *testing.T) {
 	freshTimestamp := time.Now()
 	staleTimestamp := time.Unix(0, 0)
 
-	// We'll create the following test graph so that only the last channel
-	// is pruned.
+	// We'll create the following test graph so that, depending on the
+	// pruning mode, two or three of the channels are pruned.
 	testChannels := []*testChannel{
 		// No edges.
 		{
@@ -1960,7 +1962,7 @@ func TestPruneChannelGraphStaleEdges(t *testing.T) {
 		// Only one edge with a stale timestamp.
 		{
 			Node1: &testChannelEnd{
-				Alias: "a",
+				Alias: "d",
 				testChannelPolicy: &testChannelPolicy{
 					LastUpdate: staleTimestamp,
 				},
@@ -1970,6 +1972,20 @@ func TestPruneChannelGraphStaleEdges(t *testing.T) {
 			ChannelID: 2,
 		},
 
+		// Only one edge with a stale timestamp, but it's the source
+		// node so it won't get pruned.
+		{
+			Node1: &testChannelEnd{
+				Alias: "a",
+				testChannelPolicy: &testChannelPolicy{
+					LastUpdate: staleTimestamp,
+				},
+			},
+			Node2: &testChannelEnd{Alias: "b"},
+			Capacity:  100000,
+			ChannelID: 3,
+		},
+
 		// Only one edge with a fresh timestamp.
 		{
 			Node1: &testChannelEnd{
@@ -1980,10 +1996,11 @@ func TestPruneChannelGraphStaleEdges(t *testing.T) {
 			},
 			Node2: &testChannelEnd{Alias: "b"},
 			Capacity:  100000,
-			ChannelID: 3,
+			ChannelID: 4,
 		},
 
-		// One edge fresh, one edge stale.
+		// One edge fresh, one edge stale. This will be pruned with
+		// strict pruning activated.
 		{
 			Node1: &testChannelEnd{
 				Alias: "c",
@@ -1998,47 +2015,57 @@ func TestPruneChannelGraphStaleEdges(t *testing.T) {
 				},
 			},
 			Capacity:  100000,
-			ChannelID: 4,
+			ChannelID: 5,
 		},
 
 		// Both edges fresh.
symmetricTestChannel("g", "h", 100000, &testChannelPolicy{ LastUpdate: freshTimestamp, - }, 5), + }, 6), - // Both edges stale, only one pruned. + // Both edges stale, only one pruned. This should be pruned for + // both normal and strict pruning. symmetricTestChannel("e", "f", 100000, &testChannelPolicy{ LastUpdate: staleTimestamp, - }, 6), + }, 7), } - // We'll create our test graph and router backed with these test - // channels we've created. - testGraph, err := createTestGraphFromChannels(testChannels, "a") - if err != nil { - t.Fatalf("unable to create test graph: %v", err) + for _, strictPruning := range []bool{true, false} { + // We'll create our test graph and router backed with these test + // channels we've created. + testGraph, err := createTestGraphFromChannels(testChannels, "a") + if err != nil { + t.Fatalf("unable to create test graph: %v", err) + } + defer testGraph.cleanUp() + + const startingHeight = 100 + ctx, cleanUp, err := createTestCtxFromGraphInstance( + startingHeight, testGraph, strictPruning, + ) + if err != nil { + t.Fatalf("unable to create test context: %v", err) + } + defer cleanUp() + + // All of the channels should exist before pruning them. + assertChannelsPruned(t, ctx.graph, testChannels) + + // Proceed to prune the channels - only the last one should be pruned. + if err := ctx.router.pruneZombieChans(); err != nil { + t.Fatalf("unable to prune zombie channels: %v", err) + } + + // We expect channels that have either both edges stale, or one edge + // stale with both known. + var prunedChannels []uint64 + if strictPruning { + prunedChannels = []uint64{2, 5, 7} + } else { + prunedChannels = []uint64{2, 7} + } + assertChannelsPruned(t, ctx.graph, testChannels, prunedChannels...) } - defer testGraph.cleanUp() - - const startingHeight = 100 - ctx, cleanUp, err := createTestCtxFromGraphInstance( - startingHeight, testGraph, - ) - if err != nil { - t.Fatalf("unable to create test context: %v", err) - } - defer cleanUp() - - // All of the channels should exist before pruning them. - assertChannelsPruned(t, ctx.graph, testChannels) - - // Proceed to prune the channels - only the last one should be pruned. 
@@ -2147,7 +2174,7 @@ func testPruneChannelGraphDoubleDisabled(t *testing.T, assumeValid bool) {
 
 	const startingHeight = 100
 	ctx, cleanUp, err := createTestCtxFromGraphInstanceAssumeValid(
-		startingHeight, testGraph, assumeValid,
+		startingHeight, testGraph, assumeValid, false,
 	)
 	if err != nil {
 		t.Fatalf("unable to create test context: %v", err)
@@ -2529,9 +2556,9 @@ func TestUnknownErrorSource(t *testing.T) {
 
 	const startingBlockHeight = 101
 
-	ctx, cleanUp, err := createTestCtxFromGraphInstance(startingBlockHeight,
-		testGraph)
-
+	ctx, cleanUp, err := createTestCtxFromGraphInstance(
+		startingBlockHeight, testGraph, false,
+	)
 	defer cleanUp()
 	if err != nil {
 		t.Fatalf("unable to create router: %v", err)
@@ -2668,7 +2695,7 @@ func TestSendToRouteStructuredError(t *testing.T) {
 	const startingBlockHeight = 101
 
 	ctx, cleanUp, err := createTestCtxFromGraphInstance(
-		startingBlockHeight, testGraph,
+		startingBlockHeight, testGraph, false,
 	)
 	if err != nil {
 		t.Fatalf("unable to create router: %v", err)
@@ -2904,7 +2931,7 @@ func TestSendToRouteMaxHops(t *testing.T) {
 	const startingBlockHeight = 101
 
 	ctx, cleanUp, err := createTestCtxFromGraphInstance(
-		startingBlockHeight, testGraph,
+		startingBlockHeight, testGraph, false,
 	)
 	if err != nil {
 		t.Fatalf("unable to create router: %v", err)
@@ -3018,7 +3045,7 @@ func TestBuildRoute(t *testing.T) {
 	const startingBlockHeight = 101
 
 	ctx, cleanUp, err := createTestCtxFromGraphInstance(
-		startingBlockHeight, testGraph,
+		startingBlockHeight, testGraph, false,
 	)
 	if err != nil {
 		t.Fatalf("unable to create router: %v", err)
diff --git a/rpcserver.go b/rpcserver.go
index 85a0ecef..dd7e42c9 100644
--- a/rpcserver.go
+++ b/rpcserver.go
@@ -2344,7 +2344,7 @@ func abandonChanFromGraph(chanGraph *channeldb.ChannelGraph,
 
 	// If the channel ID is still in the graph, then that means the channel
 	// is still open, so we'll now move to purge it from the graph.
-	return chanGraph.DeleteChannelEdges(chanID)
+	return chanGraph.DeleteChannelEdges(false, chanID)
 }
 
 // AbandonChannel removes all channel state from the database except for a
diff --git a/sample-lnd.conf b/sample-lnd.conf
index 615c8563..4659c1ce 100644
--- a/sample-lnd.conf
+++ b/sample-lnd.conf
@@ -489,6 +489,12 @@ bitcoin.node=btcd
 ; other backend!
 ; --routing.assumechanvalid=true
 
+; If set to true, then we'll prune a channel if only a single edge is seen as
+; being stale. This results in a more compact channel graph, and is also
+; helpful for neutrino nodes, as they'll only maintain edges where both nodes
+; are seen as being live from their PoV.
+; --routing.strictgraphpruning=true + [Btcd] ; The base directory that contains the node's data, logs, configuration file, diff --git a/server.go b/server.go index fbc70699..688b9cda 100644 --- a/server.go +++ b/server.go @@ -768,6 +768,8 @@ func newServer(cfg *Config, listenAddrs []net.Addr, s.controlTower = routing.NewControlTower(paymentControl) + strictPruning := (cfg.Bitcoin.Node == "neutrino" || + cfg.Routing.StrictZombiePruning) s.chanRouter, err = routing.New(routing.Config{ Graph: chanGraph, Chain: cc.ChainIO, @@ -784,6 +786,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, NextPaymentID: sequencer.NextID, PathFindingConfig: pathFindingConfig, Clock: clock.NewDefaultClock(), + StrictZombiePruning: strictPruning, }) if err != nil { return nil, fmt.Errorf("can't create router: %v", err)
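To summarize the server.go wiring above: strict pruning is forced on for neutrino backends and otherwise follows the config flag. The helper below is an illustrative restatement, not a function that exists in the patch:

```go
// effectiveStrictPruning mirrors the expression in newServer: neutrino nodes
// always get strict zombie pruning, while other backends opt in via config.
func effectiveStrictPruning(backendNode string, cfgStrict bool) bool {
	return backendNode == "neutrino" || cfgStrict
}
```

So `effectiveStrictPruning("neutrino", false)` is true, while btcd or bitcoind nodes only get strict pruning when `routing.strictgraphpruning` is set.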