routing: disable FilteredChainView when AssumeChannelValid is active

In this commit, we disable attempting to determine when a channel has
been closed out on-chain whenever AssumeChannelValid is active. Since
the flag indicates that performing this operation is expensive, we do
this as a temporary optimization until we can include proofs of channels
being closed in the gossip protocol.

With this change, the only way for channels to be removed from the graph
will be once they're considered zombies: this can happen either when both
edges of a channel have their disabled bits set or when both edges
haven't had an update within the past two weeks.
This commit is contained in:
Wilmer Paulino 2019-04-17 13:25:05 -07:00
parent 7e7b8a1940
commit fd1aa478a9
No known key found for this signature in database
GPG Key ID: 6DF57B9F9514972F

@ -200,8 +200,7 @@ type Config struct {
// AssumeChannelValid toggles whether or not the router will check for // AssumeChannelValid toggles whether or not the router will check for
// spentness of channel outpoints. For neutrino, this saves long rescans // spentness of channel outpoints. For neutrino, this saves long rescans
// from blocking initial usage of the wallet. This should only be // from blocking initial usage of the daemon.
// enabled on testnet.
AssumeChannelValid bool AssumeChannelValid bool
} }
@ -381,26 +380,15 @@ func (r *ChannelRouter) Start() error {
log.Tracef("Channel Router starting") log.Tracef("Channel Router starting")
// First, we'll start the chain view instance (if it isn't already
// started).
if err := r.cfg.ChainView.Start(); err != nil {
return err
}
// Once the instance is active, we'll fetch the channel we'll receive
// notifications over.
r.newBlocks = r.cfg.ChainView.FilteredBlocks()
r.staleBlocks = r.cfg.ChainView.DisconnectedBlocks()
bestHash, bestHeight, err := r.cfg.Chain.GetBestBlock() bestHash, bestHeight, err := r.cfg.Chain.GetBestBlock()
if err != nil { if err != nil {
return err return err
} }
// If the graph has never been pruned, or hasn't fully been created yet,
// then we don't treat this as an explicit error.
if _, _, err := r.cfg.Graph.PruneTip(); err != nil { if _, _, err := r.cfg.Graph.PruneTip(); err != nil {
switch { switch {
// If the graph has never been pruned, or hasn't fully been
// created yet, then we don't treat this as an explicit error.
case err == channeldb.ErrGraphNeverPruned: case err == channeldb.ErrGraphNeverPruned:
fallthrough fallthrough
case err == channeldb.ErrGraphNotFound: case err == channeldb.ErrGraphNotFound:
@ -418,37 +406,61 @@ func (r *ChannelRouter) Start() error {
} }
} }
// Before we perform our manual block pruning, we'll construct and // If AssumeChannelValid is present, then we won't rely on pruning
// apply a fresh chain filter to the active FilteredChainView instance. // channels from the graph based on their spentness, but whether they
// We do this before, as otherwise we may miss on-chain events as the // are considered zombies or not.
// filter hasn't properly been applied. if r.cfg.AssumeChannelValid {
channelView, err := r.cfg.Graph.ChannelView() if err := r.pruneZombieChans(); err != nil {
if err != nil && err != channeldb.ErrGraphNoEdgesFound { return err
return err }
} } else {
// Otherwise, we'll use our filtered chain view to prune
log.Infof("Filtering chain using %v channels active", len(channelView)) // channels as soon as they are detected as spent on-chain.
if len(channelView) != 0 { if err := r.cfg.ChainView.Start(); err != nil {
err = r.cfg.ChainView.UpdateFilter(
channelView, uint32(bestHeight),
)
if err != nil {
return err return err
} }
}
// Before we begin normal operation of the router, we first need to // Once the instance is active, we'll fetch the channel we'll
// synchronize the channel graph to the latest state of the UTXO set. // receive notifications over.
if err := r.syncGraphWithChain(); err != nil { r.newBlocks = r.cfg.ChainView.FilteredBlocks()
return err r.staleBlocks = r.cfg.ChainView.DisconnectedBlocks()
}
// Finally, before we proceed, we'll prune any unconnected nodes from // Before we perform our manual block pruning, we'll construct
// the graph in order to ensure we maintain a tight graph of "useful" // and apply a fresh chain filter to the active
// nodes. // FilteredChainView instance. We do this before, as otherwise
err = r.cfg.Graph.PruneGraphNodes() // we may miss on-chain events as the filter hasn't properly
if err != nil && err != channeldb.ErrGraphNodesNotFound { // been applied.
return err channelView, err := r.cfg.Graph.ChannelView()
if err != nil && err != channeldb.ErrGraphNoEdgesFound {
return err
}
log.Infof("Filtering chain using %v channels active",
len(channelView))
if len(channelView) != 0 {
err = r.cfg.ChainView.UpdateFilter(
channelView, uint32(bestHeight),
)
if err != nil {
return err
}
}
// Before we begin normal operation of the router, we first need
// to synchronize the channel graph to the latest state of the
// UTXO set.
if err := r.syncGraphWithChain(); err != nil {
return err
}
// Finally, before we proceed, we'll prune any unconnected nodes
// from the graph in order to ensure we maintain a tight graph
// of "useful" nodes.
err = r.cfg.Graph.PruneGraphNodes()
if err != nil && err != channeldb.ErrGraphNodesNotFound {
return err
}
} }
r.wg.Add(1) r.wg.Add(1)
@ -465,10 +477,14 @@ func (r *ChannelRouter) Stop() error {
return nil return nil
} }
log.Infof("Channel Router shutting down") log.Tracef("Channel Router shutting down")
if err := r.cfg.ChainView.Stop(); err != nil { // Our filtered chain view could've only been started if
return err // AssumeChannelValid isn't present.
if !r.cfg.AssumeChannelValid {
if err := r.cfg.ChainView.Stop(); err != nil {
return err
}
} }
close(r.quit) close(r.quit)
@ -562,9 +578,9 @@ func (r *ChannelRouter) syncGraphWithChain() error {
log.Infof("Syncing channel graph from height=%v (hash=%v) to height=%v "+ log.Infof("Syncing channel graph from height=%v (hash=%v) to height=%v "+
"(hash=%v)", pruneHeight, pruneHash, bestHeight, bestHash) "(hash=%v)", pruneHeight, pruneHash, bestHeight, bestHash)
// If we're not yet caught up, then we'll walk forward in the chain in // If we're not yet caught up, then we'll walk forward in the chain
// the chain pruning the channel graph with each new block in the chain // pruning the channel graph with each new block that hasn't yet been
// that hasn't yet been consumed by the channel graph. // consumed by the channel graph.
var numChansClosed uint32 var numChansClosed uint32
for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ { for nextHeight := pruneHeight + 1; nextHeight <= uint32(bestHeight); nextHeight++ {
// Using the next height, request a manual block pruning from // Using the next height, request a manual block pruning from
@ -1121,21 +1137,23 @@ func (r *ChannelRouter) processUpdate(msg interface{}) error {
fundingPoint, msg.ChannelID, msg.Capacity) fundingPoint, msg.ChannelID, msg.Capacity)
// As a new edge has been added to the channel graph, we'll // As a new edge has been added to the channel graph, we'll
// update the current UTXO filter within our active // update the current UTXO filter, if AssumeChannelValid is not
// FilteredChainView so we are notified if/when this channel is // present, within our active FilteredChainView so we are
// closed. // notified if/when this channel is closed.
filterUpdate := []channeldb.EdgePoint{ if !r.cfg.AssumeChannelValid {
{ filterUpdate := []channeldb.EdgePoint{
FundingPkScript: fundingPkScript, {
OutPoint: *fundingPoint, FundingPkScript: fundingPkScript,
}, OutPoint: *fundingPoint,
} },
err = r.cfg.ChainView.UpdateFilter( }
filterUpdate, atomic.LoadUint32(&r.bestHeight), err = r.cfg.ChainView.UpdateFilter(
) filterUpdate, atomic.LoadUint32(&r.bestHeight),
if err != nil { )
return errors.Errorf("unable to update chain "+ if err != nil {
"view: %v", err) return errors.Errorf("unable to update chain "+
"view: %v", err)
}
} }
case *channeldb.ChannelEdgePolicy: case *channeldb.ChannelEdgePolicy: