Merge pull request #1687 from halseth/disable-inactive-channels
Send ChannelUpdate with Disabled bit if channel inactive for 20 minutes
Commit 147596047f
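For context: the Disabled bit named in the PR title lives in the flags field of the gossip ChannelUpdate message. A minimal sketch of toggling it, using the lnwire names that appear in the server.go diff below (illustrative only, not lnd's full API surface):

    package main

    import (
        "fmt"

        "github.com/lightningnetwork/lnd/lnwire"
    )

    func main() {
        // Mirror what announceChanStatus does below: flip the Disabled
        // bit on an update before re-signing and broadcasting it.
        update := &lnwire.ChannelUpdate{}

        update.Flags |= lnwire.ChanUpdateDisabled                // mark disabled
        fmt.Println(update.Flags&lnwire.ChanUpdateDisabled != 0) // true

        update.Flags &^= lnwire.ChanUpdateDisabled               // mark active
        fmt.Println(update.Flags&lnwire.ChanUpdateDisabled != 0) // false
    }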
config.go (55 lines changed)
@@ -28,27 +28,28 @@ import (
 )
 
 const (
 	defaultConfigFilename     = "lnd.conf"
 	defaultDataDirname        = "data"
 	defaultChainSubDirname    = "chain"
 	defaultGraphSubDirname    = "graph"
 	defaultTLSCertFilename    = "tls.cert"
 	defaultTLSKeyFilename     = "tls.key"
 	defaultAdminMacFilename   = "admin.macaroon"
 	defaultReadMacFilename    = "readonly.macaroon"
 	defaultInvoiceMacFilename = "invoice.macaroon"
 	defaultLogLevel           = "info"
 	defaultLogDirname         = "logs"
 	defaultLogFilename        = "lnd.log"
 	defaultRPCPort            = 10009
 	defaultRESTPort           = 8080
 	defaultPeerPort           = 9735
 	defaultRPCHost            = "localhost"
 	defaultMaxPendingChannels = 1
 	defaultNoEncryptWallet    = false
 	defaultTrickleDelay       = 30 * 1000
+	defaultInactiveChanTimeout = 20 * time.Minute
 	defaultMaxLogFiles        = 3
 	defaultMaxLogFileSize     = 10
 
 	defaultTorSOCKSPort = 9050
 	defaultTorDNSHost   = "soa.nodes.lightning.directory"
@@ -227,7 +228,8 @@ type config struct {
 
 	NoEncryptWallet bool `long:"noencryptwallet" description:"If set, wallet will be encrypted using the default passphrase."`
 	TrickleDelay    int  `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"`
+	InactiveChanTimeout time.Duration `long:"inactivechantimeout" description:"If a channel has been inactive for the set time, send a ChannelUpdate disabling it."`
 
 	Alias string `long:"alias" description:"The node alias. Used as a moniker by peers and intelligence services"`
 	Color string `long:"color" description:"The color of the node in hex format (i.e. '#3399FF'). Used to customize node appearance in intelligence services"`
@@ -300,10 +302,11 @@ func loadConfig() (*config, error) {
 			MinChannelSize: int64(minChanFundingSize),
 			MaxChannelSize: int64(maxFundingAmount),
 		},
 		TrickleDelay:        defaultTrickleDelay,
+		InactiveChanTimeout: defaultInactiveChanTimeout,
 		Alias:               defaultAlias,
 		Color:               defaultColor,
 		MinChanSize:         int64(minChanFundingSize),
 		Tor: &torConfig{
 			SOCKS: defaultTorSOCKS,
 			DNS:   defaultTorDNS,
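With these defaults wired in, the timeout can be overridden like any other lnd option; setting it to 0 turns the status watcher off entirely (see watchChannelStatus in the server.go diff below). For example, in lnd.conf (the 5m value here is purely illustrative):

    [Application Options]
    ; Announce a channel as disabled once it has been inactive for 5 minutes,
    ; rather than the default 20 minutes. The integration test below passes
    ; the equivalent command line flag: --inactivechantimeout=2s
    inactivechantimeout=5m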
htlcswitch/switch.go

@@ -1902,6 +1902,16 @@ func (s *Switch) getLinkByShortID(chanID lnwire.ShortChannelID) (ChannelLink, error) {
 	return link, nil
 }
 
+// HasActiveLink returns true if the given channel ID has a link in the link
+// index.
+func (s *Switch) HasActiveLink(chanID lnwire.ChannelID) bool {
+	s.indexMtx.RLock()
+	defer s.indexMtx.RUnlock()
+
+	_, ok := s.linkIndex[chanID]
+	return ok
+}
+
 // RemoveLink purges the switch of any link associated with chanID. If a pending
 // or active link is not found, this method does nothing. Otherwise, the method
 // returns after the link has been completely shutdown.
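A short usage sketch: the status watcher in server.go (below) derives a channel ID from the funding outpoint and asks the switch whether a link is live. The helper name shouldDisable is hypothetical; the two calls it makes are the ones used by this PR:

    package chanstatus

    import (
        "github.com/btcsuite/btcd/wire"
        "github.com/lightningnetwork/lnd/htlcswitch"
        "github.com/lightningnetwork/lnd/lnwire"
    )

    // shouldDisable reports whether a channel's next update should carry the
    // Disabled bit, based purely on link presence in the switch.
    func shouldDisable(s *htlcswitch.Switch, fundingOutpoint wire.OutPoint) bool {
        chanID := lnwire.NewChanIDFromOutPoint(&fundingOutpoint)
        return !s.HasActiveLink(chanID)
    }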
lnd_test.go (108 lines changed)
@@ -10817,6 +10817,8 @@ func testRouteFeeCutoff(net *lntest.NetworkHarness, t *harnessTest) {
 	if err != nil {
 		t.Fatalf("unable to create carol's node: %v", err)
 	}
+	defer shutdownAndAssert(net, t, carol)
+
 	ctxt, _ = context.WithTimeout(ctxb, timeout)
 	if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil {
 		t.Fatalf("unable to connect carol to alice: %v", err)
@@ -10837,6 +10839,8 @@ func testRouteFeeCutoff(net *lntest.NetworkHarness, t *harnessTest) {
 	if err != nil {
 		t.Fatalf("unable to create dave's node: %v", err)
 	}
+	defer shutdownAndAssert(net, t, dave)
+
 	ctxt, _ = context.WithTimeout(ctxb, timeout)
 	if err := net.ConnectNodes(ctxt, dave, net.Bob); err != nil {
 		t.Fatalf("unable to connect dave to bob: %v", err)
@@ -11047,13 +11051,6 @@ func testRouteFeeCutoff(net *lntest.NetworkHarness, t *harnessTest) {
 	closeChannelAndAssert(ctxt, t, net, net.Bob, chanPointBobDave, false)
 	ctxt, _ = context.WithTimeout(ctxb, timeout)
 	closeChannelAndAssert(ctxt, t, net, carol, chanPointCarolDave, false)
-
-	if err := net.ShutdownNode(carol); err != nil {
-		t.Fatalf("unable to shut down carol: %v", err)
-	}
-	if err := net.ShutdownNode(dave); err != nil {
-		t.Fatalf("unable to shut down dave: %v", err)
-	}
 }
 
 // testSendUpdateDisableChannel ensures that a channel update with the disable
@@ -11079,6 +11076,7 @@ func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) {
 		t.Fatalf("unable to create carol's node: %v", err)
 	}
 	defer shutdownAndAssert(net, t, carol)
 
 	if err := net.ConnectNodes(ctxb, net.Alice, carol); err != nil {
 		t.Fatalf("unable to connect alice to carol: %v", err)
 	}
@@ -11087,6 +11085,34 @@ func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) {
 		ctxt, t, net, net.Alice, carol, chanAmt, 0, false,
 	)
 
+	// We create a new node Eve that has an inactive channel timeout of
+	// just 2 seconds (down from the default 20m). It will be used to test
+	// channel updates for channels going inactive.
+	eve, err := net.NewNode("Eve", []string{"--inactivechantimeout=2s"})
+	if err != nil {
+		t.Fatalf("unable to create eve's node: %v", err)
+	}
+	defer shutdownAndAssert(net, t, eve)
+
+	// Give Eve some coins.
+	err = net.SendCoins(ctxb, btcutil.SatoshiPerBitcoin, eve)
+	if err != nil {
+		t.Fatalf("unable to send coins to eve: %v", err)
+	}
+
+	// Connect Eve to Carol and Bob, and open a channel to Carol.
+	if err := net.ConnectNodes(ctxb, eve, carol); err != nil {
+		t.Fatalf("unable to connect eve to carol: %v", err)
+	}
+	if err := net.ConnectNodes(ctxb, eve, net.Bob); err != nil {
+		t.Fatalf("unable to connect eve to bob: %v", err)
+	}
+
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	chanPointEveCarol := openChannelAndAssert(
+		ctxt, t, net, eve, carol, chanAmt, 0, false,
+	)
+
 	// Launch a node for Dave which will connect to Bob in order to receive
 	// graph updates from. This will ensure that the channel updates are
 	// propagated throughout the network.
@@ -11099,7 +11125,7 @@ func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) {
 		t.Fatalf("unable to connect bob to dave: %v", err)
 	}
 
-	daveUpdates, quit := subscribeGraphNotifications(t, ctxb, net.Alice)
+	daveUpdates, quit := subscribeGraphNotifications(t, ctxb, dave)
 	defer close(quit)
 
 	// We should expect to see a channel update with the default routing
@@ -11111,19 +11137,75 @@ func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) {
 		Disabled:      true,
 	}
 
+	// Let Carol go offline. Since Eve has an inactive timeout of 2s, we
+	// expect her to send an update disabling the channel.
+	restartCarol, err := net.SuspendNode(carol)
+	if err != nil {
+		t.Fatalf("unable to suspend carol: %v", err)
+	}
+	waitForChannelUpdate(
+		t, daveUpdates, eve.PubKeyStr, expectedPolicy,
+		chanPointEveCarol,
+	)
+
+	// We restart Carol. Since the channel now becomes active again, Eve
+	// should send a ChannelUpdate setting the channel no longer disabled.
+	if err := restartCarol(); err != nil {
+		t.Fatalf("unable to restart carol: %v", err)
+	}
+
+	expectedPolicy.Disabled = false
+	waitForChannelUpdate(
+		t, daveUpdates, eve.PubKeyStr, expectedPolicy,
+		chanPointEveCarol,
+	)
+
 	// Close Alice's channels with Bob and Carol cooperatively and
 	// unilaterally respectively.
 	ctxt, _ = context.WithTimeout(ctxb, timeout)
-	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAliceBob, false)
+	_, _, err = net.CloseChannel(ctxt, net.Alice, chanPointAliceBob, false)
+	if err != nil {
+		t.Fatalf("unable to close channel: %v", err)
+	}
+
 	ctxt, _ = context.WithTimeout(ctxb, timeout)
-	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPointAliceCarol, true)
+	_, _, err = net.CloseChannel(ctxt, net.Alice, chanPointAliceCarol, true)
+	if err != nil {
+		t.Fatalf("unable to close channel: %v", err)
+	}
 
-	// Now that the channels have been closed, we should receive an update
-	// marking each as disabled.
+	// Now that the channel close processes have been started, we should
+	// receive an update marking each as disabled.
+	expectedPolicy.Disabled = true
 	waitForChannelUpdate(
 		t, daveUpdates, net.Alice.PubKeyStr, expectedPolicy,
 		chanPointAliceBob, chanPointAliceCarol,
 	)
+
+	// Finally, close the channels by mining the closing transactions.
+	_, err = waitForNTxsInMempool(net.Miner.Node, 2, timeout)
+	if err != nil {
+		t.Fatalf("expected transactions not found in mempool: %v", err)
+	}
+	mineBlocks(t, net, 1)
+
+	// Also do this check for Eve's channel with Carol.
+	ctxt, _ = context.WithTimeout(ctxb, timeout)
+	_, _, err = net.CloseChannel(ctxt, eve, chanPointEveCarol, false)
+	if err != nil {
+		t.Fatalf("unable to close channel: %v", err)
+	}
+
+	waitForChannelUpdate(
+		t, daveUpdates, eve.PubKeyStr, expectedPolicy,
+		chanPointEveCarol,
+	)
+
+	_, err = waitForNTxsInMempool(net.Miner.Node, 1, timeout)
+	if err != nil {
+		t.Fatalf("expected transactions not found in mempool: %v", err)
+	}
+	mineBlocks(t, net, 1)
 }
 
 type testCase struct {
lntest/harness.go

@@ -532,7 +532,7 @@ func (n *NetworkHarness) DisconnectNodes(ctx context.Context, a, b *HarnessNode) error {
 	return nil
 }
 
 // RestartNode attempts to restart a lightning node by shutting it down
 // cleanly, then restarting the process. This function is fully blocking. Upon
 // restart, the RPC connection to the node will be re-attempted, continuing iff
 // the connection attempt is successful. If the callback parameter is non-nil,

@@ -556,6 +556,20 @@ func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error) error {
 	return node.start(n.lndErrorChan)
 }
 
+// SuspendNode stops the given node and returns a callback that can be used to
+// start it again.
+func (n *NetworkHarness) SuspendNode(node *HarnessNode) (func() error, error) {
+	if err := node.stop(); err != nil {
+		return nil, err
+	}
+
+	restart := func() error {
+		return node.start(n.lndErrorChan)
+	}
+
+	return restart, nil
+}
+
 // ShutdownNode stops an active lnd process and returns when the process has
 // exited and any temporary directories have been cleaned up.
 func (n *NetworkHarness) ShutdownNode(node *HarnessNode) error {
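Unlike ShutdownNode, which tears the node down for good and cleans up its temporary directories, SuspendNode only stops the process, so the returned callback restarts the node with its state and channels intact. That is exactly what the new integration test needs; condensed from the lnd_test.go changes above:

    // Take Carol offline; Eve's 2s inactive timeout should trigger a
    // ChannelUpdate with the Disabled bit set.
    restartCarol, err := net.SuspendNode(carol)
    if err != nil {
        t.Fatalf("unable to suspend carol: %v", err)
    }

    // ... wait for the disabling update to propagate ...

    // Bring Carol back; the channel turns active and is re-enabled.
    if err := restartCarol(); err != nil {
        t.Fatalf("unable to restart carol: %v", err)
    }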
peer.go (33 lines changed)
@@ -318,6 +318,7 @@ func (p *peer) Start() error {
 // loadActiveChannels creates indexes within the peer for tracking all active
 // channels returned by the database.
 func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
+	var activeChans []wire.OutPoint
 	for _, dbChan := range chans {
 		lnChan, err := lnwallet.NewLightningChannel(
 			p.server.cc.signer, p.server.witnessBeacon, dbChan,
@@ -425,8 +426,26 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
 		p.activeChanMtx.Lock()
 		p.activeChannels[chanID] = lnChan
 		p.activeChanMtx.Unlock()
+
+		activeChans = append(activeChans, *chanPoint)
 	}
 
+	// As a final measure we launch a goroutine that will ensure the
+	// channels are not currently disabled, as that would make us skip
+	// them during path finding.
+	go func() {
+		for _, chanPoint := range activeChans {
+			// Set the channel disabled=false by sending out a new
+			// ChannelUpdate. If this channel is already active,
+			// the update won't be sent.
+			err := p.server.announceChanStatus(chanPoint, false)
+			if err != nil {
+				peerLog.Errorf("unable to send out active "+
+					"channel update: %v", err)
+			}
+		}
+	}()
+
 	return nil
 }
@@ -1690,8 +1709,11 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, error) {
 			channel:           channel,
 			unregisterChannel: p.server.htlcSwitch.RemoveLink,
 			broadcastTx:       p.server.cc.wallet.PublishTransaction,
-			disableChannel:    p.server.disableChannel,
+			disableChannel: func(op wire.OutPoint) error {
+				return p.server.announceChanStatus(op, true)
+			},
 			quit: p.quit,
 		},
 		deliveryAddr,
 		feePerKw,
@@ -1750,8 +1772,11 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 			channel:           channel,
 			unregisterChannel: p.server.htlcSwitch.RemoveLink,
 			broadcastTx:       p.server.cc.wallet.PublishTransaction,
-			disableChannel:    p.server.disableChannel,
+			disableChannel: func(op wire.OutPoint) error {
+				return p.server.announceChanStatus(op, true)
+			},
 			quit: p.quit,
 		},
 		deliveryAddr,
 		req.TargetFeePerKw,
server.go (236 lines changed)
@@ -166,6 +166,11 @@ type server struct {
 	// changed since last start.
 	currentNodeAnn *lnwire.NodeAnnouncement
 
+	// sentDisabled is used to keep track of the disabled flag of the last
+	// sent ChannelUpdate from announceChanStatus.
+	sentDisabled    map[wire.OutPoint]bool
+	sentDisabledMtx sync.Mutex
+
 	quit chan struct{}
 
 	wg sync.WaitGroup
@@ -275,6 +280,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
 		inboundPeers:           make(map[string]*peer),
 		outboundPeers:          make(map[string]*peer),
 		peerConnectedListeners: make(map[string][]chan<- lnpeer.Peer),
+		sentDisabled:           make(map[wire.OutPoint]bool),
 
 		globalFeatures: lnwire.NewFeatureVector(globalFeatures,
 			lnwire.GlobalFeatures),
@@ -676,7 +682,9 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
 				return ErrServerShuttingDown
 			}
 		},
-		DisableChannel: s.disableChannel,
+		DisableChannel: func(op wire.OutPoint) error {
+			return s.announceChanStatus(op, true)
+		},
 	}, chanDB)
 
 	s.breachArbiter = newBreachArbiter(&BreachConfig{
@@ -985,6 +993,11 @@ func (s *server) Start() error {
 		srvrLog.Infof("Auto peer bootstrapping is disabled")
 	}
 
+	// Start a goroutine that will periodically send out ChannelUpdates
+	// based on a channel's status.
+	s.wg.Add(1)
+	go s.watchChannelStatus()
+
 	return nil
 }
@@ -2855,10 +2868,23 @@ func (s *server) fetchNodeAdvertisedAddr(pub *btcec.PublicKey) (net.Addr, error) {
 	return node.Addresses[0], nil
 }
 
-// disableChannel disables a channel, resulting in it not being able to forward
-// payments. This is done by sending a new channel update across the network
-// with the disabled flag set.
-func (s *server) disableChannel(op wire.OutPoint) error {
+// announceChanStatus disables a channel if disabled=true, otherwise activates
+// it. This is done by sending a new channel update across the network with the
+// disabled flag set accordingly. The result of disabling the channel is it not
+// being able to forward payments.
+func (s *server) announceChanStatus(op wire.OutPoint, disabled bool) error {
+	s.sentDisabledMtx.Lock()
+	defer s.sentDisabledMtx.Unlock()
+
+	// If we have already sent out an update reflecting the current status,
+	// skip this channel.
+	alreadyDisabled, ok := s.sentDisabled[op]
+	if ok && alreadyDisabled == disabled {
+		return nil
+	}
+
+	srvrLog.Debugf("Announcing channel(%v) disabled=%v", op, disabled)
+
 	// Retrieve the latest update for this channel. We'll use this
 	// as our starting point to send the new update.
 	chanUpdate, err := s.fetchLastChanUpdateByOutPoint(op)
@@ -2866,12 +2892,22 @@ func (s *server) announceChanStatus(op wire.OutPoint, disabled bool) error {
 		return err
 	}
 
-	// Set the bit responsible for marking a channel as disabled.
-	chanUpdate.Flags |= lnwire.ChanUpdateDisabled
+	if disabled {
+		// Set the bit responsible for marking a channel as disabled.
+		chanUpdate.Flags |= lnwire.ChanUpdateDisabled
+	} else {
+		// Clear the bit responsible for marking a channel as disabled.
+		chanUpdate.Flags &= ^lnwire.ChanUpdateDisabled
+	}
 
 	// We must now update the message's timestamp and generate a new
 	// signature.
-	chanUpdate.Timestamp = uint32(time.Now().Unix())
+	newTimestamp := uint32(time.Now().Unix())
+	if newTimestamp <= chanUpdate.Timestamp {
+		// Timestamp must increase for message to propagate.
+		newTimestamp = chanUpdate.Timestamp + 1
+	}
+	chanUpdate.Timestamp = newTimestamp
 
 	chanUpdateMsg, err := chanUpdate.DataToSign()
 	if err != nil {
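The timestamp bump matters because BOLT 7 gossip discards a channel_update whose timestamp is not strictly greater than the last one seen for that channel direction; two status flips within the same second would otherwise produce an update that never propagates. A standalone sketch of the same guard:

    package main

    import (
        "fmt"
        "time"
    )

    // nextUpdateTimestamp returns a timestamp for a new channel update that
    // is strictly greater than the previous one, even when two updates are
    // generated within the same second.
    func nextUpdateTimestamp(prev uint32) uint32 {
        ts := uint32(time.Now().Unix())
        if ts <= prev {
            ts = prev + 1
        }
        return ts
    }

    func main() {
        prev := uint32(time.Now().Unix())
        fmt.Println(nextUpdateTimestamp(prev) > prev) // always true
    }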
@@ -2889,35 +2925,59 @@ func (s *server) disableChannel(op wire.OutPoint) error {
 	}
 
 	// Once signed, we'll send the new update to all of our peers.
-	return s.applyChannelUpdate(chanUpdate)
+	if err := s.applyChannelUpdate(chanUpdate); err != nil {
+		return err
+	}
+
+	// We'll keep track of the status set in the last update we sent, to
+	// avoid sending updates if nothing has changed.
+	s.sentDisabled[op] = disabled
+
+	return nil
 }
 
-// fetchLastChanUpdateByOutPoint fetches the latest update for a channel from
-// our point of view.
-func (s *server) fetchLastChanUpdateByOutPoint(op wire.OutPoint) (*lnwire.ChannelUpdate, error) {
+// fetchLastChanUpdateByOutPoint fetches the latest policy for our direction of
+// a channel, and crafts a new ChannelUpdate with this policy. Returns an error
+// in case our ChannelEdgePolicy is not found in the database.
+func (s *server) fetchLastChanUpdateByOutPoint(op wire.OutPoint) (
+	*lnwire.ChannelUpdate, error) {
+
+	// Get the edge info and policies for this channel from the graph.
 	graph := s.chanDB.ChannelGraph()
 	info, edge1, edge2, err := graph.FetchChannelEdgesByOutpoint(&op)
 	if err != nil {
 		return nil, err
 	}
 
-	if edge1 == nil || edge2 == nil {
-		return nil, fmt.Errorf("unable to find channel(%v)", op)
+	// Helper function to extract the owner of the given policy.
+	owner := func(edge *channeldb.ChannelEdgePolicy) []byte {
+		var pubKey *btcec.PublicKey
+		switch {
+		case edge.Flags&lnwire.ChanUpdateDirection == 0:
+			pubKey, _ = info.NodeKey1()
+		case edge.Flags&lnwire.ChanUpdateDirection == 1:
+			pubKey, _ = info.NodeKey2()
+		}
+
+		// If pubKey was not found, just return nil.
+		if pubKey == nil {
+			return nil
+		}
+
+		return pubKey.SerializeCompressed()
 	}
 
-	// If we're the outgoing node on the first edge, then that
-	// means the second edge is our policy. Otherwise, the first
-	// edge is our policy.
-	var local *channeldb.ChannelEdgePolicy
-
+	// Extract the channel update from the policy we own, if any.
 	ourPubKey := s.identityPriv.PubKey().SerializeCompressed()
-	if bytes.Equal(edge1.Node.PubKeyBytes[:], ourPubKey) {
-		local = edge2
-	} else {
-		local = edge1
+	if edge1 != nil && bytes.Equal(ourPubKey, owner(edge1)) {
+		return extractChannelUpdate(info, edge1)
 	}
 
-	return extractChannelUpdate(info, local)
+	if edge2 != nil && bytes.Equal(ourPubKey, owner(edge2)) {
+		return extractChannelUpdate(info, edge2)
+	}
+
+	return nil, fmt.Errorf("unable to find channel(%v)", op)
 }
 
 // extractChannelUpdate retrieves a lnwire.ChannelUpdate message from an edge's
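The owner helper leans on the channel_update direction bit: the lowest bit of the flags selects whether the enclosed policy belongs to node_1 or node_2 of the channel (the two endpoints ordered by public key). A self-contained sketch of that mapping, with hypothetical byte-slice keys standing in for the real node keys:

    package main

    import (
        "bytes"
        "fmt"
    )

    // chanUpdateDirection mirrors lnwire.ChanUpdateDirection: bit 0 of a
    // channel update's flags picks which of the two channel endpoints
    // (ordered by public key) signed the enclosed policy.
    const chanUpdateDirection = 1

    // policyOwner returns the key of the node owning a policy with the given
    // flags. nodeKey1 and nodeKey2 are placeholders for the endpoint keys.
    func policyOwner(flags byte, nodeKey1, nodeKey2 []byte) []byte {
        if flags&chanUpdateDirection == 0 {
            return nodeKey1
        }
        return nodeKey2
    }

    func main() {
        key1, key2 := []byte{0x02, 0xaa}, []byte{0x03, 0xbb}
        fmt.Println(bytes.Equal(policyOwner(0, key1, key2), key1)) // true
        fmt.Println(bytes.Equal(policyOwner(1, key1, key2), key2)) // true
    }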
@@ -2948,22 +3008,6 @@ func extractChannelUpdate(info *channeldb.ChannelEdgeInfo,
 // applyChannelUpdate applies the channel update to the different sub-systems of
 // the server.
 func (s *server) applyChannelUpdate(update *lnwire.ChannelUpdate) error {
-	newChannelPolicy := &channeldb.ChannelEdgePolicy{
-		SigBytes:                  update.Signature.ToSignatureBytes(),
-		ChannelID:                 update.ShortChannelID.ToUint64(),
-		LastUpdate:                time.Unix(int64(update.Timestamp), 0),
-		Flags:                     update.Flags,
-		TimeLockDelta:             update.TimeLockDelta,
-		MinHTLC:                   update.HtlcMinimumMsat,
-		FeeBaseMSat:               lnwire.MilliSatoshi(update.BaseFee),
-		FeeProportionalMillionths: lnwire.MilliSatoshi(update.FeeRate),
-	}
-
-	err := s.chanRouter.UpdateEdge(newChannelPolicy)
-	if err != nil && !routing.IsError(err, routing.ErrIgnored) {
-		return err
-	}
-
 	pubKey := s.identityPriv.PubKey()
 	errChan := s.authGossiper.ProcessLocalAnnouncement(update, pubKey)
 	select {
@@ -2973,3 +3017,115 @@ func (s *server) applyChannelUpdate(update *lnwire.ChannelUpdate) error {
 		return ErrServerShuttingDown
 	}
 }
+
+// watchChannelStatus periodically queries the Switch for the status of the
+// open channels, and sends out ChannelUpdates to the network indicating their
+// active status. Currently we'll send out either a Disabled or Active update
+// if the channel has been in the same status over a given amount of time.
+//
+// NOTE: This MUST be run as a goroutine.
+func (s *server) watchChannelStatus() {
+	defer s.wg.Done()
+
+	// A map with values activeStatus is used to keep track of the first
+	// time we saw a link changing to the current active status.
+	type activeStatus struct {
+		active bool
+		time   time.Time
+	}
+	status := make(map[wire.OutPoint]activeStatus)
+
+	// We'll check in on the channel statuses every 1/4 of the timeout.
+	unchangedTimeout := cfg.InactiveChanTimeout
+	tickerTimeout := unchangedTimeout / 4
+
+	if unchangedTimeout == 0 || tickerTimeout == 0 {
+		srvrLog.Debugf("Won't watch channel statuses")
+		return
+	}
+
+	ticker := time.NewTicker(tickerTimeout)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			channels, err := s.chanDB.FetchAllOpenChannels()
+			if err != nil {
+				srvrLog.Errorf("Unable to fetch open "+
+					"channels: %v", err)
+				continue
+			}
+
+			// For each open channel, update the status. We'll copy
+			// the updated statuses to a new map, to avoid keeping
+			// the status of closed channels around.
+			newStatus := make(map[wire.OutPoint]activeStatus)
+			for _, c := range channels {
+				chanID := lnwire.NewChanIDFromOutPoint(
+					&c.FundingOutpoint)
+
+				// Get the current active status from the
+				// Switch.
+				active := s.htlcSwitch.HasActiveLink(chanID)
+
+				var currentStatus activeStatus
+
+				// If this link is not in the map, or the
+				// status has changed, set an updated active
+				// status.
+				st, ok := status[c.FundingOutpoint]
+				if !ok || st.active != active {
+					currentStatus = activeStatus{
+						active: active,
+						time:   time.Now(),
+					}
+				} else {
+					// The status is unchanged, we'll keep
+					// it as is.
+					currentStatus = st
+				}
+
+				newStatus[c.FundingOutpoint] = currentStatus
+			}
+
+			// Set the status map to the map of new statuses.
+			status = newStatus
+
+			// If no change in status has happened during the last
+			// interval, we'll send out an update. Note that we add
+			// the negative of the timeout to set our limit in the
+			// past.
+			limit := time.Now().Add(-unchangedTimeout)
+
+			// We'll send out an update for all channels that have
+			// had their status unchanged for longer than the limit.
+			// NOTE: We also make sure to activate any channel when
+			// we connect to a peer, to make them available for
+			// path finding immediately.
+			for op, st := range status {
+				disable := !st.active
+
+				if st.time.Before(limit) {
+					// Before we attempt to announce the
+					// status of the channel, we remove it
+					// from the status map such that it
+					// will need a full unchanged interval
+					// before we attempt to announce its
+					// status again.
+					delete(status, op)
+
+					err = s.announceChanStatus(op, disable)
+					if err != nil {
+						srvrLog.Errorf("Unable to "+
+							"disable channel: %v",
+							err)
+					}
+				}
+			}
+
+		case <-s.quit:
+			return
+		}
+	}
+}
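To make the timing concrete: with the 20 minute default the watcher polls the switch every 5 minutes, and with the 2 second value used in the integration test it polls every 500ms. A quick check of that arithmetic:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // The watcher checks channel statuses every quarter of the
        // inactivity timeout (see watchChannelStatus above).
        timeouts := []time.Duration{20 * time.Minute, 2 * time.Second}
        for _, timeout := range timeouts {
            fmt.Printf("timeout=%v -> tick every %v\n", timeout, timeout/4)
        }
        // Output:
        // timeout=20m0s -> tick every 5m0s
        // timeout=2s -> tick every 500ms
    }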