package discovery

import (
	"bytes"
	"errors"
	"fmt"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/lnpeer"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/multimutex"
	"github.com/lightningnetwork/lnd/routing"
	"github.com/lightningnetwork/lnd/ticker"
)

var (
	// ErrGossiperShuttingDown is an error that is returned if the gossiper
	// is in the process of being shut down.
	ErrGossiperShuttingDown = errors.New("gossiper is shutting down")

	// ErrGossipSyncerNotFound signals that we were unable to find an active
	// gossip syncer corresponding to a gossip query message received from
	// the remote peer.
	ErrGossipSyncerNotFound = errors.New("gossip syncer not found")
)

// optionalMsgFields is a set of optional message fields that external callers
// can provide that are useful when processing a specific network
// announcement.
type optionalMsgFields struct {
	capacity     *btcutil.Amount
	channelPoint *wire.OutPoint
}

// apply applies the optional fields within the functional options.
func (f *optionalMsgFields) apply(optionalMsgFields ...OptionalMsgField) {
	for _, optionalMsgField := range optionalMsgFields {
		optionalMsgField(f)
	}
}

// OptionalMsgField is a functional option parameter that can be used to
// provide external information that is not included within a network message
// but is useful when processing it.
type OptionalMsgField func(*optionalMsgFields)

// ChannelCapacity is an optional field that lets the gossiper know of the
// capacity of a channel.
func ChannelCapacity(capacity btcutil.Amount) OptionalMsgField {
	return func(f *optionalMsgFields) {
		f.capacity = &capacity
	}
}

// ChannelPoint is an optional field that lets the gossiper know of the
// outpoint of a channel.
func ChannelPoint(op wire.OutPoint) OptionalMsgField {
	return func(f *optionalMsgFields) {
		f.channelPoint = &op
	}
}

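// The functional options above are typically passed alongside a local channel
// announcement. Illustrative sketch only; the gossiper, chanAnn, localKey,
// fundingTxid, and capacity values below are hypothetical stand-ins and not
// part of this package:
//
//	capacity := btcutil.Amount(1000000)
//	op := wire.OutPoint{Hash: fundingTxid, Index: 0}
//	errChan := gossiper.ProcessLocalAnnouncement(
//		chanAnn, localKey,
//		ChannelCapacity(capacity), ChannelPoint(op),
//	)
//	if err := <-errChan; err != nil {
//		log.Errorf("unable to process announcement: %v", err)
//	}
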
// networkMsg couples a routing related wire message with the peer that
// originally sent it.
type networkMsg struct {
	peer              lnpeer.Peer
	source            *btcec.PublicKey
	msg               lnwire.Message
	optionalMsgFields *optionalMsgFields

	isRemote bool

	err chan error
}

// chanPolicyUpdateRequest is a request that is sent to the server when a caller
// wishes to update the channel policy (e.g. fees) for a particular set of
// channels. New ChannelUpdate messages will be crafted to be sent out during
// the next broadcast epoch and the fee updates committed to the lower layer.
type chanPolicyUpdateRequest struct {
	targetChans []wire.OutPoint
	newSchema   routing.ChannelPolicy

	errResp chan error
}

// Config defines the configuration for the service. ALL elements within the
// configuration MUST be non-nil for the service to carry out its duties.
type Config struct {
	// ChainHash is a hash that indicates the resident chain of the
	// AuthenticatedGossiper. Any announcements that don't match this
	// chain hash will be ignored.
	//
	// TODO(roasbeef): eventually make into map so can de-multiplex
	// incoming announcements
	// * also need to do same for Notifier
	ChainHash chainhash.Hash

	// Router is the subsystem which is responsible for managing the
	// topology of the lightning network. After incoming channel, node,
	// and channel update announcements are validated, they are sent to
	// the router in order to be included in the LN graph.
	Router routing.ChannelGraphSource

	// ChanSeries is an interface that provides access to a time series
	// view of the current known channel graph. Each GossipSyncer enabled
	// peer will utilize this in order to create and respond to channel
	// graph time series queries.
	ChanSeries ChannelGraphTimeSeries

	// Notifier is used for receiving notifications of incoming blocks.
	// With each new incoming block found we process previously premature
	// announcements.
	//
	// TODO(roasbeef): could possibly just replace this with an epoch
	// channel.
	Notifier chainntnfs.ChainNotifier

	// Broadcast broadcasts a particular set of announcements to all peers
	// that the daemon is connected to. If supplied, the exclude parameter
	// indicates that the target peer should be excluded from the
	// broadcast.
	Broadcast func(skips map[routing.Vertex]struct{},
		msg ...lnwire.Message) error

	// NotifyWhenOnline is a function that allows the gossiper to be
	// notified when a certain peer comes online, allowing it to
	// retry sending a peer message.
	//
	// NOTE: The peerChan channel must be buffered.
	//
	// TODO(wilmer): use [33]byte to avoid unnecessary serializations.
	NotifyWhenOnline func(peer *btcec.PublicKey, peerChan chan<- lnpeer.Peer)

	// NotifyWhenOffline is a function that allows the gossiper to be
	// notified when a certain peer disconnects, allowing it to request a
	// notification for when it reconnects.
	NotifyWhenOffline func(peerPubKey [33]byte) <-chan struct{}

	// ProofMatureDelta is the number of confirmations which is needed
	// before exchanging the channel announcement proofs.
	ProofMatureDelta uint32

	// TrickleDelay is the period of the trickle timer which flushes to
	// the network the pending batch of new announcements we've received
	// since the last trickle tick.
	TrickleDelay time.Duration

	// RetransmitDelay is the period of a timer which indicates that we
	// should check if we need to re-broadcast any of our personal
	// channels.
	RetransmitDelay time.Duration

	// WaitingProofStore is a persistent storage of partial channel proof
	// announcement messages. We use it to buffer half of the material
	// needed to reconstruct a full authenticated channel announcement.
	// Once we receive the other half of the channel proof, we'll be able
	// to properly validate it and re-broadcast it out to the network.
	//
	// TODO(wilmer): make interface to prevent channeldb dependency.
	WaitingProofStore *channeldb.WaitingProofStore

	// MessageStore is a persistent storage of gossip messages which we
	// will use to determine which messages need to be resent for a given
	// peer.
	MessageStore GossipMessageStore

	// AnnSigner is an instance of the MessageSigner interface which will
	// be used to manually sign any outgoing channel updates. The signer
	// implementation should be backed by the public key of the backing
	// Lightning node.
	//
	// TODO(roasbeef): extract ann crafting + sign from fundingMgr into
	// here?
	AnnSigner lnwallet.MessageSigner

	// NumActiveSyncers is the number of peers for which we should have
	// active syncers with. After reaching NumActiveSyncers, any future
	// gossip syncers will be passive.
	NumActiveSyncers int

	// RotateTicker is a ticker responsible for notifying the SyncManager
	// when it should rotate its active syncers. A single active syncer
	// with a chansSynced state will be exchanged for a passive syncer in
	// order to ensure we don't keep syncing with the same peers.
	RotateTicker ticker.Ticker

	// HistoricalSyncTicker is a ticker responsible for notifying the
	// syncManager when it should attempt a historical sync with a gossip
	// sync peer.
	HistoricalSyncTicker ticker.Ticker

	// ActiveSyncerTimeoutTicker is a ticker responsible for notifying the
	// syncManager when it should attempt to start the next pending
	// activeSyncer due to the current one not completing its state
	// machine within the timeout.
	ActiveSyncerTimeoutTicker ticker.Ticker
}

// AuthenticatedGossiper is a subsystem which is responsible for receiving
// announcements, validating them and applying the changes to the router,
// syncing the lightning network with newly connected nodes, broadcasting
// announcements after validation, negotiating the channel announcement proofs
// exchange and handling the premature announcements. All outgoing
// announcements are expected to be properly signed as dictated in BOLT#7,
// additionally, all incoming messages are expected to be well formed and
// signed. Invalid messages will be rejected by this struct.
type AuthenticatedGossiper struct {
	// Parameters which are needed to properly handle the start and stop
	// of the service. To be used atomically.
	started uint32
	stopped uint32

	// bestHeight is the height of the block at the tip of the main chain
	// as we know it. To be used atomically.
	bestHeight uint32

	quit chan struct{}
	wg   sync.WaitGroup

	// cfg is a copy of the configuration struct that the gossiper service
	// was initialized with.
	cfg *Config

	// blockEpochs encapsulates a stream of block epochs that are sent at
	// every new block height.
	blockEpochs *chainntnfs.BlockEpochEvent

	// prematureAnnouncements maps a block height to a set of network
	// messages which are "premature" from our PoV. A message is premature
	// if it claims to be anchored in a block which is beyond the current
	// main chain tip as we know it. Premature network messages will be
	// processed once the chain tip as we know it extends to/past the
	// premature height.
	//
	// TODO(roasbeef): limit premature networkMsgs to N
	prematureAnnouncements map[uint32][]*networkMsg

	// prematureChannelUpdates is a map of ChannelUpdates we have received
	// that weren't associated with any channel we know about. We store
	// them temporarily, such that we can reprocess them when a
	// ChannelAnnouncement for the channel is received.
	prematureChannelUpdates map[uint64][]*networkMsg
	pChanUpdMtx             sync.Mutex

	// networkMsgs is a channel that carries new network broadcasted
	// messages from outside the gossiper service to be processed by the
	// networkHandler.
	networkMsgs chan *networkMsg

	// chanPolicyUpdates is a channel that requests to update the
	// forwarding policy of a set of channels are sent over.
	chanPolicyUpdates chan *chanPolicyUpdateRequest

	// selfKey is the identity public key of the backing Lightning node.
	selfKey *btcec.PublicKey

	// channelMtx is used to restrict the database access to one
	// goroutine per channel ID. This is done to ensure that when
	// the gossiper is handling an announcement, the db state stays
	// consistent between when the DB is first read until it's written.
	channelMtx *multimutex.Mutex

	rejectMtx     sync.RWMutex
	recentRejects map[uint64]struct{}

	// syncMgr is a subsystem responsible for managing the gossip syncers
	// for peers currently connected. When a new peer is connected, the
	// manager will create its accompanying gossip syncer and determine
	// whether it should have an activeSync or passiveSync sync type based
	// on how many other gossip syncers are currently active. Any
	// activeSync gossip syncers are started in a round-robin manner to
	// ensure we're not syncing with multiple peers at the same time.
	syncMgr *SyncManager

	// reliableSender is a subsystem responsible for handling reliable
	// message send requests to peers. This should only be used for
	// channels that are unadvertised at the time of handling the message
	// since if it is advertised, then peers should be able to get the
	// message from the network.
	reliableSender *reliableSender

	sync.Mutex
}

// New creates a new AuthenticatedGossiper instance, initialized with the
// passed configuration parameters.
func New(cfg Config, selfKey *btcec.PublicKey) *AuthenticatedGossiper {
	gossiper := &AuthenticatedGossiper{
		selfKey:                 selfKey,
		cfg:                     &cfg,
		networkMsgs:             make(chan *networkMsg),
		quit:                    make(chan struct{}),
		chanPolicyUpdates:       make(chan *chanPolicyUpdateRequest),
		prematureAnnouncements:  make(map[uint32][]*networkMsg),
		prematureChannelUpdates: make(map[uint64][]*networkMsg),
		channelMtx:              multimutex.NewMutex(),
		recentRejects:           make(map[uint64]struct{}),
		syncMgr: newSyncManager(&SyncManagerCfg{
			ChainHash:                 cfg.ChainHash,
			ChanSeries:                cfg.ChanSeries,
			RotateTicker:              cfg.RotateTicker,
			HistoricalSyncTicker:      cfg.HistoricalSyncTicker,
			ActiveSyncerTimeoutTicker: cfg.ActiveSyncerTimeoutTicker,
			NumActiveSyncers:          cfg.NumActiveSyncers,
		}),
	}

	gossiper.reliableSender = newReliableSender(&reliableSenderCfg{
		NotifyWhenOnline:  cfg.NotifyWhenOnline,
		NotifyWhenOffline: cfg.NotifyWhenOffline,
		MessageStore:      cfg.MessageStore,
		IsMsgStale:        gossiper.isMsgStale,
	})

	return gossiper
}

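// A caller typically wires the gossiper up roughly as follows. Illustrative
// sketch only; chainNotifier, chanRouter, chanSeries, broadcastToPeers, and
// nodeKey are hypothetical stand-ins for the server's real dependencies, and
// the intervals shown are arbitrary:
//
//	gossiper := New(Config{
//		Notifier:         chainNotifier,
//		Router:           chanRouter,
//		ChanSeries:       chanSeries,
//		Broadcast:        broadcastToPeers,
//		TrickleDelay:     time.Millisecond * 300,
//		RetransmitDelay:  time.Minute * 30,
//		NumActiveSyncers: 3,
//		// ALL remaining fields MUST also be populated.
//	}, nodeKey)
//	if err := gossiper.Start(); err != nil {
//		log.Errorf("unable to start gossiper: %v", err)
//	}
//	defer gossiper.Stop()
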
// SynchronizeNode sends a message to the service indicating it should
// synchronize lightning topology state with the target node. This method is
// to be utilized when a node connects for the first time to provide it with
// the latest topology update state. In order to accomplish this, (currently)
// the entire network graph is read from disk, then serialized to the format
// defined within the current wire protocol. This cache of graph data is then
// sent directly to the target node.
func (d *AuthenticatedGossiper) SynchronizeNode(syncPeer lnpeer.Peer) error {
	// TODO(roasbeef): need to also store sig data in db
	// * will be nice when we switch to pairing sigs would only need one ^_^

	// We'll collate all the gathered routing messages into a single slice
	// containing all the messages to be sent to the target peer.
	var announceMessages []lnwire.Message

	// We'll use this map to ensure we don't send the same node
	// announcement more than one time as one node may have many channel
	// anns we'll need to send.
	nodePubsSent := make(map[routing.Vertex]struct{})

	// As peers are expecting channel announcements before node
	// announcements, we first retrieve the initial announcement, as well
	// as the latest channel update announcement for both of the directed
	// edges that make up each channel, and queue these to be sent to the
	// peer.
	var (
		numEdges uint32
		numNodes uint32
	)
	if err := d.cfg.Router.ForEachChannel(func(chanInfo *channeldb.ChannelEdgeInfo,
		e1, e2 *channeldb.ChannelEdgePolicy) error {

		// First, using the parameters of the channel, along with the
		// channel authentication proof, we'll re-create the original
		// authenticated channel announcement. If the channel also has
		// known validated nodes, then we'll send that as well.
		if chanInfo.AuthProof != nil {
			chanAnn, e1Ann, e2Ann, err := CreateChanAnnouncement(
				chanInfo.AuthProof, chanInfo, e1, e2,
			)
			if err != nil {
				return err
			}

			announceMessages = append(announceMessages, chanAnn)
			if e1Ann != nil {
				announceMessages = append(announceMessages, e1Ann)

				// If this edge has a validated node
				// announcement, that we haven't yet sent, then
				// we'll send that as well.
				nodePub := e1.Node.PubKeyBytes
				hasNodeAnn := e1.Node.HaveNodeAnnouncement
				if _, ok := nodePubsSent[nodePub]; !ok && hasNodeAnn {
					nodeAnn, err := e1.Node.NodeAnnouncement(true)
					if err != nil {
						return err
					}

					announceMessages = append(
						announceMessages, nodeAnn,
					)
					nodePubsSent[nodePub] = struct{}{}

					numNodes++
				}
			}
			if e2Ann != nil {
				announceMessages = append(announceMessages, e2Ann)

				// If this edge has a validated node
				// announcement, that we haven't yet sent, then
				// we'll send that as well.
				nodePub := e2.Node.PubKeyBytes
				hasNodeAnn := e2.Node.HaveNodeAnnouncement
				if _, ok := nodePubsSent[nodePub]; !ok && hasNodeAnn {
					nodeAnn, err := e2.Node.NodeAnnouncement(true)
					if err != nil {
						return err
					}

					announceMessages = append(
						announceMessages, nodeAnn,
					)
					nodePubsSent[nodePub] = struct{}{}

					numNodes++
				}
			}

			numEdges++
		}

		return nil
	}); err != nil && err != channeldb.ErrGraphNoEdgesFound {
		log.Errorf("unable to sync infos with peer: %v", err)
		return err
	}

	log.Infof("Syncing channel graph state with %x, sending %v "+
		"vertexes and %v edges", syncPeer.PubKey(),
		numNodes, numEdges)

	// With all the announcement messages gathered, send them all in a
	// single batch to the target peer.
	return syncPeer.SendMessageLazy(false, announceMessages...)
}

// PropagateChanPolicyUpdate signals the AuthenticatedGossiper to update the
// channel forwarding policies for the specified channels. If no channels are
// specified, then the update will be applied to all outgoing channels from
// the source node. Policy updates are done in two stages: first, the
// AuthenticatedGossiper ensures the update has been committed by dependent
// sub-systems, then it signs and broadcasts new updates to the network.
func (d *AuthenticatedGossiper) PropagateChanPolicyUpdate(
	newSchema routing.ChannelPolicy, chanPoints ...wire.OutPoint) error {

	errChan := make(chan error, 1)
	policyUpdate := &chanPolicyUpdateRequest{
		targetChans: chanPoints,
		newSchema:   newSchema,
		errResp:     errChan,
	}

	select {
	case d.chanPolicyUpdates <- policyUpdate:
		return <-errChan
	case <-d.quit:
		return fmt.Errorf("AuthenticatedGossiper shutting down")
	}
}

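// As an illustrative sketch of the two-stage flow above (gossiper, newPolicy,
// and chanPoint are hypothetical values held by the caller):
//
//	// newPolicy is a routing.ChannelPolicy describing the desired fee
//	// schema and time lock delta for the channel at chanPoint.
//	if err := gossiper.PropagateChanPolicyUpdate(newPolicy, chanPoint); err != nil {
//		log.Errorf("unable to propagate policy update: %v", err)
//	}
//
// The call blocks until the policy has been committed and the resulting
// ChannelUpdate has been queued for the next broadcast epoch, or until the
// gossiper shuts down.
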
// Start spawns the network message handler goroutine and registers for new
// block notifications in order to properly handle the premature
// announcements.
func (d *AuthenticatedGossiper) Start() error {
	if !atomic.CompareAndSwapUint32(&d.started, 0, 1) {
		return nil
	}

	log.Info("Authenticated Gossiper is starting")

	// First we register for new notifications of newly discovered blocks.
	// We do this immediately so we'll later be able to consume any/all
	// blocks which were discovered.
	blockEpochs, err := d.cfg.Notifier.RegisterBlockEpochNtfn(nil)
	if err != nil {
		return err
	}
	d.blockEpochs = blockEpochs

	height, err := d.cfg.Router.CurrentBlockHeight()
	if err != nil {
		return err
	}
	d.bestHeight = height

	// Start the reliable sender. In case we had any pending messages ready
	// to be sent when the gossiper was last shut down, we must continue on
	// our quest to deliver them to their respective peers.
	if err := d.reliableSender.Start(); err != nil {
		return err
	}

	d.syncMgr.Start()

	d.wg.Add(1)
	go d.networkHandler()

	return nil
}

// Stop signals any active goroutines for a graceful closure.
func (d *AuthenticatedGossiper) Stop() {
	if !atomic.CompareAndSwapUint32(&d.stopped, 0, 1) {
		return
	}

	log.Info("Authenticated Gossiper is stopping")

	d.blockEpochs.Cancel()

	d.syncMgr.Stop()

	close(d.quit)
	d.wg.Wait()

	// We'll stop our reliable sender after all of the gossiper's goroutines
	// have exited to ensure nothing can cause it to continue executing.
	d.reliableSender.Stop()
}

// TODO(roasbeef): need method to get current gossip timestamp?
// * using mtx, check time rotate forward is needed?

// ProcessRemoteAnnouncement sends a new remote announcement message along with
// the peer that sent the routing message. The announcement will be processed
// then added to a queue for batched trickled announcement to all connected
// peers. Remote channel announcements should contain the announcement proof
// and be fully validated.
func (d *AuthenticatedGossiper) ProcessRemoteAnnouncement(msg lnwire.Message,
	peer lnpeer.Peer) chan error {

	errChan := make(chan error, 1)

	// For messages in the known set of channel series queries, we'll
	// dispatch the message directly to the GossipSyncer, and skip the main
	// processing loop.
	switch m := msg.(type) {
	case *lnwire.QueryShortChanIDs,
		*lnwire.QueryChannelRange,
		*lnwire.ReplyChannelRange,
		*lnwire.ReplyShortChanIDsEnd:

		syncer, ok := d.syncMgr.GossipSyncer(peer.PubKey())
		if !ok {
			log.Warnf("Gossip syncer for peer=%x not found",
				peer.PubKey())

			errChan <- ErrGossipSyncerNotFound
			return errChan
		}

		// If we've found the message target, then we'll dispatch the
		// message directly to it.
		syncer.ProcessQueryMsg(m, peer.QuitSignal())

		errChan <- nil
		return errChan

	// If a peer is updating its current update horizon, then we'll dispatch
	// that directly to the proper GossipSyncer.
	case *lnwire.GossipTimestampRange:
		syncer, ok := d.syncMgr.GossipSyncer(peer.PubKey())
		if !ok {
			log.Warnf("Gossip syncer for peer=%x not found",
				peer.PubKey())

			errChan <- ErrGossipSyncerNotFound
			return errChan
		}

		// If we've found the message target, then we'll dispatch the
		// message directly to it.
		if err := syncer.ApplyGossipFilter(m); err != nil {
			log.Warnf("Unable to apply gossip filter for peer=%x: "+
				"%v", peer.PubKey(), err)

			errChan <- err
			return errChan
		}

		errChan <- nil
		return errChan
	}

	nMsg := &networkMsg{
		msg:      msg,
		isRemote: true,
		peer:     peer,
		source:   peer.IdentityKey(),
		err:      errChan,
	}

	select {
	case d.networkMsgs <- nMsg:

	// If the peer that sent us this message is quitting, then we don't
	// need to send back an error and can return immediately.
	case <-peer.QuitSignal():
		return nil

	case <-d.quit:
		nMsg.err <- ErrGossiperShuttingDown
	}

	return nMsg.err
}

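// A caller that needs the processing result blocks on the returned channel.
// Illustrative sketch only (gossiper, remoteMsg, and remotePeer are
// hypothetical values held by the caller); note the returned channel may be
// nil if the sending peer is quitting:
//
//	errChan := gossiper.ProcessRemoteAnnouncement(remoteMsg, remotePeer)
//	if errChan != nil {
//		if err := <-errChan; err != nil {
//			log.Errorf("unable to process remote announcement: %v", err)
//		}
//	}
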
// ProcessLocalAnnouncement sends a new local announcement message along with
// the source public key of the announcement. The announcement will be
// processed then added to a queue for batched trickled announcement to all
// connected peers. Local channel announcements don't contain the announcement
// proof and will not be fully validated. Once the channel proofs are received,
// the entire channel announcement and update messages will be re-constructed
// and broadcast to the rest of the network.
func (d *AuthenticatedGossiper) ProcessLocalAnnouncement(msg lnwire.Message,
	source *btcec.PublicKey, optionalFields ...OptionalMsgField) chan error {

	optionalMsgFields := &optionalMsgFields{}
	optionalMsgFields.apply(optionalFields...)

	nMsg := &networkMsg{
		msg:               msg,
		optionalMsgFields: optionalMsgFields,
		isRemote:          false,
		source:            source,
		err:               make(chan error, 1),
	}

	select {
	case d.networkMsgs <- nMsg:
	case <-d.quit:
		nMsg.err <- ErrGossiperShuttingDown
	}

	return nMsg.err
}

// channelUpdateID is a unique identifier for ChannelUpdate messages, as
// channel updates can be identified by the (ShortChannelID, ChannelFlags)
// tuple.
type channelUpdateID struct {
	// channelID represents the set of data which is needed to
	// retrieve all necessary data to validate the channel existence.
	channelID lnwire.ShortChannelID

	// flags holds the channel flags of the update; the least-significant
	// bit must be set to 0 if the creating node corresponds to the first
	// node in the previously sent channel announcement and 1 otherwise.
	flags lnwire.ChanUpdateChanFlags
}

// msgWithSenders is a wrapper struct around a message, and the set of peers
// that originally sent us this message. Using this struct, we can ensure that
// we don't re-send a message to the peer that sent it to us in the first
// place.
type msgWithSenders struct {
	// msg is the wire message itself.
	msg lnwire.Message

	// senders is the set of peers that sent us this message.
	senders map[routing.Vertex]struct{}
}

// mergeSyncerMap is used to merge the set of senders of a particular message
// with peers that we have an active GossipSyncer with. We do this to ensure
// that we don't broadcast messages to any peers that we have active gossip
// syncers for.
func (m *msgWithSenders) mergeSyncerMap(syncers map[routing.Vertex]*GossipSyncer) {
	for peerPub := range syncers {
		m.senders[peerPub] = struct{}{}
	}
}

// deDupedAnnouncements de-duplicates announcements that have been added to the
// batch. Internally, announcements are stored in three maps
// (one each for channel announcements, channel updates, and node
// announcements). These maps keep track of unique announcements and ensure no
// announcements are duplicated. We keep the three message types separate, such
// that we can send channel announcements first, then channel updates, and
// finally node announcements when it's time to broadcast them.
type deDupedAnnouncements struct {
	// channelAnnouncements are identified by the short channel id field.
	channelAnnouncements map[lnwire.ShortChannelID]msgWithSenders

	// channelUpdates are identified by the channel update id field.
	channelUpdates map[channelUpdateID]msgWithSenders

	// nodeAnnouncements are identified by the Vertex field.
	nodeAnnouncements map[routing.Vertex]msgWithSenders

	sync.Mutex
}

// Reset operates on deDupedAnnouncements to reset the storage of
// announcements.
func (d *deDupedAnnouncements) Reset() {
	d.Lock()
	defer d.Unlock()

	d.reset()
}

// reset is the private version of the Reset method. We have this so we can
// call this method within methods that are already holding the lock.
func (d *deDupedAnnouncements) reset() {
	// Storage of each type of announcement (channel announcements, channel
	// updates, node announcements) is set to an empty map where the
	// appropriate key points to the corresponding lnwire.Message.
	d.channelAnnouncements = make(map[lnwire.ShortChannelID]msgWithSenders)
	d.channelUpdates = make(map[channelUpdateID]msgWithSenders)
	d.nodeAnnouncements = make(map[routing.Vertex]msgWithSenders)
}

// addMsg adds a new message to the current batch. If the message is already
// present in the current batch, then this new instance replaces the latter,
// and the set of senders is updated to reflect which node sent us this
// message.
func (d *deDupedAnnouncements) addMsg(message networkMsg) {
	// Depending on the message type (channel announcement, channel update,
	// or node announcement), the message is added to the corresponding map
	// in deDupedAnnouncements. Because each identifying key can have at
	// most one value, the announcements are de-duplicated, with newer ones
	// replacing older ones.
	switch msg := message.msg.(type) {

	// Channel announcements are identified by the short channel id field.
	case *lnwire.ChannelAnnouncement:
		deDupKey := msg.ShortChannelID
		sender := routing.NewVertex(message.source)

		mws, ok := d.channelAnnouncements[deDupKey]
		if !ok {
			mws = msgWithSenders{
				msg:     msg,
				senders: make(map[routing.Vertex]struct{}),
			}
			mws.senders[sender] = struct{}{}

			d.channelAnnouncements[deDupKey] = mws

			return
		}

		mws.msg = msg
		mws.senders[sender] = struct{}{}
		d.channelAnnouncements[deDupKey] = mws

	// Channel updates are identified by the (short channel id,
	// channel flags) tuple.
	case *lnwire.ChannelUpdate:
		sender := routing.NewVertex(message.source)
		deDupKey := channelUpdateID{
			msg.ShortChannelID,
			msg.ChannelFlags,
		}

		oldTimestamp := uint32(0)
		mws, ok := d.channelUpdates[deDupKey]
		if ok {
			// If we already have seen this message, record its
			// timestamp.
			oldTimestamp = mws.msg.(*lnwire.ChannelUpdate).Timestamp
		}

		// If we already had this message with a strictly newer
		// timestamp, then we'll just discard the message we got.
		if oldTimestamp > msg.Timestamp {
			return
		}

		// If the message we just got is newer than what we previously
		// have seen, or this is the first time we see it, then we'll
		// add it to our map of announcements.
		if oldTimestamp < msg.Timestamp {
			mws = msgWithSenders{
				msg:     msg,
				senders: make(map[routing.Vertex]struct{}),
			}

			// We'll mark the sender of the message in the
			// senders map.
			mws.senders[sender] = struct{}{}

			d.channelUpdates[deDupKey] = mws

			return
		}

		// Lastly, if we had seen this exact message from before, with
		// the same timestamp, we'll add the sender to the map of
		// senders, such that we can skip sending this message back in
		// the next batch.
		mws.msg = msg
		mws.senders[sender] = struct{}{}
		d.channelUpdates[deDupKey] = mws

	// Node announcements are identified by the Vertex field. Use the
	// NodeID to create the corresponding Vertex.
	case *lnwire.NodeAnnouncement:
		sender := routing.NewVertex(message.source)
		deDupKey := routing.Vertex(msg.NodeID)

		// We do the same for node announcements as we did for channel
		// updates, as they also carry a timestamp.
		oldTimestamp := uint32(0)
		mws, ok := d.nodeAnnouncements[deDupKey]
		if ok {
			oldTimestamp = mws.msg.(*lnwire.NodeAnnouncement).Timestamp
		}

		// Discard the message if it's old.
		if oldTimestamp > msg.Timestamp {
			return
		}

		// Replace if it's newer.
		if oldTimestamp < msg.Timestamp {
			mws = msgWithSenders{
				msg:     msg,
				senders: make(map[routing.Vertex]struct{}),
			}

			mws.senders[sender] = struct{}{}

			d.nodeAnnouncements[deDupKey] = mws

			return
		}

		// Add to senders map if it's the same as we had.
		mws.msg = msg
		mws.senders[sender] = struct{}{}
		d.nodeAnnouncements[deDupKey] = mws
	}
}

// AddMsgs is a helper method to add multiple messages to the announcement
// batch.
func (d *deDupedAnnouncements) AddMsgs(msgs ...networkMsg) {
	d.Lock()
	defer d.Unlock()

	for _, msg := range msgs {
		d.addMsg(msg)
	}
}

// Emit returns the set of de-duplicated announcements to be sent out during
// the next announcement epoch, in the order of channel announcements, channel
// updates, and node announcements. Each message emitted contains the set of
// peers that sent us the message. This way, we can ensure that we don't waste
// bandwidth by re-sending a message to the peer that sent it to us in the
// first place. Additionally, the set of stored messages is reset.
func (d *deDupedAnnouncements) Emit() []msgWithSenders {
	d.Lock()
	defer d.Unlock()

	// Get the total number of announcements.
	numAnnouncements := len(d.channelAnnouncements) + len(d.channelUpdates) +
		len(d.nodeAnnouncements)

	// Create an empty slice of messages with a capacity equal to the
	// total number of announcements.
	msgs := make([]msgWithSenders, 0, numAnnouncements)

	// Add the channel announcements to the batch first.
	for _, message := range d.channelAnnouncements {
		msgs = append(msgs, message)
	}

	// Then add the channel updates.
	for _, message := range d.channelUpdates {
		msgs = append(msgs, message)
	}

	// Finally add the node announcements.
	for _, message := range d.nodeAnnouncements {
		msgs = append(msgs, message)
	}

	d.reset()

	// Return the batch of messages.
	return msgs
}

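// The batch behaves as a last-write-wins store keyed per message type.
// Illustrative sketch only (upd1 and upd2 are hypothetical
// *lnwire.ChannelUpdate messages for the same channel and direction, with
// upd2 carrying the newer timestamp; peerA and peerB are hypothetical sender
// keys):
//
//	batch := deDupedAnnouncements{}
//	batch.Reset()
//	batch.AddMsgs(
//		networkMsg{msg: upd1, source: peerA},
//		networkMsg{msg: upd2, source: peerB},
//	)
//	// Emit returns a single entry containing upd2; the batch is then
//	// reset for the next trickle interval.
//	msgs := batch.Emit()
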
2017-04-01 15:33:17 +03:00
|
|
|
// networkHandler is the primary goroutine that drives this service. The roles
|
|
|
|
// of this goroutine includes answering queries related to the state of the
|
|
|
|
// network, syncing up newly connected peers, and also periodically
|
|
|
|
// broadcasting our latest topology state to all connected peers.
|
2017-03-19 21:40:25 +03:00
|
|
|
//
|
|
|
|
// NOTE: This MUST be run as a goroutine.
|
2017-04-01 15:33:17 +03:00
|
|
|
func (d *AuthenticatedGossiper) networkHandler() {
|
2017-03-19 21:40:25 +03:00
|
|
|
defer d.wg.Done()
|
|
|
|
|
2017-09-08 04:25:43 +03:00
|
|
|
// Initialize empty deDupedAnnouncements to store announcement batch.
|
|
|
|
announcements := deDupedAnnouncements{}
|
|
|
|
announcements.Reset()
|
2017-03-19 21:40:25 +03:00
|
|
|
|
2017-09-25 04:47:48 +03:00
|
|
|
retransmitTimer := time.NewTicker(d.cfg.RetransmitDelay)
|
2017-03-19 21:40:25 +03:00
|
|
|
defer retransmitTimer.Stop()
|
|
|
|
|
2017-03-28 22:08:14 +03:00
|
|
|
trickleTimer := time.NewTicker(d.cfg.TrickleDelay)
|
2017-03-19 21:40:25 +03:00
|
|
|
defer trickleTimer.Stop()
|
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// To start, we'll first check to see if there are any stale channels
|
2017-10-05 06:07:54 +03:00
|
|
|
// that we need to re-transmit.
|
|
|
|
if err := d.retransmitStaleChannels(); err != nil {
|
2018-08-29 04:35:24 +03:00
|
|
|
log.Errorf("Unable to rebroadcast stale channels: %v", err)
|
2017-10-05 06:07:54 +03:00
|
|
|
}
|
|
|
|
|
2017-11-30 03:45:08 +03:00
|
|
|
// We'll use this validation to ensure that we process jobs in their
|
|
|
|
// dependency order during parallel validation.
|
|
|
|
validationBarrier := routing.NewValidationBarrier(
|
2018-01-29 01:55:24 +03:00
|
|
|
runtime.NumCPU()*4, d.quit,
|
2017-11-30 03:45:08 +03:00
|
|
|
)
|
|
|
|
|
2017-03-19 21:40:25 +03:00
|
|
|
for {
|
|
|
|
select {
|
2017-12-15 23:56:11 +03:00
|
|
|
// A new policy update has arrived. We'll commit it to the
|
2017-08-22 09:40:02 +03:00
|
|
|
// sub-systems below us, then craft, sign, and broadcast a new
|
|
|
|
// ChannelUpdate for the set of affected clients.
|
2017-12-15 23:56:11 +03:00
|
|
|
case policyUpdate := <-d.chanPolicyUpdates:
|
2017-08-22 09:40:02 +03:00
|
|
|
// First, we'll now create new fully signed updates for
|
|
|
|
// the affected channels and also update the underlying
|
|
|
|
// graph with the new state.
|
2018-08-20 15:28:10 +03:00
|
|
|
newChanUpdates, err := d.processChanPolicyUpdate(
|
|
|
|
policyUpdate,
|
|
|
|
)
|
2017-08-22 09:40:02 +03:00
|
|
|
if err != nil {
|
2017-12-15 23:56:11 +03:00
|
|
|
log.Errorf("Unable to craft policy updates: %v",
|
|
|
|
err)
|
|
|
|
policyUpdate.errResp <- err
|
2017-08-22 09:40:02 +03:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, with the updates committed, we'll now add
|
|
|
|
// them to the announcement batch to be flushed at the
|
|
|
|
// start of the next epoch.
|
2017-11-30 03:45:08 +03:00
|
|
|
announcements.AddMsgs(newChanUpdates...)
|
2017-08-22 09:40:02 +03:00
|
|
|
|
2017-12-15 23:56:11 +03:00
|
|
|
policyUpdate.errResp <- nil
|
2017-08-22 09:40:02 +03:00
|
|
|
|
2017-03-19 21:40:25 +03:00
|
|
|
case announcement := <-d.networkMsgs:
|
2018-11-02 03:13:13 +03:00
|
|
|
switch announcement.msg.(type) {
|
discovery: update AuthenticatedGossiper to be aware of new gossipSyncers
In this commit, we update the logic in the AuthenticatedGossiper to
ensure that can properly create, manage, and dispatch messages to any
gossipSyncer instances created by the server.
With this set of changes, the gossip now has complete knowledge of the
current set of peers we're conneted to that support the new range
queries. Upon initial connect, InitSyncState will be called by the
server if the new peer understands the set of gossip queries. This will
then create a new spot in the peerSyncers map for the new syncer. For
each new gossip query message, we'll then attempt to dispatch the
message directly to the gossip syncer. When the peer has disconnected,
we then expect the server to call the PruneSyncState method which will
allow us to free up the resources.
Finally, when we go to broadcast messages, we'll send the messages
directly to the peers that have gossipSyncer instances active, so they
can properly be filtered out. For those that don't we'll broadcast
directly, ensuring we skip *all* peers that have an active gossip
syncer.
2018-04-17 05:00:00 +03:00
|
|
|
// Channel announcement signatures are amongst the only
|
|
|
|
// messages that we'll process serially.
|
|
|
|
case *lnwire.AnnounceSignatures:
|
2017-11-30 04:53:16 +03:00
|
|
|
emittedAnnouncements := d.processNetworkAnnouncement(
|
|
|
|
announcement,
|
|
|
|
)
|
|
|
|
if emittedAnnouncements != nil {
|
|
|
|
announcements.AddMsgs(
|
|
|
|
emittedAnnouncements...,
|
|
|
|
)
|
|
|
|
}
|
2017-11-30 05:35:11 +03:00
|
|
|
continue
|
2017-11-30 04:53:16 +03:00
|
|
|
}
|
|
|
|
|
2018-01-31 07:41:27 +03:00
|
|
|
// If this message was recently rejected, then we won't
|
|
|
|
// attempt to re-process it.
|
|
|
|
if d.isRecentlyRejectedMsg(announcement.msg) {
|
2018-08-20 15:28:10 +03:00
|
|
|
announcement.err <- fmt.Errorf("recently " +
|
|
|
|
"rejected")
|
2018-01-31 07:41:27 +03:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// We'll set up any dependent jobs, and wait until a free
|
2017-11-30 03:45:08 +03:00
|
|
|
// slot for this job opens up. This allows us to not
|
|
|
|
// have thousands of goroutines active.
|
2018-02-07 06:11:11 +03:00
|
|
|
validationBarrier.InitJobDependencies(announcement.msg)
|
2017-11-30 03:45:08 +03:00
|
|
|
|
2018-05-08 05:10:44 +03:00
|
|
|
d.wg.Add(1)
|
2017-11-30 03:45:08 +03:00
|
|
|
go func() {
|
2018-05-08 05:10:44 +03:00
|
|
|
defer d.wg.Done()
|
2017-11-30 03:45:08 +03:00
|
|
|
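// Release our slot in the validation barrier when done, even if
// validation below fails, so other queued jobs can proceed.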
defer validationBarrier.CompleteJob()
|
|
|
|
|
|
|
|
// If this message has an existing dependency,
|
|
|
|
// then we'll wait until that has been fully
|
|
|
|
// validated before we proceed.
|
2018-05-08 05:10:44 +03:00
|
|
|
err := validationBarrier.WaitForDependants(
|
|
|
|
announcement.msg,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
if err != routing.ErrVBarrierShuttingDown {
|
|
|
|
log.Warnf("unexpected error "+
|
|
|
|
"during validation "+
|
|
|
|
"barrier shutdown: %v",
|
|
|
|
err)
|
|
|
|
}
|
2018-08-20 15:28:10 +03:00
|
|
|
announcement.err <- err
|
2018-05-08 05:10:44 +03:00
|
|
|
return
|
|
|
|
}
|
2017-11-30 03:45:08 +03:00
|
|
|
|
2018-08-20 15:28:10 +03:00
|
|
|
// Process the network announcement to
|
|
|
|
// determine if this is either a new
|
|
|
|
// announcement from our PoV or an update to a
|
|
|
|
// prior vertex/edge we previously processed.
|
2017-11-30 03:45:08 +03:00
|
|
|
emittedAnnouncements := d.processNetworkAnnouncement(
|
|
|
|
announcement,
|
|
|
|
)
|
|
|
|
|
|
|
|
// If this message had any dependencies, then
|
|
|
|
// we can now signal them to continue.
|
2018-08-20 15:28:10 +03:00
|
|
|
validationBarrier.SignalDependants(
|
|
|
|
announcement.msg,
|
|
|
|
)
|
2017-11-30 03:45:08 +03:00
|
|
|
|
2018-08-20 15:28:10 +03:00
|
|
|
// If the announcement was accepted, then add
|
|
|
|
// the emitted announcements to our announce
|
|
|
|
// batch to be broadcast once the trickle timer
|
|
|
|
// ticks again.
|
2017-11-30 03:45:08 +03:00
|
|
|
if emittedAnnouncements != nil {
|
2018-08-20 15:28:10 +03:00
|
|
|
// TODO(roasbeef): exclude peer that
|
|
|
|
// sent.
|
2017-11-30 03:45:08 +03:00
|
|
|
announcements.AddMsgs(
|
|
|
|
emittedAnnouncements...,
|
|
|
|
)
|
|
|
|
}
|
2017-03-28 22:08:14 +03:00
|
|
|
|
2017-11-30 03:45:08 +03:00
|
|
|
}()
|
2017-03-19 21:40:25 +03:00
|
|
|
|
2017-04-01 15:33:17 +03:00
|
|
|
// A new block has arrived, so we can re-process the previously
|
|
|
|
// premature announcements.
|
2018-06-12 03:37:17 +03:00
|
|
|
case newBlock, ok := <-d.blockEpochs.Epochs:
|
2017-03-19 21:40:25 +03:00
|
|
|
// If the channel has been closed, then this indicates
|
|
|
|
// the daemon is shutting down, so we exit ourselves.
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Once a new block arrives, we update our running
|
|
|
|
// track of the height of the chain tip.
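// The height is stored atomically since isPremature reads it without
// holding the gossiper's lock.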
|
|
|
|
blockHeight := uint32(newBlock.Height)
|
2017-11-30 03:45:08 +03:00
|
|
|
atomic.StoreUint32(&d.bestHeight, blockHeight)
|
2017-03-19 21:40:25 +03:00
|
|
|
|
|
|
|
// Next we check if we have any premature announcements
|
|
|
|
// for this height, if so, then we process them once
|
|
|
|
// more as normal announcements.
|
2017-11-30 03:45:08 +03:00
|
|
|
d.Lock()
|
2018-08-20 15:28:10 +03:00
|
|
|
numPremature := len(d.prematureAnnouncements[blockHeight])
|
2017-11-30 03:45:08 +03:00
|
|
|
d.Unlock()
|
2018-08-20 15:28:10 +03:00
|
|
|
|
|
|
|
// Return early if there are no announcements to process.
|
|
|
|
if numPremature == 0 {
|
|
|
|
continue
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2018-08-20 15:28:10 +03:00
|
|
|
log.Infof("Re-processing %v premature announcements "+
|
|
|
|
"for height %v", numPremature, blockHeight)
|
|
|
|
|
2017-11-30 03:45:08 +03:00
|
|
|
d.Lock()
|
2018-08-20 15:28:10 +03:00
|
|
|
for _, ann := range d.prematureAnnouncements[blockHeight] {
|
2017-03-28 22:08:14 +03:00
|
|
|
emittedAnnouncements := d.processNetworkAnnouncement(ann)
|
|
|
|
if emittedAnnouncements != nil {
|
2017-11-30 03:45:08 +03:00
|
|
|
announcements.AddMsgs(
|
|
|
|
emittedAnnouncements...,
|
|
|
|
)
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
delete(d.prematureAnnouncements, blockHeight)
|
2017-11-30 03:45:08 +03:00
|
|
|
d.Unlock()
|
2017-03-19 21:40:25 +03:00
|
|
|
|
|
|
|
// The trickle timer has ticked, which indicates we should
|
|
|
|
// flush to the network the pending batch of new announcements
|
|
|
|
// we've received since the last trickle tick.
|
|
|
|
case <-trickleTimer.C:
|
2017-11-30 03:45:08 +03:00
|
|
|
// Emit the current batch of announcements from
|
|
|
|
// deDupedAnnouncements.
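// Emit also resets the internal batch, so announcements received from
// now on will wait for the next trickle tick.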
|
|
|
|
announcementBatch := announcements.Emit()
|
2017-09-08 04:25:43 +03:00
|
|
|
|
2017-03-28 22:08:14 +03:00
|
|
|
// If the current announcement batch is empty, then we
|
2017-03-19 21:40:25 +03:00
|
|
|
// have no further work here.
|
|
|
|
if len(announcementBatch) == 0 {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-04-17 05:00:00 +03:00
|
|
|
// For the set of peers that have active gossip
|
|
|
|
// syncers, we'll collect their pubkeys so we can avoid
|
|
|
|
// sending them the full message blast below.
|
2019-03-23 05:56:33 +03:00
|
|
|
syncerPeers := d.syncMgr.GossipSyncers()
|
2018-04-17 05:00:00 +03:00
|
|
|
|
2017-03-19 21:40:25 +03:00
|
|
|
log.Infof("Broadcasting batch of %v new announcements",
|
|
|
|
len(announcementBatch))
|
|
|
|
|
2018-04-17 05:00:00 +03:00
|
|
|
// We'll first attempt to filter out this new message
|
|
|
|
// for all peers that have a gossip syncer that is
|
|
|
|
// currently active.
|
2018-06-05 02:50:04 +03:00
|
|
|
for _, syncer := range syncerPeers {
|
2018-04-17 05:00:00 +03:00
|
|
|
syncer.FilterGossipMsgs(announcementBatch...)
|
|
|
|
}
|
2018-04-17 04:54:53 +03:00
|
|
|
|
2018-04-17 05:00:00 +03:00
|
|
|
// Next, if we have new things to announce then
|
|
|
|
// broadcast them to all our immediately connected
|
|
|
|
// peers.
|
|
|
|
for _, msgChunk := range announcementBatch {
|
2018-04-17 04:54:53 +03:00
|
|
|
// With the syncers taken care of, we'll merge
|
|
|
|
// the sender map with the set of syncers, so
|
|
|
|
// we don't send out duplicate messages.
|
|
|
|
msgChunk.mergeSyncerMap(syncerPeers)
|
|
|
|
|
2017-12-26 18:26:28 +03:00
|
|
|
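// Broadcast delivers the message to every connected peer except those
// in the senders set, which now also contains all peers with an active
// gossip syncer.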
err := d.cfg.Broadcast(
|
|
|
|
msgChunk.senders, msgChunk.msg,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("unable to send batch "+
|
|
|
|
"announcements: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// The retransmission timer has ticked, which indicates that we
|
2017-09-25 04:47:48 +03:00
|
|
|
// should check if we need to prune or re-broadcast any of our
|
2018-08-20 15:28:10 +03:00
|
|
|
// personal channels. This addresses the case of "zombie"
|
|
|
|
// channels and channel advertisements that have been dropped,
|
|
|
|
// or not properly propagated through the network.
|
2017-03-19 21:40:25 +03:00
|
|
|
case <-retransmitTimer.C:
|
2017-10-05 06:07:54 +03:00
|
|
|
if err := d.retransmitStaleChannels(); err != nil {
|
|
|
|
log.Errorf("unable to rebroadcast stale "+
|
2017-03-19 21:40:25 +03:00
|
|
|
"channels: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-05 20:17:31 +03:00
|
|
|
// The gossiper has been signalled to exit, so we exit our
|
2017-04-01 15:33:17 +03:00
|
|
|
// main loop so the wait group can be decremented.
|
2017-03-19 21:40:25 +03:00
|
|
|
case <-d.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-07-31 11:29:12 +03:00
|
|
|
// TODO(roasbeef): d/c peers that send updates not on our chain
|
2018-04-17 05:00:00 +03:00
|
|
|
|
2018-07-31 11:29:12 +03:00
|
|
|
// InitSyncState is called by outside sub-systems when a connection is
|
2018-04-17 05:00:00 +03:00
|
|
|
// established to a new peer that understands how to perform channel range
|
|
|
|
// queries. We'll allocate a new gossip syncer for it, and start any goroutines
|
2019-03-23 05:56:33 +03:00
|
|
|
// needed to handle new queries.
|
|
|
|
func (d *AuthenticatedGossiper) InitSyncState(syncPeer lnpeer.Peer) {
|
|
|
|
d.syncMgr.InitSyncState(syncPeer)
|
2018-04-17 05:00:00 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// PruneSyncState is called by outside sub-systems once a peer that we were
|
|
|
|
// previously connected to has been disconnected. In this case we can stop the
|
2019-03-23 05:54:46 +03:00
|
|
|
// existing GossipSyncer assigned to the peer and free up resources.
|
2019-03-23 05:56:33 +03:00
|
|
|
func (d *AuthenticatedGossiper) PruneSyncState(peer routing.Vertex) {
|
|
|
|
d.syncMgr.PruneSyncState(peer)
|
2018-04-17 05:00:00 +03:00
|
|
|
}
|
|
|
|
|
2018-01-31 07:41:27 +03:00
|
|
|
// isRecentlyRejectedMsg returns true if we recently rejected a message, and
|
|
|
|
// false otherwise. This avoids expensive reprocessing of the message.
|
|
|
|
func (d *AuthenticatedGossiper) isRecentlyRejectedMsg(msg lnwire.Message) bool {
|
|
|
|
d.rejectMtx.RLock()
|
|
|
|
defer d.rejectMtx.RUnlock()
|
|
|
|
|
|
|
|
switch m := msg.(type) {
|
|
|
|
case *lnwire.ChannelUpdate:
|
|
|
|
_, ok := d.recentRejects[m.ShortChannelID.ToUint64()]
|
|
|
|
return ok
|
|
|
|
|
|
|
|
case *lnwire.ChannelAnnouncement:
|
|
|
|
_, ok := d.recentRejects[m.ShortChannelID.ToUint64()]
|
|
|
|
return ok
|
|
|
|
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-17 07:05:07 +03:00
|
|
|
// retransmitStaleChannels examines all outgoing channels that the source node
|
2017-10-05 06:07:54 +03:00
|
|
|
// is known to maintain to check to see if any of them are "stale". A channel
|
2018-02-07 06:13:07 +03:00
|
|
|
// is stale iff the last timestamp of its rebroadcast is older than
|
2017-10-05 06:07:54 +03:00
|
|
|
// broadcastInterval.
|
|
|
|
func (d *AuthenticatedGossiper) retransmitStaleChannels() error {
|
|
|
|
// Iterate over all of our channels and check if any of them fall
|
|
|
|
// within the prune interval or re-broadcast interval.
|
|
|
|
type updateTuple struct {
|
|
|
|
info *channeldb.ChannelEdgeInfo
|
|
|
|
edge *channeldb.ChannelEdgePolicy
|
|
|
|
}
|
|
|
|
var edgesToUpdate []updateTuple
|
|
|
|
err := d.cfg.Router.ForAllOutgoingChannels(func(
|
|
|
|
info *channeldb.ChannelEdgeInfo,
|
|
|
|
edge *channeldb.ChannelEdgePolicy) error {
|
|
|
|
|
2018-01-17 07:05:07 +03:00
|
|
|
// If there's no auth proof attached to this edge, it means
|
|
|
|
// that it is a private channel not meant to be announced to
|
|
|
|
// the greater network, so avoid sending channel updates for
|
|
|
|
// this channel to not leak its
|
2017-11-14 04:12:57 +03:00
|
|
|
// existence.
|
|
|
|
if info.AuthProof == nil {
|
|
|
|
log.Debugf("Skipping retransmission of channel "+
|
2018-01-19 01:07:56 +03:00
|
|
|
"without AuthProof: %v", info.ChannelID)
|
2017-11-14 04:12:57 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-01-12 20:59:45 +03:00
|
|
|
// If this edge has a ChannelUpdate that was created before the
|
|
|
|
// introduction of the MaxHTLC field, then we'll update this
|
|
|
|
// edge to propagate this information in the network.
|
2019-01-16 14:43:46 +03:00
|
|
|
if !edge.MessageFlags.HasMaxHtlc() {
|
2019-01-12 20:59:45 +03:00
|
|
|
edgesToUpdate = append(edgesToUpdate, updateTuple{
|
|
|
|
info: info,
|
|
|
|
edge: edge,
|
|
|
|
})
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-10-05 06:07:54 +03:00
|
|
|
const broadcastInterval = time.Hour * 24
|
|
|
|
|
|
|
|
timeElapsed := time.Since(edge.LastUpdate)
|
|
|
|
|
|
|
|
// If it's been a full day since we've re-broadcasted the
|
|
|
|
// channel, add the channel to the set of edges we need to
|
|
|
|
// update.
|
2017-11-14 04:12:57 +03:00
|
|
|
if timeElapsed >= broadcastInterval {
|
2017-10-05 06:07:54 +03:00
|
|
|
edgesToUpdate = append(edgesToUpdate, updateTuple{
|
|
|
|
info: info,
|
|
|
|
edge: edge,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
2018-08-29 04:35:24 +03:00
|
|
|
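// ErrGraphNoEdgesFound simply means we don't have any channels in the
// graph yet, which isn't a failure here.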
if err != nil && err != channeldb.ErrGraphNoEdgesFound {
|
|
|
|
return fmt.Errorf("unable to retrieve outgoing channels: %v",
|
|
|
|
err)
|
2017-10-05 06:07:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
var signedUpdates []lnwire.Message
|
|
|
|
for _, chanToUpdate := range edgesToUpdate {
|
|
|
|
// Re-sign and update the channel on disk and retrieve our
|
|
|
|
// ChannelUpdate to broadcast.
|
2018-04-04 05:46:06 +03:00
|
|
|
chanAnn, chanUpdate, err := d.updateChannel(
|
|
|
|
chanToUpdate.info, chanToUpdate.edge,
|
|
|
|
)
|
2017-10-05 06:07:54 +03:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to update channel: %v", err)
|
|
|
|
}
|
2017-10-05 06:33:03 +03:00
|
|
|
|
|
|
|
// If we have a valid announcement to transmit, then we'll send
|
|
|
|
// that along with the update.
|
|
|
|
if chanAnn != nil {
|
|
|
|
signedUpdates = append(signedUpdates, chanAnn)
|
|
|
|
}
|
|
|
|
|
2017-10-05 06:07:54 +03:00
|
|
|
signedUpdates = append(signedUpdates, chanUpdate)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we don't have any channels to re-broadcast, then we'll exit
|
|
|
|
// early.
|
|
|
|
if len(signedUpdates) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-10-05 06:33:03 +03:00
|
|
|
log.Infof("Retransmitting %v outgoing channels", len(edgesToUpdate))
|
2017-10-05 06:07:54 +03:00
|
|
|
|
|
|
|
// With all the wire announcements properly crafted, we'll broadcast
|
|
|
|
// our known outgoing channels to all our immediate peers.
|
|
|
|
if err := d.cfg.Broadcast(nil, signedUpdates...); err != nil {
|
|
|
|
return fmt.Errorf("unable to re-broadcast channels: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-12-15 23:56:11 +03:00
|
|
|
// processChanPolicyUpdate generates a new set of channel updates with the new
|
|
|
|
// channel policy applied for each specified channel identified by its channel
|
2018-04-04 05:46:06 +03:00
|
|
|
// point. In the case that no channel points are specified, then the update
|
|
|
|
// will be applied to all channels. Finally, the backing ChannelGraphSource is
|
2017-12-15 23:56:11 +03:00
|
|
|
// updated with the latest information reflecting the applied updates.
|
2017-08-22 09:40:02 +03:00
|
|
|
//
|
|
|
|
// TODO(roasbeef): generalize into generic for any channel update
|
2017-12-15 23:56:11 +03:00
|
|
|
func (d *AuthenticatedGossiper) processChanPolicyUpdate(
|
|
|
|
policyUpdate *chanPolicyUpdateRequest) ([]networkMsg, error) {
|
2017-08-22 09:40:02 +03:00
|
|
|
// First, we'll construct a set of all the channels that need to be
|
|
|
|
// updated.
|
|
|
|
chansToUpdate := make(map[wire.OutPoint]struct{})
|
2017-12-15 23:56:11 +03:00
|
|
|
for _, chanPoint := range policyUpdate.targetChans {
|
2017-08-22 09:40:02 +03:00
|
|
|
chansToUpdate[chanPoint] = struct{}{}
|
|
|
|
}
|
|
|
|
|
|
|
|
haveChanFilter := len(chansToUpdate) != 0
|
2018-04-04 06:18:42 +03:00
|
|
|
if haveChanFilter {
|
|
|
|
log.Infof("Updating routing policies for chan_points=%v",
|
|
|
|
spew.Sdump(chansToUpdate))
|
|
|
|
} else {
|
|
|
|
log.Infof("Updating routing policies for all chans")
|
|
|
|
}
|
2017-08-22 09:40:02 +03:00
|
|
|
|
2018-04-04 05:46:06 +03:00
|
|
|
type edgeWithInfo struct {
|
|
|
|
info *channeldb.ChannelEdgeInfo
|
|
|
|
edge *channeldb.ChannelEdgePolicy
|
|
|
|
}
|
|
|
|
var edgesToUpdate []edgeWithInfo
|
2017-10-05 05:30:54 +03:00
|
|
|
|
2017-08-22 09:40:02 +03:00
|
|
|
// Next, we'll loop over all the outgoing channels the router knows of.
|
|
|
|
// If we have a filter then we'll only collect those channels,
|
|
|
|
// otherwise we'll collect them all.
|
2018-04-04 05:46:06 +03:00
|
|
|
err := d.cfg.Router.ForAllOutgoingChannels(func(
|
|
|
|
info *channeldb.ChannelEdgeInfo,
|
2017-08-22 09:40:02 +03:00
|
|
|
edge *channeldb.ChannelEdgePolicy) error {
|
|
|
|
|
|
|
|
// If we have a channel filter, and this channel isn't a part
|
|
|
|
// of it, then we'll skip it.
|
|
|
|
if _, ok := chansToUpdate[info.ChannelPoint]; !ok && haveChanFilter {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2018-04-04 05:46:06 +03:00
|
|
|
// Now that we know we should update this channel, we'll update
|
|
|
|
// its set of policies.
|
2017-12-15 23:56:11 +03:00
|
|
|
edge.FeeBaseMSat = policyUpdate.newSchema.BaseFee
|
2017-08-22 09:40:02 +03:00
|
|
|
edge.FeeProportionalMillionths = lnwire.MilliSatoshi(
|
2017-12-15 23:56:11 +03:00
|
|
|
policyUpdate.newSchema.FeeRate,
|
2017-08-22 09:40:02 +03:00
|
|
|
)
|
2017-12-15 23:56:11 +03:00
|
|
|
edge.TimeLockDelta = uint16(policyUpdate.newSchema.TimeLockDelta)
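// updateChannel below re-signs the modified policy and refreshes its
// timestamp before it is persisted and broadcast.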
|
|
|
|
|
2018-04-04 05:46:06 +03:00
|
|
|
edgesToUpdate = append(edgesToUpdate, edgeWithInfo{
|
|
|
|
info: info,
|
|
|
|
edge: edge,
|
|
|
|
})
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the set of edges we need to update retrieved, we'll now re-sign
|
|
|
|
// them, and insert them into the database.
|
|
|
|
var chanUpdates []networkMsg
|
|
|
|
for _, edgeInfo := range edgesToUpdate {
|
|
|
|
// Now that we've collected all the channels we need to update,
|
|
|
|
// we'll re-sign and update the backing ChannelGraphSource, and
|
2017-10-05 05:30:54 +03:00
|
|
|
// retrieve our ChannelUpdate to broadcast.
|
2018-04-04 05:46:06 +03:00
|
|
|
_, chanUpdate, err := d.updateChannel(
|
|
|
|
edgeInfo.info, edgeInfo.edge,
|
|
|
|
)
|
2017-08-22 09:40:02 +03:00
|
|
|
if err != nil {
|
2018-04-04 05:46:06 +03:00
|
|
|
return nil, err
|
2017-08-22 09:40:02 +03:00
|
|
|
}
|
|
|
|
|
2019-04-09 03:54:58 +03:00
|
|
|
// We'll avoid broadcasting any updates for private channels to
|
2019-04-10 03:44:45 +03:00
|
|
|
// avoid directly giving away their existence. Instead, we'll
|
|
|
|
// send the update directly to the remote party.
|
2019-04-09 03:54:58 +03:00
|
|
|
if edgeInfo.info.AuthProof == nil {
|
2019-04-10 03:44:45 +03:00
|
|
|
remotePubKey := remotePubFromChanInfo(
|
|
|
|
edgeInfo.info, chanUpdate.ChannelFlags,
|
|
|
|
)
|
|
|
|
err := d.reliableSender.sendMessage(
|
|
|
|
chanUpdate, remotePubKey,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("Unable to reliably send %v for "+
|
|
|
|
"channel=%v to peer=%x: %v",
|
|
|
|
chanUpdate.MsgType(),
|
|
|
|
chanUpdate.ShortChannelID,
|
|
|
|
remotePubKey, err)
|
|
|
|
}
|
2019-04-09 03:54:58 +03:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-12-26 18:23:05 +03:00
|
|
|
// We set ourselves as the source of this message to indicate
|
|
|
|
// that we shouldn't skip any peers when sending this message.
|
|
|
|
chanUpdates = append(chanUpdates, networkMsg{
|
2018-06-08 06:11:27 +03:00
|
|
|
source: d.selfKey,
|
|
|
|
msg: chanUpdate,
|
2017-12-26 18:23:05 +03:00
|
|
|
})
|
2017-08-22 09:40:02 +03:00
|
|
|
}
|
|
|
|
|
2017-10-05 05:30:54 +03:00
|
|
|
return chanUpdates, nil
|
2017-08-22 09:40:02 +03:00
|
|
|
}
|
|
|
|
|
2018-02-07 06:11:11 +03:00
|
|
|
// processRejectedEdge examines a rejected edge to see if we can extract any
|
2017-12-26 18:28:17 +03:00
|
|
|
// new announcements from it. An edge will get rejected if we already added
|
|
|
|
// the same edge without AuthProof to the graph. If the received announcement
|
|
|
|
// contains a proof, we can add this proof to our edge. We can end up in this
|
2018-02-07 06:11:11 +03:00
|
|
|
// situation in the case where we create a channel, but for some reason fail
|
2017-12-26 18:28:17 +03:00
|
|
|
// to receive the remote peer's proof, while the remote peer is able to fully
|
|
|
|
// assemble the proof and craft the ChannelAnnouncement.
|
2018-08-20 15:28:10 +03:00
|
|
|
func (d *AuthenticatedGossiper) processRejectedEdge(
|
|
|
|
chanAnnMsg *lnwire.ChannelAnnouncement,
|
2017-12-26 18:28:17 +03:00
|
|
|
proof *channeldb.ChannelAuthProof) ([]networkMsg, error) {
|
|
|
|
|
|
|
|
// First, we'll fetch the state of the channel as we know it from the
|
|
|
|
// database.
|
|
|
|
chanInfo, e1, e2, err := d.cfg.Router.GetChannelByID(
|
|
|
|
chanAnnMsg.ShortChannelID,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the edge is in the graph and has a proof attached, then we'll just
|
|
|
|
// reject it as normal.
|
|
|
|
if chanInfo.AuthProof != nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, this means that the edge is within the graph, but it
|
2018-01-02 11:57:01 +03:00
|
|
|
// doesn't yet have a proper proof attached. If we did not receive
|
|
|
|
// the proof such that we can now add it, there's nothing more we
|
|
|
|
// can do.
|
|
|
|
if proof == nil {
|
|
|
|
return nil, nil
|
|
|
|
}
|
2017-12-26 18:28:17 +03:00
|
|
|
|
|
|
|
// We'll then create and validate the new fully assembled
|
|
|
|
// announcement.
|
2018-04-17 05:09:11 +03:00
|
|
|
chanAnn, e1Ann, e2Ann, err := CreateChanAnnouncement(
|
2017-12-26 18:28:17 +03:00
|
|
|
proof, chanInfo, e1, e2,
|
|
|
|
)
|
2018-01-31 07:23:14 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-08-15 16:51:01 +03:00
|
|
|
err = routing.ValidateChannelAnn(chanAnn)
|
2017-12-26 18:28:17 +03:00
|
|
|
if err != nil {
|
2018-09-05 07:45:57 +03:00
|
|
|
err := fmt.Errorf("assembled channel announcement proof "+
|
2017-12-26 18:28:17 +03:00
|
|
|
"for shortChanID=%v isn't valid: %v",
|
|
|
|
chanAnnMsg.ShortChannelID, err)
|
|
|
|
log.Error(err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If everything checks out, then we'll add the fully assembled proof
|
|
|
|
// to the database.
|
|
|
|
err = d.cfg.Router.AddProof(chanAnnMsg.ShortChannelID, proof)
|
|
|
|
if err != nil {
|
2018-09-05 07:45:57 +03:00
|
|
|
err := fmt.Errorf("unable add proof to shortChanID=%v: %v",
|
2017-12-26 18:28:17 +03:00
|
|
|
chanAnnMsg.ShortChannelID, err)
|
|
|
|
log.Error(err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// As we now have a complete channel announcement for this channel,
|
|
|
|
// we'll construct the announcements so they can be broadcast out to all
|
|
|
|
// our peers.
|
|
|
|
announcements := make([]networkMsg, 0, 3)
|
|
|
|
announcements = append(announcements, networkMsg{
|
2018-06-08 06:11:27 +03:00
|
|
|
source: d.selfKey,
|
|
|
|
msg: chanAnn,
|
2017-12-26 18:28:17 +03:00
|
|
|
})
|
|
|
|
if e1Ann != nil {
|
|
|
|
announcements = append(announcements, networkMsg{
|
2018-06-08 06:11:27 +03:00
|
|
|
source: d.selfKey,
|
|
|
|
msg: e1Ann,
|
2017-12-26 18:28:17 +03:00
|
|
|
})
|
|
|
|
}
|
|
|
|
if e2Ann != nil {
|
|
|
|
announcements = append(announcements, networkMsg{
|
2018-06-08 06:11:27 +03:00
|
|
|
source: d.selfKey,
|
|
|
|
msg: e2Ann,
|
2017-12-26 18:28:17 +03:00
|
|
|
})
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return announcements, nil
|
|
|
|
}
|
|
|
|
|
2017-03-19 21:40:25 +03:00
|
|
|
// processNetworkAnnouncement processes a new network related authenticated
|
2017-03-28 22:08:14 +03:00
|
|
|
// channel or node announcement or announcement proofs. If the announcement
|
|
|
|
// didn't affect the internal state due to either being out of date, invalid,
|
2017-04-01 15:33:17 +03:00
|
|
|
// or redundant, then nil is returned. Otherwise, the set of announcements will
|
|
|
|
// be returned which should be broadcasted to the rest of the network.
|
2018-08-20 15:28:10 +03:00
|
|
|
func (d *AuthenticatedGossiper) processNetworkAnnouncement(
|
|
|
|
nMsg *networkMsg) []networkMsg {
|
|
|
|
|
2017-03-28 22:08:14 +03:00
|
|
|
isPremature := func(chanID lnwire.ShortChannelID, delta uint32) bool {
|
2017-08-22 09:40:02 +03:00
|
|
|
// TODO(roasbeef) make height delta 6
|
|
|
|
// * or configurable
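// An announcement is premature if the block it references (plus the
// required confirmation delta) is still beyond our best known height.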
|
2017-11-30 03:45:08 +03:00
|
|
|
bestHeight := atomic.LoadUint32(&d.bestHeight)
|
|
|
|
return chanID.BlockHeight+delta > bestHeight
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2017-12-26 18:28:17 +03:00
|
|
|
var announcements []networkMsg
|
2017-04-01 15:33:17 +03:00
|
|
|
|
2017-03-28 22:08:14 +03:00
|
|
|
switch msg := nMsg.msg.(type) {
|
2017-04-01 15:33:17 +03:00
|
|
|
|
2017-08-05 04:32:25 +03:00
|
|
|
// A new node announcement has arrived which either presents new
|
|
|
|
// information about a node in one of the channels we know about, or is
|
|
|
|
// updating previously advertised information.
|
2017-03-19 21:40:25 +03:00
|
|
|
case *lnwire.NodeAnnouncement:
|
2018-02-25 06:35:58 +03:00
|
|
|
timestamp := time.Unix(int64(msg.Timestamp), 0)
|
|
|
|
|
2018-10-10 16:05:21 +03:00
|
|
|
// We'll quickly ask the router if it already has a
|
|
|
|
// newer update for this node so we can skip validating
|
|
|
|
// signatures if not required.
|
|
|
|
if d.cfg.Router.IsStaleNode(msg.NodeID, timestamp) {
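// A stale announcement isn't treated as an error for the sender, so we
// acknowledge it and skip any further processing.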
|
|
|
|
nMsg.err <- nil
|
|
|
|
return nil
|
|
|
|
}
|
2018-02-25 06:35:58 +03:00
|
|
|
|
2018-10-10 16:05:21 +03:00
|
|
|
if err := routing.ValidateNodeAnn(msg); err != nil {
|
|
|
|
err := fmt.Errorf("unable to validate "+
|
|
|
|
"node announcement: %v", err)
|
|
|
|
log.Error(err)
|
|
|
|
nMsg.err <- err
|
|
|
|
return nil
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2018-08-20 15:28:10 +03:00
|
|
|
features := lnwire.NewFeatureVector(
|
|
|
|
msg.Features, lnwire.GlobalFeatures,
|
|
|
|
)
|
2017-03-19 21:40:25 +03:00
|
|
|
node := &channeldb.LightningNode{
|
2017-07-14 22:41:46 +03:00
|
|
|
HaveNodeAnnouncement: true,
|
2018-02-25 06:35:58 +03:00
|
|
|
LastUpdate: timestamp,
|
2017-07-14 22:41:46 +03:00
|
|
|
Addresses: msg.Addresses,
|
2018-01-31 07:23:14 +03:00
|
|
|
PubKeyBytes: msg.NodeID,
|
2017-07-14 22:41:46 +03:00
|
|
|
Alias: msg.Alias.String(),
|
2018-01-31 07:23:14 +03:00
|
|
|
AuthSigBytes: msg.Signature.ToSignatureBytes(),
|
2017-10-11 21:37:54 +03:00
|
|
|
Features: features,
|
2017-12-03 05:28:26 +03:00
|
|
|
Color: msg.RGBColor,
|
2018-09-01 06:13:54 +03:00
|
|
|
ExtraOpaqueData: msg.ExtraOpaqueData,
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if err := d.cfg.Router.AddNode(node); err != nil {
|
2017-08-05 04:32:25 +03:00
|
|
|
if routing.IsError(err, routing.ErrOutdated,
|
|
|
|
routing.ErrIgnored) {
|
|
|
|
|
2017-04-14 00:30:41 +03:00
|
|
|
log.Debug(err)
|
2017-03-28 22:08:14 +03:00
|
|
|
} else {
|
2017-04-01 15:33:17 +03:00
|
|
|
log.Error(err)
|
2017-03-28 22:08:14 +03:00
|
|
|
}
|
2017-04-01 15:33:17 +03:00
|
|
|
|
|
|
|
nMsg.err <- err
|
2017-03-28 22:08:14 +03:00
|
|
|
return nil
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2018-10-18 01:49:14 +03:00
|
|
|
// In order to ensure we don't leak unadvertised nodes, we'll
|
|
|
|
// make a quick check to ensure this node intends to publicly
|
|
|
|
// advertise itself to the network.
|
|
|
|
isPublic, err := d.cfg.Router.IsPublicNode(node.PubKeyBytes)
|
|
|
|
if err != nil {
|
|
|
|
log.Errorf("Unable to determine if node %x is "+
|
|
|
|
"advertised: %v", node.PubKeyBytes, err)
|
|
|
|
nMsg.err <- err
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If it does, we'll add their announcement to our batch so that
|
|
|
|
// it can be broadcast to the rest of our peers.
|
|
|
|
if isPublic {
|
|
|
|
announcements = append(announcements, networkMsg{
|
|
|
|
peer: nMsg.peer,
|
|
|
|
source: nMsg.source,
|
|
|
|
msg: msg,
|
|
|
|
})
|
2018-11-05 09:57:24 +03:00
|
|
|
} else {
|
|
|
|
log.Tracef("Skipping broadcasting node announcement "+
|
|
|
|
"for %x due to being unadvertised", msg.NodeID)
|
2018-10-18 01:49:14 +03:00
|
|
|
}
|
2017-03-28 22:08:14 +03:00
|
|
|
|
|
|
|
nMsg.err <- nil
|
2017-04-01 15:33:17 +03:00
|
|
|
// TODO(roasbeef): get rid of the above
|
2017-03-28 22:08:14 +03:00
|
|
|
return announcements
|
|
|
|
|
2017-03-19 21:40:25 +03:00
|
|
|
// A new channel announcement has arrived, this indicates the
|
|
|
|
// *creation* of a new channel within the network. This only advertises
|
|
|
|
// the existence of a channel and not yet the routing policies in
|
|
|
|
// either direction of the channel.
|
|
|
|
case *lnwire.ChannelAnnouncement:
|
2017-09-04 02:41:01 +03:00
|
|
|
// We'll ignore any channel announcements that target any chain
|
|
|
|
// other than the set of chains we know of.
|
|
|
|
if !bytes.Equal(msg.ChainHash[:], d.cfg.ChainHash[:]) {
|
2018-08-20 15:28:09 +03:00
|
|
|
err := fmt.Errorf("Ignoring ChannelAnnouncement from "+
|
2017-09-04 02:41:01 +03:00
|
|
|
"chain=%v, gossiper on chain=%v", msg.ChainHash,
|
|
|
|
d.cfg.ChainHash)
|
2018-08-20 15:28:09 +03:00
|
|
|
log.Errorf(err.Error())
|
|
|
|
|
2018-01-31 07:41:27 +03:00
|
|
|
d.rejectMtx.Lock()
|
|
|
|
d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
|
|
|
|
d.rejectMtx.Unlock()
|
2018-08-20 15:28:09 +03:00
|
|
|
|
|
|
|
nMsg.err <- err
|
2017-09-04 02:41:01 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-03-19 21:40:25 +03:00
|
|
|
// If the advertised inclusionary block is beyond our knowledge
|
|
|
|
// of the chain tip, then we'll put the announcement in limbo
|
|
|
|
// to be fully verified once we advance forward in the chain.
|
2017-10-18 05:24:04 +03:00
|
|
|
if nMsg.isRemote && isPremature(msg.ShortChannelID, 0) {
|
2017-03-27 18:22:37 +03:00
|
|
|
blockHeight := msg.ShortChannelID.BlockHeight
|
2018-08-20 15:28:10 +03:00
|
|
|
log.Infof("Announcement for chan_id=(%v), is "+
|
|
|
|
"premature: advertises height %v, only "+
|
|
|
|
"height %v is known",
|
2017-04-24 05:06:33 +03:00
|
|
|
msg.ShortChannelID.ToUint64(),
|
2017-11-30 03:45:08 +03:00
|
|
|
msg.ShortChannelID.BlockHeight,
|
|
|
|
atomic.LoadUint32(&d.bestHeight))
|
2017-03-19 21:40:25 +03:00
|
|
|
|
2017-11-30 03:45:08 +03:00
|
|
|
d.Lock()
|
2017-03-19 21:40:25 +03:00
|
|
|
d.prematureAnnouncements[blockHeight] = append(
|
|
|
|
d.prematureAnnouncements[blockHeight],
|
2017-03-28 22:08:14 +03:00
|
|
|
nMsg,
|
2017-03-19 21:40:25 +03:00
|
|
|
)
|
2017-11-30 03:45:08 +03:00
|
|
|
d.Unlock()
|
2017-03-28 22:08:14 +03:00
|
|
|
return nil
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2019-03-27 23:08:03 +03:00
|
|
|
// At this point, we'll now ask the router if this is a
|
|
|
|
// zombie/known edge. If so, we can skip all the processing
|
|
|
|
// below.
|
2018-02-25 06:35:58 +03:00
|
|
|
if d.cfg.Router.IsKnownEdge(msg.ShortChannelID) {
|
|
|
|
nMsg.err <- nil
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-08-05 04:32:25 +03:00
|
|
|
// If this is a remote channel announcement, then we'll validate
|
|
|
|
// all the signatures within the proof as it should be well
|
2017-04-01 15:33:17 +03:00
|
|
|
// formed.
|
2017-03-19 21:40:25 +03:00
|
|
|
var proof *channeldb.ChannelAuthProof
|
2017-03-28 22:08:14 +03:00
|
|
|
if nMsg.isRemote {
|
2018-08-15 16:51:01 +03:00
|
|
|
if err := routing.ValidateChannelAnn(msg); err != nil {
|
2018-09-05 07:45:57 +03:00
|
|
|
err := fmt.Errorf("unable to validate "+
|
2017-03-27 20:25:44 +03:00
|
|
|
"announcement: %v", err)
|
2018-01-31 07:41:27 +03:00
|
|
|
d.rejectMtx.Lock()
|
|
|
|
d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
|
|
|
|
d.rejectMtx.Unlock()
|
2017-04-01 15:33:17 +03:00
|
|
|
|
2017-03-27 20:25:44 +03:00
|
|
|
log.Error(err)
|
|
|
|
nMsg.err <- err
|
|
|
|
return nil
|
|
|
|
}
|
2017-03-28 22:08:14 +03:00
|
|
|
|
2017-04-01 15:33:17 +03:00
|
|
|
// If the proof checks out, then we'll save the proof
|
|
|
|
// itself to the database so we can fetch it later when
|
|
|
|
// gossiping with other nodes.
|
2017-03-28 22:08:14 +03:00
|
|
|
proof = &channeldb.ChannelAuthProof{
|
2018-01-31 07:23:14 +03:00
|
|
|
NodeSig1Bytes: msg.NodeSig1.ToSignatureBytes(),
|
|
|
|
NodeSig2Bytes: msg.NodeSig2.ToSignatureBytes(),
|
|
|
|
BitcoinSig1Bytes: msg.BitcoinSig1.ToSignatureBytes(),
|
|
|
|
BitcoinSig2Bytes: msg.BitcoinSig2.ToSignatureBytes(),
|
2017-03-28 22:08:14 +03:00
|
|
|
}
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2017-04-01 15:33:17 +03:00
|
|
|
// With the proof validated (if necessary), we can now store it
|
|
|
|
// within the database for our path finding and syncing needs.
|
2017-08-22 09:41:19 +03:00
|
|
|
var featureBuf bytes.Buffer
|
|
|
|
if err := msg.Features.Encode(&featureBuf); err != nil {
|
2017-08-22 11:00:07 +03:00
|
|
|
log.Errorf("unable to encode features: %v", err)
|
2017-08-22 09:41:19 +03:00
|
|
|
nMsg.err <- err
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-03-19 21:40:25 +03:00
|
|
|
edge := &channeldb.ChannelEdgeInfo{
|
2018-01-31 07:23:14 +03:00
|
|
|
ChannelID: msg.ShortChannelID.ToUint64(),
|
|
|
|
ChainHash: msg.ChainHash,
|
|
|
|
NodeKey1Bytes: msg.NodeID1,
|
|
|
|
NodeKey2Bytes: msg.NodeID2,
|
|
|
|
BitcoinKey1Bytes: msg.BitcoinKey1,
|
|
|
|
BitcoinKey2Bytes: msg.BitcoinKey2,
|
|
|
|
AuthProof: proof,
|
|
|
|
Features: featureBuf.Bytes(),
|
2018-09-01 06:13:54 +03:00
|
|
|
ExtraOpaqueData: msg.ExtraOpaqueData,
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
2017-07-14 22:41:46 +03:00
|
|
|
|
2019-04-17 23:25:56 +03:00
|
|
|
// If there were any optional message fields provided, we'll
|
|
|
|
// include them in its serialized disk representation now.
|
|
|
|
if nMsg.optionalMsgFields != nil {
|
|
|
|
if nMsg.optionalMsgFields.capacity != nil {
|
|
|
|
edge.Capacity = *nMsg.optionalMsgFields.capacity
|
|
|
|
}
|
|
|
|
if nMsg.optionalMsgFields.channelPoint != nil {
|
|
|
|
edge.ChannelPoint = *nMsg.optionalMsgFields.channelPoint
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-14 22:41:46 +03:00
|
|
|
// We will add the edge to the channel router. If the nodes
|
|
|
|
// present in this channel are not present in the database, a
|
|
|
|
// partial node will be added to represent each node while we
|
|
|
|
// wait for a node announcement.
|
2018-01-23 18:26:15 +03:00
|
|
|
//
|
|
|
|
// Before we add the edge to the database, we obtain
|
|
|
|
// the mutex for this channel ID. We do this to ensure
|
|
|
|
// no other goroutine has read the database and is now
|
|
|
|
// making decisions based on this DB state, before it
|
|
|
|
// writes to the DB.
|
|
|
|
d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
|
|
|
|
defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
|
2017-03-19 21:40:25 +03:00
|
|
|
if err := d.cfg.Router.AddEdge(edge); err != nil {
|
2017-12-26 18:28:17 +03:00
|
|
|
// If the edge was rejected due to already being known,
|
|
|
|
// then it may be the case that this new message has a
|
2018-02-25 06:35:58 +03:00
|
|
|
// fresh channel proof, so we'll check.
|
2017-04-01 15:33:17 +03:00
|
|
|
if routing.IsError(err, routing.ErrOutdated,
|
2017-03-28 22:08:14 +03:00
|
|
|
routing.ErrIgnored) {
|
2017-04-01 15:33:17 +03:00
|
|
|
|
2017-12-26 18:28:17 +03:00
|
|
|
// Attempt to process the rejected message to
|
|
|
|
// see if we get any new announcements.
|
|
|
|
anns, rErr := d.processRejectedEdge(msg, proof)
|
2018-01-02 11:57:01 +03:00
|
|
|
if rErr != nil {
|
2018-01-31 07:41:27 +03:00
|
|
|
d.rejectMtx.Lock()
|
|
|
|
d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
|
|
|
|
d.rejectMtx.Unlock()
|
2017-12-26 18:28:17 +03:00
|
|
|
nMsg.err <- rErr
|
2017-11-18 06:21:50 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-12-26 18:28:17 +03:00
|
|
|
// If while processing this rejected edge, we
|
|
|
|
// realized there's a set of announcements we
|
|
|
|
// could extract, then we'll return those
|
|
|
|
// directly.
|
|
|
|
if len(anns) != 0 {
|
2017-11-18 06:21:50 +03:00
|
|
|
nMsg.err <- nil
|
2017-12-26 18:28:17 +03:00
|
|
|
return anns
|
2017-11-18 06:21:50 +03:00
|
|
|
}
|
|
|
|
|
2018-03-24 01:50:50 +03:00
|
|
|
// Otherwise, this is just a regular rejected
|
|
|
|
// edge.
|
2017-12-26 18:28:17 +03:00
|
|
|
log.Debugf("Router rejected channel "+
|
|
|
|
"edge: %v", err)
|
2017-03-19 21:40:25 +03:00
|
|
|
} else {
|
2018-03-24 01:50:50 +03:00
|
|
|
log.Tracef("Router rejected channel "+
|
2017-12-26 18:28:17 +03:00
|
|
|
"edge: %v", err)
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
2017-04-01 15:33:17 +03:00
|
|
|
|
|
|
|
nMsg.err <- err
|
2017-03-28 22:08:14 +03:00
|
|
|
return nil
|
|
|
|
}
|
2017-03-19 21:40:25 +03:00
|
|
|
|
2017-12-14 19:52:41 +03:00
|
|
|
// If we earlier received any ChannelUpdates for this channel,
|
|
|
|
// we can now process them, as the channel is added to the
|
|
|
|
// graph.
|
|
|
|
shortChanID := msg.ShortChannelID.ToUint64()
|
|
|
|
var channelUpdates []*networkMsg
|
|
|
|
|
|
|
|
d.pChanUpdMtx.Lock()
|
|
|
|
for _, cu := range d.prematureChannelUpdates[shortChanID] {
|
|
|
|
channelUpdates = append(channelUpdates, cu)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now delete the premature ChannelUpdates, since we added them
|
|
|
|
// all to the queue of network messages.
|
|
|
|
delete(d.prematureChannelUpdates, shortChanID)
|
|
|
|
d.pChanUpdMtx.Unlock()
|
|
|
|
|
2017-12-26 18:23:05 +03:00
|
|
|
// Launch a new goroutine to handle each ChannelUpdate, so as to
|
|
|
|
// ensure we don't block here, as we can handle only one
|
|
|
|
// announcement at a time.
|
2017-12-14 19:52:41 +03:00
|
|
|
for _, cu := range channelUpdates {
|
2018-08-20 15:28:10 +03:00
|
|
|
d.wg.Add(1)
|
2017-12-14 19:52:41 +03:00
|
|
|
go func(nMsg *networkMsg) {
|
2018-08-20 15:28:10 +03:00
|
|
|
defer d.wg.Done()
|
|
|
|
|
2017-12-14 19:52:41 +03:00
|
|
|
switch msg := nMsg.msg.(type) {
|
|
|
|
|
2018-08-20 15:28:10 +03:00
|
|
|
// Reprocess the message, making sure we return
|
|
|
|
// an error to the original caller in case the
|
|
|
|
// gossiper shuts down.
|
2017-12-14 19:52:41 +03:00
|
|
|
case *lnwire.ChannelUpdate:
|
2018-08-20 15:28:10 +03:00
|
|
|
log.Debugf("Reprocessing"+
|
|
|
|
" ChannelUpdate for "+
|
|
|
|
"shortChanID=%v",
|
|
|
|
msg.ShortChannelID.ToUint64())
|
|
|
|
|
|
|
|
select {
|
|
|
|
case d.networkMsgs <- nMsg:
|
|
|
|
case <-d.quit:
|
|
|
|
nMsg.err <- ErrGossiperShuttingDown
|
2017-12-14 19:52:41 +03:00
|
|
|
}
|
|
|
|
|
2017-12-26 18:23:05 +03:00
|
|
|
// We don't expect any other message type than
|
|
|
|
// ChannelUpdate to be in this map.
|
2017-12-14 19:52:41 +03:00
|
|
|
default:
|
|
|
|
log.Errorf("Unsupported message type "+
|
2018-08-20 15:28:10 +03:00
|
|
|
"found among ChannelUpdates: "+
|
|
|
|
"%T", msg)
|
2017-12-14 19:52:41 +03:00
|
|
|
}
|
|
|
|
}(cu)
|
|
|
|
}
|
|
|
|
|
2017-03-28 22:08:14 +03:00
|
|
|
// The channel announcement was successfully processed and now it
|
2017-04-01 15:33:17 +03:00
|
|
|
// might be broadcast to other connected nodes if it was
|
2017-03-28 22:08:14 +03:00
|
|
|
// an announcement with proof (remote).
|
|
|
|
if proof != nil {
|
2017-12-26 18:23:05 +03:00
|
|
|
announcements = append(announcements, networkMsg{
|
2018-06-08 06:11:27 +03:00
|
|
|
peer: nMsg.peer,
|
|
|
|
source: nMsg.source,
|
|
|
|
msg: msg,
|
2017-12-26 18:23:05 +03:00
|
|
|
})
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2017-03-28 22:08:14 +03:00
|
|
|
nMsg.err <- nil
|
|
|
|
return announcements
|
|
|
|
|
2017-04-01 15:33:17 +03:00
|
|
|
// A new authenticated channel edge update has arrived. This indicates
|
2017-03-19 21:40:25 +03:00
|
|
|
// that the directional information for an already known channel has
|
|
|
|
// been updated.
|
2017-04-20 02:20:46 +03:00
|
|
|
case *lnwire.ChannelUpdate:
|
2017-09-04 02:41:01 +03:00
|
|
|
// We'll ignore any channel updates that target any chain
|
|
|
|
// other than the set of chains we know of.
|
|
|
|
if !bytes.Equal(msg.ChainHash[:], d.cfg.ChainHash[:]) {
|
2018-08-20 15:28:09 +03:00
|
|
|
err := fmt.Errorf("Ignoring ChannelUpdate from "+
|
2017-09-04 02:41:01 +03:00
|
|
|
"chain=%v, gossiper on chain=%v", msg.ChainHash,
|
|
|
|
d.cfg.ChainHash)
|
2018-08-20 15:28:09 +03:00
|
|
|
log.Errorf(err.Error())
|
|
|
|
|
2018-01-31 07:41:27 +03:00
|
|
|
d.rejectMtx.Lock()
|
|
|
|
d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
|
|
|
|
d.rejectMtx.Unlock()
|
2018-08-20 15:28:09 +03:00
|
|
|
|
|
|
|
nMsg.err <- err
|
2017-09-04 02:41:01 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-03-28 22:08:14 +03:00
|
|
|
blockHeight := msg.ShortChannelID.BlockHeight
|
|
|
|
shortChanID := msg.ShortChannelID.ToUint64()
|
2017-03-19 21:40:25 +03:00
|
|
|
|
|
|
|
// If the advertised inclusionary block is beyond our knowledge
|
|
|
|
// of the chain tip, then we'll put the announcement in limbo
|
|
|
|
// to be fully verified once we advance forward in the chain.
|
2017-10-18 05:24:04 +03:00
|
|
|
if nMsg.isRemote && isPremature(msg.ShortChannelID, 0) {
|
2017-03-28 22:08:14 +03:00
|
|
|
log.Infof("Update announcement for "+
|
2017-04-01 15:33:17 +03:00
|
|
|
"short_chan_id(%v), is premature: advertises "+
|
2017-03-28 22:08:14 +03:00
|
|
|
"height %v, only height %v is known",
|
2017-11-30 03:45:08 +03:00
|
|
|
shortChanID, blockHeight,
|
|
|
|
atomic.LoadUint32(&d.bestHeight))
|
2017-03-19 21:40:25 +03:00
|
|
|
|
2017-11-30 03:45:08 +03:00
|
|
|
d.Lock()
|
2017-03-19 21:40:25 +03:00
|
|
|
d.prematureAnnouncements[blockHeight] = append(
|
|
|
|
d.prematureAnnouncements[blockHeight],
|
2017-03-28 22:08:14 +03:00
|
|
|
nMsg,
|
2017-03-19 21:40:25 +03:00
|
|
|
)
|
2017-11-30 03:45:08 +03:00
|
|
|
d.Unlock()
|
2017-03-28 22:08:14 +03:00
|
|
|
return nil
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2018-02-25 06:35:58 +03:00
|
|
|
// Before we perform any of the expensive checks below, we'll
|
2019-03-27 23:08:03 +03:00
|
|
|
// check whether this update is stale or is for a zombie
|
|
|
|
// channel in order to quickly reject it.
|
2018-02-25 06:35:58 +03:00
|
|
|
timestamp := time.Unix(int64(msg.Timestamp), 0)
|
|
|
|
if d.cfg.Router.IsStaleEdgePolicy(
|
2019-01-12 20:59:43 +03:00
|
|
|
msg.ShortChannelID, timestamp, msg.ChannelFlags,
|
2018-02-25 06:35:58 +03:00
|
|
|
) {
|
|
|
|
nMsg.err <- nil
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-04-01 15:33:17 +03:00
|
|
|
// Get the node pub key since we don't have it in the channel
|
|
|
|
// update announcement message. We'll need this to properly
|
|
|
|
// verify the message signature.
|
2018-01-23 18:26:15 +03:00
|
|
|
//
|
|
|
|
// We make sure to obtain the mutex for this channel ID
|
2018-02-07 11:30:09 +03:00
|
|
|
// before we access the database. This ensures the state
|
2018-01-23 18:26:15 +03:00
|
|
|
// we read from the database has not changed between this
|
|
|
|
// point and when we call UpdateEdge() later.
|
|
|
|
d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
|
|
|
|
defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())
|
2017-03-28 22:08:14 +03:00
|
|
|
chanInfo, _, _, err := d.cfg.Router.GetChannelByID(msg.ShortChannelID)
|
2019-03-27 23:08:03 +03:00
|
|
|
switch err {
|
|
|
|
// No error, break.
|
|
|
|
case nil:
|
|
|
|
break
|
|
|
|
|
|
|
|
case channeldb.ErrZombieEdge:
|
|
|
|
// Since we've deemed the update as not stale above,
|
|
|
|
// before marking it live, we'll make sure it has been
|
|
|
|
// signed by the correct party. The least-significant
|
|
|
|
// bit in the flag on the channel update tells us which
|
|
|
|
// edge is being updated.
|
|
|
|
var pubKey *btcec.PublicKey
|
|
|
|
switch {
|
|
|
|
case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0:
|
|
|
|
pubKey, _ = chanInfo.NodeKey1()
|
|
|
|
case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1:
|
|
|
|
pubKey, _ = chanInfo.NodeKey2()
|
|
|
|
}
|
2018-04-28 02:31:15 +03:00
|
|
|
|
2019-03-27 23:08:03 +03:00
|
|
|
err := routing.VerifyChannelUpdateSignature(msg, pubKey)
|
|
|
|
if err != nil {
|
|
|
|
err := fmt.Errorf("unable to verify channel "+
|
|
|
|
"update signature: %v", err)
|
|
|
|
log.Error(err)
|
|
|
|
nMsg.err <- err
|
2017-12-14 19:52:41 +03:00
|
|
|
return nil
|
2019-03-27 23:08:03 +03:00
|
|
|
}
|
2018-08-20 15:28:10 +03:00
|
|
|
|
2019-03-27 23:08:03 +03:00
|
|
|
// With the signature valid, we'll proceed to mark the
|
|
|
|
// edge as live and wait for the channel announcement to
|
|
|
|
// come through again.
|
|
|
|
err = d.cfg.Router.MarkEdgeLive(msg.ShortChannelID)
|
|
|
|
if err != nil {
|
|
|
|
err := fmt.Errorf("unable to remove edge with "+
|
|
|
|
"chan_id=%v from zombie index: %v",
|
|
|
|
msg.ShortChannelID, err)
|
2017-12-14 19:52:41 +03:00
|
|
|
log.Error(err)
|
|
|
|
nMsg.err <- err
|
|
|
|
return nil
|
|
|
|
}
|
2019-03-27 23:08:03 +03:00
|
|
|
|
|
|
|
log.Debugf("Removed edge with chan_id=%v from zombie "+
|
|
|
|
"index", msg.ShortChannelID)
|
|
|
|
|
|
|
|
// We'll fallthrough to ensure we stash the update until
|
|
|
|
// we receive its corresponding ChannelAnnouncement.
|
|
|
|
// This is needed to ensure the edge exists in the graph
|
|
|
|
// before applying the update.
|
|
|
|
fallthrough
|
|
|
|
case channeldb.ErrGraphNotFound:
|
|
|
|
fallthrough
|
|
|
|
case channeldb.ErrGraphNoEdgesFound:
|
|
|
|
fallthrough
|
|
|
|
case channeldb.ErrEdgeNotFound:
|
|
|
|
// If the edge corresponding to this ChannelUpdate was
|
|
|
|
// not found in the graph, this might be a channel in
|
|
|
|
// the process of being opened, and we haven't processed
|
|
|
|
// our own ChannelAnnouncement yet, hence it is not
|
|
|
|
// found in the graph. This usually gets resolved after
|
|
|
|
// the channel proofs are exchanged and the channel is
|
|
|
|
// broadcasted to the rest of the network, but in case
|
|
|
|
// this is a private channel this won't ever happen.
|
|
|
|
// This can also happen in the case of a zombie channel
|
|
|
|
// with a fresh update for which we don't have a
|
|
|
|
// ChannelAnnouncement for since we reject them. Because
|
|
|
|
// of this, we temporarily add it to a map, and
|
|
|
|
// reprocess it after our own ChannelAnnouncement has
|
|
|
|
// been processed.
|
|
|
|
d.pChanUpdMtx.Lock()
|
|
|
|
d.prematureChannelUpdates[shortChanID] = append(
|
|
|
|
d.prematureChannelUpdates[shortChanID], nMsg,
|
|
|
|
)
|
|
|
|
d.pChanUpdMtx.Unlock()
|
|
|
|
|
|
|
|
log.Debugf("Got ChannelUpdate for edge not found in "+
|
|
|
|
"graph(shortChanID=%v), saving for "+
|
|
|
|
"reprocessing later", shortChanID)
|
|
|
|
|
|
|
|
// NOTE: We don't return anything on the error channel
|
|
|
|
// for this message, as we expect that will be done when
|
|
|
|
// this ChannelUpdate is later reprocessed.
|
|
|
|
return nil
|
|
|
|
|
|
|
|
default:
|
|
|
|
err := fmt.Errorf("unable to validate channel update "+
|
|
|
|
"short_chan_id=%v: %v", shortChanID, err)
|
|
|
|
log.Error(err)
|
|
|
|
nMsg.err <- err
|
|
|
|
|
|
|
|
d.rejectMtx.Lock()
|
|
|
|
d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
|
|
|
|
d.rejectMtx.Unlock()
|
|
|
|
return nil
|
2017-03-19 21:40:25 +03:00
|
|
|
}
|
|
|
|
|
2017-12-01 09:37:16 +03:00
|
|
|
// The least-significant bit in the flag on the channel update
|
|
|
|
// announcement tells us "which" side of the channel's directed
|
|
|
|
// edge is being updated.
|
2017-03-27 20:25:44 +03:00
|
|
|
var pubKey *btcec.PublicKey
|
2017-12-01 09:37:16 +03:00
|
|
|
switch {
|
2019-01-12 20:59:43 +03:00
|
|
|
case msg.ChannelFlags&lnwire.ChanUpdateDirection == 0:
|
2018-01-31 07:23:14 +03:00
|
|
|
pubKey, _ = chanInfo.NodeKey1()
|
2019-01-12 20:59:43 +03:00
|
|
|
case msg.ChannelFlags&lnwire.ChanUpdateDirection == 1:
|
2018-01-31 07:23:14 +03:00
|
|
|
pubKey, _ = chanInfo.NodeKey2()
|
2017-03-27 20:25:44 +03:00
|
|
|
}
|
|
|
|
|
2019-01-12 20:59:43 +03:00
|
|
|
// Validate the channel announcement with the expected public key and
|
|
|
|
// channel capacity. In the case of an invalid channel update, we'll
|
|
|
|
// return an error to the caller and exit early.
|
|
|
|
err = routing.ValidateChannelUpdateAnn(pubKey, chanInfo.Capacity, msg)
|
|
|
|
if err != nil {
|
2018-09-05 07:45:57 +03:00
|
|
|
rErr := fmt.Errorf("unable to validate channel "+
|
2017-04-01 15:33:17 +03:00
|
|
|
"update announcement for short_chan_id=%v: %v",
|
|
|
|
spew.Sdump(msg.ShortChannelID), err)
|
|
|
|
|
2017-08-23 06:12:43 +03:00
|
|
|
log.Error(rErr)
|
|
|
|
nMsg.err <- rErr
|
2017-03-27 20:25:44 +03:00
|
|
|
return nil
|
|
|
|
}
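
		// As a quick reference for the direction bit used above (per
		// the gossip spec, with node_1/node_2 in canonical order):
		//
		//	ChannelFlags bit 0 == 0 -> update applies to the edge
		//	    advertised by node_1 (verified with NodeKey1)
		//	ChannelFlags bit 0 == 1 -> update applies to the edge
		//	    advertised by node_2 (verified with NodeKey2)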

		update := &channeldb.ChannelEdgePolicy{
			SigBytes:                  msg.Signature.ToSignatureBytes(),
			ChannelID:                 shortChanID,
			LastUpdate:                timestamp,
			MessageFlags:              msg.MessageFlags,
			ChannelFlags:              msg.ChannelFlags,
			TimeLockDelta:             msg.TimeLockDelta,
			MinHTLC:                   msg.HtlcMinimumMsat,
			MaxHTLC:                   msg.HtlcMaximumMsat,
			FeeBaseMSat:               lnwire.MilliSatoshi(msg.BaseFee),
			FeeProportionalMillionths: lnwire.MilliSatoshi(msg.FeeRate),
			ExtraOpaqueData:           msg.ExtraOpaqueData,
		}

		if err := d.cfg.Router.UpdateEdge(update); err != nil {
			if routing.IsError(err, routing.ErrOutdated,
				routing.ErrIgnored) {
				log.Debug(err)
			} else {
				d.rejectMtx.Lock()
				d.recentRejects[msg.ShortChannelID.ToUint64()] = struct{}{}
				d.rejectMtx.Unlock()
				log.Error(err)
			}

			nMsg.err <- err
			return nil
		}
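
		// For context on the fee fields just persisted: the forwarding
		// fee this policy implies for an HTLC of amt (in msat) follows
		// the standard formula (sketch only, not code from this file):
		//
		//	fee := FeeBaseMSat + amt*FeeProportionalMillionths/1000000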

		// If this is a local ChannelUpdate without an AuthProof, it
		// means it is an update to a channel that is not (yet)
		// supposed to be announced to the greater network. However,
		// our channel counterparty will need to be given the update,
		// so we'll try sending the update directly to the remote peer.
		if !nMsg.isRemote && chanInfo.AuthProof == nil {
			// Get our peer's public key.
			remotePubKey := remotePubFromChanInfo(
				chanInfo, msg.ChannelFlags,
			)

			// Now, we'll attempt to send the channel update message
			// reliably to the remote peer in the background, so
			// that we don't block if the peer happens to be offline
			// at the moment.
			err := d.reliableSender.sendMessage(msg, remotePubKey)
			if err != nil {
				err := fmt.Errorf("unable to reliably send %v "+
					"for channel=%v to peer=%x: %v",
					msg.MsgType(), msg.ShortChannelID,
					remotePubKey, err)
				nMsg.err <- err
				return nil
			}
		}

		// The channel update announcement was successfully processed
		// and can now be broadcast to the rest of the network.
		// However, we'll only broadcast it if it has an attached
		// authentication proof.
		if chanInfo.AuthProof != nil {
			announcements = append(announcements, networkMsg{
				peer:   nMsg.peer,
				source: nMsg.source,
				msg:    msg,
			})
		}

		nMsg.err <- nil
		return announcements

	// A new signature announcement has been received. This indicates
	// willingness of the nodes involved in the funding of a channel to
	// announce this new channel to the rest of the world.
	case *lnwire.AnnounceSignatures:
		needBlockHeight := msg.ShortChannelID.BlockHeight +
			d.cfg.ProofMatureDelta
		shortChanID := msg.ShortChannelID.ToUint64()

		prefix := "local"
		if nMsg.isRemote {
			prefix = "remote"
		}

		log.Infof("Received new %v channel announcement: %v", prefix,
			spew.Sdump(msg))

		// By the specification, channel announcement proofs should
		// only be sent after the channel has reached some number of
		// confirmations in the bitcoin blockchain. Therefore, we check
		// whether the proof is premature. If so, we'll halt processing
		// until the expected announcement height. This allows us to be
		// tolerant of other clients if this constraint is changed.
		if isPremature(msg.ShortChannelID, d.cfg.ProofMatureDelta) {
			d.Lock()
			d.prematureAnnouncements[needBlockHeight] = append(
				d.prematureAnnouncements[needBlockHeight],
				nMsg,
			)
			d.Unlock()
			log.Infof("Premature proof announcement, "+
				"current block height lower than needed: %v <"+
				" %v, add announcement to reprocessing batch",
				atomic.LoadUint32(&d.bestHeight), needBlockHeight)
			return nil
		}
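
		// Quick numeric illustration of the maturity gate above
		// (example values only): with ProofMatureDelta = 6 and a short
		// channel ID whose block height is 600000, needBlockHeight is
		// 600006, so the proof is stashed until our best height
		// reaches at least that block.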

		// Ensure that we know of a channel with the target channel ID
		// before proceeding further.
		//
		// We must acquire the mutex for this channel ID before getting
		// the channel from the database, to ensure what we read does
		// not change before we call AddProof() later.
		d.channelMtx.Lock(msg.ShortChannelID.ToUint64())
		defer d.channelMtx.Unlock(msg.ShortChannelID.ToUint64())

		chanInfo, e1, e2, err := d.cfg.Router.GetChannelByID(
			msg.ShortChannelID)
		if err != nil {
			// TODO(andrew.shvv) this is dangerous because remote
			// node might rewrite the waiting proof.
			proof := channeldb.NewWaitingProof(nMsg.isRemote, msg)
			err := d.cfg.WaitingProofStore.Add(proof)
			if err != nil {
				err := fmt.Errorf("unable to store "+
					"the proof for short_chan_id=%v: %v",
					shortChanID, err)
				log.Error(err)
				nMsg.err <- err
				return nil
			}

			log.Infof("Orphan %v proof announcement with "+
				"short_chan_id=%v, adding "+
				"to waiting batch", prefix, shortChanID)
			nMsg.err <- nil
			return nil
		}

		nodeID := nMsg.source.SerializeCompressed()
		isFirstNode := bytes.Equal(nodeID, chanInfo.NodeKey1Bytes[:])
		isSecondNode := bytes.Equal(nodeID, chanInfo.NodeKey2Bytes[:])

		// Ensure that the channel that was retrieved belongs to the
		// peer which sent the proof announcement.
		if !(isFirstNode || isSecondNode) {
			err := fmt.Errorf("channel does not belong to the "+
				"peer which sent the proof, "+
				"short_chan_id=%v", shortChanID)
			log.Error(err)
			nMsg.err <- err
			return nil
		}

		// If the proof was sent by a local sub-system, then we'll send
		// the announcement signature to the remote node so they can
		// also reconstruct the full channel announcement.
		if !nMsg.isRemote {
			var remotePubKey [33]byte
			if isFirstNode {
				remotePubKey = chanInfo.NodeKey2Bytes
			} else {
				remotePubKey = chanInfo.NodeKey1Bytes
			}

			// Since the remote peer might not be online, we'll call
			// a method that will attempt to deliver the proof when
			// it comes online.
			err := d.reliableSender.sendMessage(msg, remotePubKey)
			if err != nil {
				err := fmt.Errorf("unable to reliably send %v "+
					"for channel=%v to peer=%x: %v",
					msg.MsgType(), msg.ShortChannelID,
					remotePubKey, err)
				nMsg.err <- err
				return nil
			}
		}

		// Check if we already have the full proof for this channel.
		if chanInfo.AuthProof != nil {
			// If we already have the fully assembled proof, then
			// the peer sending us their proof has probably not
			// received our local proof yet. So be kind and send
			// them the full proof.
			if nMsg.isRemote {
				peerID := nMsg.source.SerializeCompressed()
				log.Debugf("Got AnnounceSignatures for " +
					"channel with full proof.")

				d.wg.Add(1)
				go func() {
					defer d.wg.Done()
					log.Debugf("Received half proof for "+
						"channel %v with existing "+
						"full proof. Sending full "+
						"proof to peer=%x",
						msg.ChannelID, peerID)

					chanAnn, _, _, err := CreateChanAnnouncement(
						chanInfo.AuthProof, chanInfo,
						e1, e2,
					)
					if err != nil {
						log.Errorf("unable to gen "+
							"ann: %v", err)
						return
					}
					err = nMsg.peer.SendMessage(
						false, chanAnn,
					)
					if err != nil {
						log.Errorf("Failed sending "+
							"full proof to "+
							"peer=%x: %v",
							peerID, err)
						return
					}
					log.Debugf("Full proof sent to peer=%x"+
						" for chanID=%v", peerID,
						msg.ChannelID)
				}()
			}

			log.Debugf("Already have proof for channel "+
				"with chanID=%v", msg.ChannelID)
			nMsg.err <- nil
			return nil
		}

		// Check whether we received the opposite proof. If so, then
		// we're now able to construct the full proof and create the
		// channel announcement. If we didn't receive the opposite half
		// of the proof, then we should store this one and wait for the
		// opposite half to be received.
		proof := channeldb.NewWaitingProof(nMsg.isRemote, msg)
		oppositeProof, err := d.cfg.WaitingProofStore.Get(
			proof.OppositeKey(),
		)
		if err != nil && err != channeldb.ErrWaitingProofNotFound {
			err := fmt.Errorf("unable to get "+
				"the opposite proof for short_chan_id=%v: %v",
				shortChanID, err)
			log.Error(err)
			nMsg.err <- err
			return nil
		}

		if err == channeldb.ErrWaitingProofNotFound {
			err := d.cfg.WaitingProofStore.Add(proof)
			if err != nil {
				err := fmt.Errorf("unable to store "+
					"the proof for short_chan_id=%v: %v",
					shortChanID, err)
				log.Error(err)
				nMsg.err <- err
				return nil
			}

			log.Infof("1/2 of channel ann proof received for "+
				"short_chan_id=%v, waiting for other half",
				shortChanID)

			nMsg.err <- nil
			return nil
		}
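
		// Note on the lookup above: each half proof is keyed by the
		// short channel ID together with whether it originated locally
		// or remotely, and OppositeKey() flips that local/remote bit,
		// so a local half pairs with the corresponding remote half
		// (and vice versa). See channeldb.WaitingProof for the exact
		// key layout.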

		// We now have both halves of the channel announcement proof,
		// so we'll reconstruct the initial announcement in order to
		// validate it shortly below.
		var dbProof channeldb.ChannelAuthProof
		if isFirstNode {
			dbProof.NodeSig1Bytes = msg.NodeSignature.ToSignatureBytes()
			dbProof.NodeSig2Bytes = oppositeProof.NodeSignature.ToSignatureBytes()
			dbProof.BitcoinSig1Bytes = msg.BitcoinSignature.ToSignatureBytes()
			dbProof.BitcoinSig2Bytes = oppositeProof.BitcoinSignature.ToSignatureBytes()
		} else {
			dbProof.NodeSig1Bytes = oppositeProof.NodeSignature.ToSignatureBytes()
			dbProof.NodeSig2Bytes = msg.NodeSignature.ToSignatureBytes()
			dbProof.BitcoinSig1Bytes = oppositeProof.BitcoinSignature.ToSignatureBytes()
			dbProof.BitcoinSig2Bytes = msg.BitcoinSignature.ToSignatureBytes()
		}

		chanAnn, e1Ann, e2Ann, err := CreateChanAnnouncement(
			&dbProof, chanInfo, e1, e2,
		)
		if err != nil {
			log.Error(err)
			nMsg.err <- err
			return nil
		}

		// With all the necessary components assembled, validate the
		// full channel announcement proof.
		if err := routing.ValidateChannelAnn(chanAnn); err != nil {
			err := fmt.Errorf("channel announcement proof "+
				"for short_chan_id=%v isn't valid: %v",
				shortChanID, err)

			log.Error(err)
			nMsg.err <- err
			return nil
		}
|
2017-03-28 22:08:14 +03:00
|
|
|
|
|
|
|
// If the channel was returned by the router it means that
|
|
|
|
// existence of funding point and inclusion of nodes bitcoin
|
2017-04-01 15:33:17 +03:00
|
|
|
// keys in it already checked by the router. In this stage we
|
|
|
|
// should check that node keys are attest to the bitcoin keys
|
|
|
|
// by validating the signatures of announcement. If proof is
|
|
|
|
// valid then we'll populate the channel edge with it, so we
|
|
|
|
// can announce it on peer connect.
|
2017-03-28 22:08:14 +03:00
|
|
|
err = d.cfg.Router.AddProof(msg.ShortChannelID, &dbProof)
|
|
|
|
if err != nil {
|
2018-09-05 07:45:57 +03:00
|
|
|
err := fmt.Errorf("unable add proof to the "+
|
2017-03-28 22:08:14 +03:00
|
|
|
"channel chanID=%v: %v", msg.ChannelID, err)
|
|
|
|
log.Error(err)
|
|
|
|
nMsg.err <- err
|
|
|
|
return nil
|
|
|
|
}
|
2017-05-05 20:17:31 +03:00
|
|
|
|
2019-02-06 04:18:34 +03:00
|
|
|
err = d.cfg.WaitingProofStore.Remove(proof.OppositeKey())
|
2018-08-20 15:28:10 +03:00
|
|
|
if err != nil {
|
2018-09-05 07:45:57 +03:00
|
|
|
err := fmt.Errorf("unable remove opposite proof "+
|
2018-08-20 15:28:10 +03:00
|
|
|
"for the channel with chanID=%v: %v",
|
|
|
|
msg.ChannelID, err)
|
2017-05-05 20:17:31 +03:00
|
|
|
log.Error(err)
|
|
|
|
nMsg.err <- err
|
|
|
|
return nil
|
|
|
|
}
|
2017-03-28 22:08:14 +03:00
|
|
|
|
|
|
|
// Proof was successfully created and now can announce the
|
|
|
|
// channel to the remain network.
|
2017-04-01 15:33:17 +03:00
|
|
|
log.Infof("Fully valid channel proof for short_chan_id=%v "+
|
|
|
|
"constructed, adding to next ann batch",
|
|
|
|
shortChanID)
|
2017-03-28 22:08:14 +03:00
|
|
|
|
2017-04-01 15:33:17 +03:00
|
|
|
// Assemble the necessary announcements to add to the next
|
|
|
|
// broadcasting batch.
|
2017-12-26 18:23:05 +03:00
|
|
|
announcements = append(announcements, networkMsg{
|
2018-06-08 06:11:27 +03:00
|
|
|
peer: nMsg.peer,
|
|
|
|
source: nMsg.source,
|
|
|
|
msg: chanAnn,
|
2017-12-26 18:23:05 +03:00
|
|
|
})
|
2017-03-28 22:08:14 +03:00
|
|
|
if e1Ann != nil {
|
2017-12-26 18:23:05 +03:00
|
|
|
announcements = append(announcements, networkMsg{
|
2018-06-08 06:11:27 +03:00
|
|
|
peer: nMsg.peer,
|
|
|
|
source: nMsg.source,
|
|
|
|
msg: e1Ann,
|
2017-12-26 18:23:05 +03:00
|
|
|
})
|
2017-03-28 22:08:14 +03:00
|
|
|
}
|
|
|
|
if e2Ann != nil {
|
2017-12-26 18:23:05 +03:00
|
|
|
announcements = append(announcements, networkMsg{
|
2018-06-08 06:11:27 +03:00
|
|
|
peer: nMsg.peer,
|
|
|
|
source: nMsg.source,
|
|
|
|
msg: e2Ann,
|
2017-12-26 18:23:05 +03:00
|
|
|
})
|
2017-03-28 22:08:14 +03:00
|
|
|
}
|
|
|
|
|

		// We'll also send along the node announcements for each
		// channel participant if we know of them.
		node1Ann, err := d.fetchNodeAnn(chanInfo.NodeKey1Bytes)
		if err != nil {
			log.Debugf("Unable to fetch node announcement for "+
				"%x: %v", chanInfo.NodeKey1Bytes, err)
		} else {
			announcements = append(announcements, networkMsg{
				peer:   nMsg.peer,
				source: nMsg.source,
				msg:    node1Ann,
			})
		}
		node2Ann, err := d.fetchNodeAnn(chanInfo.NodeKey2Bytes)
		if err != nil {
			log.Debugf("Unable to fetch node announcement for "+
				"%x: %v", chanInfo.NodeKey2Bytes, err)
		} else {
			announcements = append(announcements, networkMsg{
				peer:   nMsg.peer,
				source: nMsg.source,
				msg:    node2Ann,
			})
		}

		nMsg.err <- nil
		return announcements

	default:
		nMsg.err <- errors.New("wrong type of the announcement")
		return nil
	}
}

// fetchNodeAnn fetches the latest signed node announcement from our point of
// view for the node with the given public key.
func (d *AuthenticatedGossiper) fetchNodeAnn(
	pubKey [33]byte) (*lnwire.NodeAnnouncement, error) {

	node, err := d.cfg.Router.FetchLightningNode(pubKey)
	if err != nil {
		return nil, err
	}

	return node.NodeAnnouncement(true)
}

// isMsgStale determines whether a message retrieved from the backing
// MessageStore is seen as stale by the current graph.
func (d *AuthenticatedGossiper) isMsgStale(msg lnwire.Message) bool {
	switch msg := msg.(type) {
	case *lnwire.AnnounceSignatures:
		chanInfo, _, _, err := d.cfg.Router.GetChannelByID(
			msg.ShortChannelID,
		)

		// If the channel cannot be found, it is most likely a leftover
		// message for a channel that was closed, so we can consider it
		// stale.
		if err == channeldb.ErrEdgeNotFound {
			return true
		}
		if err != nil {
			log.Debugf("Unable to retrieve channel=%v from graph: "+
				"%v", msg.ShortChannelID, err)
			return false
		}

		// If the proof exists in the graph, then we have successfully
		// received the remote proof and assembled the full proof, so
		// we can safely delete the local proof from the database.
		return chanInfo.AuthProof != nil

	case *lnwire.ChannelUpdate:
		_, p1, p2, err := d.cfg.Router.GetChannelByID(msg.ShortChannelID)

		// If the channel cannot be found, it is most likely a leftover
		// message for a channel that was closed, so we can consider it
		// stale.
		if err == channeldb.ErrEdgeNotFound {
			return true
		}
		if err != nil {
			log.Debugf("Unable to retrieve channel=%v from graph: "+
				"%v", msg.ShortChannelID, err)
			return false
		}

		// Otherwise, we'll retrieve the correct policy that we
		// currently have stored within our graph to check if this
		// message is stale by comparing its timestamp.
		var p *channeldb.ChannelEdgePolicy
		if msg.ChannelFlags&lnwire.ChanUpdateDirection == 0 {
			p = p1
		} else {
			p = p2
		}

		// If the policy is still unknown, then we can consider this
		// policy fresh.
		if p == nil {
			return false
		}

		timestamp := time.Unix(int64(msg.Timestamp), 0)
		return p.LastUpdate.After(timestamp)

	default:
		// We'll make sure to not mark any unsupported messages as
		// stale to ensure they are not removed.
		return false
	}
}
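
// As a worked example of the ChannelUpdate staleness rule above: if the stored
// policy for the matching direction has a LastUpdate of Unix time 1554112800
// and the stored ChannelUpdate carries Timestamp 1554109200 (an hour earlier),
// isMsgStale returns true and the reliable sender can safely drop the stored
// message; a strictly newer Timestamp keeps it.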

// updateChannel creates a new fully signed update for the channel, and updates
// the underlying graph with the new state.
func (d *AuthenticatedGossiper) updateChannel(info *channeldb.ChannelEdgeInfo,
	edge *channeldb.ChannelEdgePolicy) (*lnwire.ChannelAnnouncement,
	*lnwire.ChannelUpdate, error) {

	// We'll make sure we support the new max_htlc field if it's not
	// already present.
	if !edge.MessageFlags.HasMaxHtlc() {
		edge.MessageFlags |= lnwire.ChanUpdateOptionMaxHtlc
		edge.MaxHTLC = lnwire.NewMSatFromSatoshis(info.Capacity)
	}

	// Make sure the timestamp always increases, so that our update gets
	// propagated.
	timestamp := time.Now().Unix()
	if timestamp <= edge.LastUpdate.Unix() {
		timestamp = edge.LastUpdate.Unix() + 1
	}
	edge.LastUpdate = time.Unix(timestamp, 0)
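
	// For instance (illustrative numbers): if the previous policy has a
	// LastUpdate of Unix time 1554112800 and time.Now() also reports
	// 1554112800 (or earlier, e.g. due to clock skew), the branch above
	// bumps the new timestamp to 1554112801 so peers treat the update as
	// strictly newer than the last one they saw.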

	chanUpdate := &lnwire.ChannelUpdate{
		ChainHash:       info.ChainHash,
		ShortChannelID:  lnwire.NewShortChanIDFromInt(edge.ChannelID),
		Timestamp:       uint32(timestamp),
		MessageFlags:    edge.MessageFlags,
		ChannelFlags:    edge.ChannelFlags,
		TimeLockDelta:   edge.TimeLockDelta,
		HtlcMinimumMsat: edge.MinHTLC,
		HtlcMaximumMsat: edge.MaxHTLC,
		BaseFee:         uint32(edge.FeeBaseMSat),
		FeeRate:         uint32(edge.FeeProportionalMillionths),
		ExtraOpaqueData: edge.ExtraOpaqueData,
	}

	var err error
	chanUpdate.Signature, err = lnwire.NewSigFromRawSignature(edge.SigBytes)
	if err != nil {
		return nil, nil, err
	}

	// With the update applied, we'll generate a new signature over a
	// digest of the channel announcement itself.
	sig, err := SignAnnouncement(d.cfg.AnnSigner, d.selfKey, chanUpdate)
	if err != nil {
		return nil, nil, err
	}

	// Next, we'll set the new signature in place, and update the reference
	// in the backing slice.
	edge.SigBytes = sig.Serialize()
	chanUpdate.Signature, err = lnwire.NewSigFromSignature(sig)
	if err != nil {
		return nil, nil, err
	}

	// To ensure that our signature is valid, we'll verify it ourselves
	// before committing it to the slice returned.
	err = routing.ValidateChannelUpdateAnn(d.selfKey, info.Capacity, chanUpdate)
	if err != nil {
		return nil, nil, fmt.Errorf("generated invalid channel "+
			"update sig: %v", err)
	}

	// Finally, we'll write the new edge policy to disk.
	if err := d.cfg.Router.UpdateEdge(edge); err != nil {
		return nil, nil, err
	}

	// We'll also create the original channel announcement so the two can
	// be broadcast alongside each other (if necessary), but only if we
	// have a full channel announcement for this channel.
	var chanAnn *lnwire.ChannelAnnouncement
	if info.AuthProof != nil {
		chanID := lnwire.NewShortChanIDFromInt(info.ChannelID)
		chanAnn = &lnwire.ChannelAnnouncement{
			ShortChannelID:  chanID,
			NodeID1:         info.NodeKey1Bytes,
			NodeID2:         info.NodeKey2Bytes,
			ChainHash:       info.ChainHash,
			BitcoinKey1:     info.BitcoinKey1Bytes,
			Features:        lnwire.NewRawFeatureVector(),
			BitcoinKey2:     info.BitcoinKey2Bytes,
			ExtraOpaqueData: edge.ExtraOpaqueData,
		}
		chanAnn.NodeSig1, err = lnwire.NewSigFromRawSignature(
			info.AuthProof.NodeSig1Bytes,
		)
		if err != nil {
			return nil, nil, err
		}
		chanAnn.NodeSig2, err = lnwire.NewSigFromRawSignature(
			info.AuthProof.NodeSig2Bytes,
		)
		if err != nil {
			return nil, nil, err
		}
		chanAnn.BitcoinSig1, err = lnwire.NewSigFromRawSignature(
			info.AuthProof.BitcoinSig1Bytes,
		)
		if err != nil {
			return nil, nil, err
		}
		chanAnn.BitcoinSig2, err = lnwire.NewSigFromRawSignature(
			info.AuthProof.BitcoinSig2Bytes,
		)
		if err != nil {
			return nil, nil, err
		}
	}

	return chanAnn, chanUpdate, err
}

// SyncManager returns the gossiper's SyncManager instance.
func (d *AuthenticatedGossiper) SyncManager() *SyncManager {
	return d.syncMgr
}