2019-01-24 16:28:25 +03:00
|
|
|
package lnd
|
2016-01-14 08:41:46 +03:00
|
|
|
|
|
|
|
import (
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
"bytes"
|
2018-06-05 04:34:16 +03:00
|
|
|
"context"
|
2018-02-01 11:48:38 +03:00
|
|
|
"crypto/rand"
|
2016-12-27 08:42:23 +03:00
|
|
|
"encoding/hex"
|
2016-01-14 08:41:46 +03:00
|
|
|
"fmt"
|
2017-12-03 05:38:14 +03:00
|
|
|
"image/color"
|
2018-02-01 11:48:38 +03:00
|
|
|
"math/big"
|
2019-04-04 12:25:31 +03:00
|
|
|
prand "math/rand"
|
2016-01-14 08:41:46 +03:00
|
|
|
"net"
|
2018-02-24 04:33:05 +03:00
|
|
|
"path/filepath"
|
2018-11-04 05:00:19 +03:00
|
|
|
"regexp"
|
2017-04-14 00:41:54 +03:00
|
|
|
"strconv"
|
2016-01-14 08:41:46 +03:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
2016-12-15 05:11:31 +03:00
|
|
|
"time"
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2018-06-05 04:34:16 +03:00
|
|
|
"github.com/btcsuite/btcd/btcec"
|
|
|
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
|
|
|
"github.com/btcsuite/btcd/connmgr"
|
2019-06-14 03:30:22 +03:00
|
|
|
"github.com/btcsuite/btcd/txscript"
|
2018-06-05 04:34:16 +03:00
|
|
|
"github.com/btcsuite/btcd/wire"
|
|
|
|
"github.com/btcsuite/btcutil"
|
2018-03-26 20:10:47 +03:00
|
|
|
"github.com/go-errors/errors"
|
2019-01-16 17:47:43 +03:00
|
|
|
sphinx "github.com/lightningnetwork/lightning-onion"
|
2017-09-04 02:58:14 +03:00
|
|
|
"github.com/lightningnetwork/lnd/autopilot"
|
2016-10-28 05:49:10 +03:00
|
|
|
"github.com/lightningnetwork/lnd/brontide"
|
2019-08-08 05:18:23 +03:00
|
|
|
"github.com/lightningnetwork/lnd/chanacceptor"
|
2018-12-10 07:08:32 +03:00
|
|
|
"github.com/lightningnetwork/lnd/chanbackup"
|
2019-08-08 20:39:38 +03:00
|
|
|
"github.com/lightningnetwork/lnd/chanfitness"
|
2016-03-23 04:49:22 +03:00
|
|
|
"github.com/lightningnetwork/lnd/channeldb"
|
2020-01-10 05:47:38 +03:00
|
|
|
"github.com/lightningnetwork/lnd/channeldb/kvdb"
|
2018-10-22 06:36:56 +03:00
|
|
|
"github.com/lightningnetwork/lnd/channelnotifier"
|
2019-11-25 13:46:29 +03:00
|
|
|
"github.com/lightningnetwork/lnd/clock"
|
2018-01-17 07:25:34 +03:00
|
|
|
"github.com/lightningnetwork/lnd/contractcourt"
|
2017-03-20 00:06:10 +03:00
|
|
|
"github.com/lightningnetwork/lnd/discovery"
|
2019-11-08 16:29:29 +03:00
|
|
|
"github.com/lightningnetwork/lnd/feature"
|
2018-03-26 20:10:47 +03:00
|
|
|
"github.com/lightningnetwork/lnd/htlcswitch"
|
2019-09-05 14:35:39 +03:00
|
|
|
"github.com/lightningnetwork/lnd/htlcswitch/hop"
|
2019-01-16 17:47:43 +03:00
|
|
|
"github.com/lightningnetwork/lnd/input"
|
2018-12-20 13:57:44 +03:00
|
|
|
"github.com/lightningnetwork/lnd/invoices"
|
2020-04-28 11:06:27 +03:00
|
|
|
"github.com/lightningnetwork/lnd/keychain"
|
2018-05-23 16:41:16 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lncfg"
|
2018-06-08 06:09:40 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnpeer"
|
2016-08-31 02:52:53 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnrpc"
|
2019-05-22 12:56:04 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
2018-01-17 07:25:34 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwallet"
|
2019-10-31 05:43:05 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
|
2019-11-14 07:58:54 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwallet/chanfunding"
|
2016-10-28 05:49:10 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
2018-04-20 08:00:49 +03:00
|
|
|
"github.com/lightningnetwork/lnd/nat"
|
2019-01-09 03:18:39 +03:00
|
|
|
"github.com/lightningnetwork/lnd/netann"
|
2019-07-29 17:59:48 +03:00
|
|
|
"github.com/lightningnetwork/lnd/peernotifier"
|
2019-02-16 06:31:24 +03:00
|
|
|
"github.com/lightningnetwork/lnd/pool"
|
2020-03-17 09:22:35 +03:00
|
|
|
"github.com/lightningnetwork/lnd/queue"
|
2017-02-02 05:29:46 +03:00
|
|
|
"github.com/lightningnetwork/lnd/routing"
|
2019-09-19 12:02:46 +03:00
|
|
|
"github.com/lightningnetwork/lnd/routing/localchans"
|
2019-04-05 18:36:11 +03:00
|
|
|
"github.com/lightningnetwork/lnd/routing/route"
|
2018-10-23 13:08:03 +03:00
|
|
|
"github.com/lightningnetwork/lnd/sweep"
|
2018-08-01 07:01:19 +03:00
|
|
|
"github.com/lightningnetwork/lnd/ticker"
|
2018-04-27 23:59:19 +03:00
|
|
|
"github.com/lightningnetwork/lnd/tor"
|
2018-12-10 07:08:32 +03:00
|
|
|
"github.com/lightningnetwork/lnd/walletunlocker"
|
2019-06-14 03:29:47 +03:00
|
|
|
"github.com/lightningnetwork/lnd/watchtower/wtclient"
|
|
|
|
"github.com/lightningnetwork/lnd/watchtower/wtdb"
|
|
|
|
"github.com/lightningnetwork/lnd/watchtower/wtpolicy"
|
2016-01-14 08:41:46 +03:00
|
|
|
)
|
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
const (
|
|
|
|
// defaultMinPeers is the minimum number of peers nodes should always be
|
|
|
|
// connected to.
|
|
|
|
defaultMinPeers = 3
|
2018-02-01 11:48:38 +03:00
|
|
|
|
2018-09-02 00:09:16 +03:00
|
|
|
// defaultStableConnDuration is a floor under which all reconnection
|
|
|
|
// attempts will apply exponential randomized backoff. Connections
|
|
|
|
// durations exceeding this value will be eligible to have their
|
|
|
|
// backoffs reduced.
|
|
|
|
defaultStableConnDuration = 10 * time.Minute
|
2019-04-04 12:25:31 +03:00
|
|
|
|
|
|
|
// numInstantInitReconnect specifies how many persistent peers we should
|
|
|
|
// always attempt outbound connections to immediately. After this value
|
|
|
|
// is surpassed, the remaining peers will be randomly delayed using
|
|
|
|
// maxInitReconnectDelay.
|
|
|
|
numInstantInitReconnect = 10
|
|
|
|
|
|
|
|
// maxInitReconnectDelay specifies the maximum delay in seconds we will
|
|
|
|
// apply in attempting to reconnect to persistent peers on startup. The
|
|
|
|
// value used or a particular peer will be chosen between 0s and this
|
|
|
|
// value.
|
|
|
|
maxInitReconnectDelay = 30
|
2017-08-31 11:15:39 +03:00
|
|
|
)
|
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
var (
|
|
|
|
// ErrPeerNotConnected signals that the server has no connection to the
|
|
|
|
// given peer.
|
|
|
|
ErrPeerNotConnected = errors.New("peer is not connected")
|
|
|
|
|
2019-10-24 00:35:41 +03:00
|
|
|
// ErrServerNotActive indicates that the server has started but hasn't
|
|
|
|
// fully finished the startup process.
|
|
|
|
ErrServerNotActive = errors.New("server is still in the process of " +
|
|
|
|
"starting")
|
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
// ErrServerShuttingDown indicates that the server is in the process of
|
|
|
|
// gracefully exiting.
|
|
|
|
ErrServerShuttingDown = errors.New("server is shutting down")
|
2018-11-04 05:00:19 +03:00
|
|
|
|
|
|
|
// validColorRegexp is a regexp that lets you check if a particular
|
|
|
|
// color string matches the standard hex color format #RRGGBB.
|
|
|
|
validColorRegexp = regexp.MustCompile("^#[A-Fa-f0-9]{6}$")
|
2018-05-08 07:45:36 +03:00
|
|
|
)
|
|
|
|
|
2019-04-10 05:45:48 +03:00
|
|
|
// errPeerAlreadyConnected is an error returned by the server when we're
|
|
|
|
// commanded to connect to a peer, but they're already connected.
|
|
|
|
type errPeerAlreadyConnected struct {
|
|
|
|
peer *peer
|
|
|
|
}
|
|
|
|
|
|
|
|
// Error returns the human readable version of this error type.
|
|
|
|
//
|
|
|
|
// NOTE: Part of the error interface.
|
|
|
|
func (e *errPeerAlreadyConnected) Error() string {
|
|
|
|
return fmt.Sprintf("already connected to peer: %v", e.peer)
|
|
|
|
}
|
|
|
|
|
2016-12-25 03:51:25 +03:00
|
|
|
// server is the main server of the Lightning Network Daemon. The server houses
|
|
|
|
// global state pertaining to the wallet, database, and the rpcserver.
|
2016-06-21 21:52:09 +03:00
|
|
|
// Additionally, the server is also used as a central messaging bus to interact
|
|
|
|
// with any of its companion objects.
|
2016-01-14 08:41:46 +03:00
|
|
|
type server struct {
|
2019-03-12 02:12:15 +03:00
|
|
|
active int32 // atomic
|
|
|
|
stopping int32 // atomic
|
|
|
|
|
|
|
|
start sync.Once
|
|
|
|
stop sync.Once
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2020-05-14 15:18:11 +03:00
|
|
|
cfg *Config
|
|
|
|
|
2020-04-28 11:06:21 +03:00
|
|
|
// identityECDH is an ECDH capable wrapper for the private key used
|
|
|
|
// to authenticate any incoming connections.
|
2020-04-28 11:06:29 +03:00
|
|
|
identityECDH keychain.SingleKeyECDH
|
2016-01-17 06:07:44 +03:00
|
|
|
|
2017-04-14 21:17:41 +03:00
|
|
|
// nodeSigner is an implementation of the MessageSigner implementation
|
2017-05-18 21:55:25 +03:00
|
|
|
// that's backed by the identity private key of the running lnd node.
|
2019-01-09 03:18:39 +03:00
|
|
|
nodeSigner *netann.NodeSigner
|
2017-04-14 21:17:41 +03:00
|
|
|
|
2019-02-15 04:13:44 +03:00
|
|
|
chanStatusMgr *netann.ChanStatusManager
|
|
|
|
|
2018-04-28 00:02:05 +03:00
|
|
|
// listenAddrs is the list of addresses the server is currently
|
|
|
|
// listening on.
|
2018-05-23 16:38:19 +03:00
|
|
|
listenAddrs []net.Addr
|
2018-04-28 00:02:05 +03:00
|
|
|
|
|
|
|
// torController is a client that will communicate with a locally
|
|
|
|
// running Tor server. This client will handle initiating and
|
|
|
|
// authenticating the connection to the Tor server, automatically
|
|
|
|
// creating and setting up onion services, etc.
|
|
|
|
torController *tor.Controller
|
|
|
|
|
2018-04-20 08:00:49 +03:00
|
|
|
// natTraversal is the specific NAT traversal technique used to
|
|
|
|
// automatically set up port forwarding rules in order to advertise to
|
|
|
|
// the network that the node is accepting inbound connections.
|
|
|
|
natTraversal nat.Traversal
|
|
|
|
|
|
|
|
// lastDetectedIP is the last IP detected by the NAT traversal technique
|
|
|
|
// above. This IP will be watched periodically in a goroutine in order
|
|
|
|
// to handle dynamic IP changes.
|
|
|
|
lastDetectedIP net.IP
|
|
|
|
|
2018-01-23 03:04:40 +03:00
|
|
|
mu sync.RWMutex
|
2016-12-15 05:11:31 +03:00
|
|
|
peersByPub map[string]*peer
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
inboundPeers map[string]*peer
|
|
|
|
outboundPeers map[string]*peer
|
|
|
|
|
2019-01-10 07:16:27 +03:00
|
|
|
peerConnectedListeners map[string][]chan<- lnpeer.Peer
|
|
|
|
peerDisconnectedListeners map[string][]chan<- struct{}
|
2017-09-13 15:38:06 +03:00
|
|
|
|
2019-02-15 06:17:52 +03:00
|
|
|
persistentPeers map[string]bool
|
2018-02-01 11:48:38 +03:00
|
|
|
persistentPeersBackoff map[string]time.Duration
|
|
|
|
persistentConnReqs map[string][]*connmgr.ConnReq
|
2018-03-31 02:19:52 +03:00
|
|
|
persistentRetryCancels map[string]chan struct{}
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
2020-03-17 09:22:35 +03:00
|
|
|
// peerErrors keeps a set of peer error buffers for peers that have
|
|
|
|
// disconnected from us. This allows us to track historic peer errors
|
|
|
|
// over connections. The string of the peer's compressed pubkey is used
|
|
|
|
// as a key for this map.
|
|
|
|
peerErrors map[string]*queue.CircularBuffer
|
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// ignorePeerTermination tracks peers for which the server has initiated
|
|
|
|
// a disconnect. Adding a peer to this map causes the peer termination
|
|
|
|
// watcher to short circuit in the event that peers are purposefully
|
|
|
|
// disconnected.
|
|
|
|
ignorePeerTermination map[*peer]struct{}
|
|
|
|
|
2018-05-08 06:18:15 +03:00
|
|
|
// scheduledPeerConnection maps a pubkey string to a callback that
|
|
|
|
// should be executed in the peerTerminationWatcher the prior peer with
|
|
|
|
// the same pubkey exits. This allows the server to wait until the
|
|
|
|
// prior peer has cleaned up successfully, before adding the new peer
|
|
|
|
// intended to replace it.
|
|
|
|
scheduledPeerConnection map[string]func()
|
|
|
|
|
2017-05-18 21:55:25 +03:00
|
|
|
cc *chainControl
|
2016-06-21 21:52:09 +03:00
|
|
|
|
|
|
|
fundingMgr *fundingManager
|
2017-05-18 21:55:25 +03:00
|
|
|
|
|
|
|
chanDB *channeldb.DB
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2018-01-17 07:25:34 +03:00
|
|
|
htlcSwitch *htlcswitch.Switch
|
|
|
|
|
2018-12-20 13:57:44 +03:00
|
|
|
invoices *invoices.InvoiceRegistry
|
2018-01-17 07:25:34 +03:00
|
|
|
|
2018-10-22 06:36:56 +03:00
|
|
|
channelNotifier *channelnotifier.ChannelNotifier
|
|
|
|
|
2019-07-29 17:59:48 +03:00
|
|
|
peerNotifier *peernotifier.PeerNotifier
|
|
|
|
|
2020-02-19 18:34:47 +03:00
|
|
|
htlcNotifier *htlcswitch.HtlcNotifier
|
|
|
|
|
2018-01-17 07:25:34 +03:00
|
|
|
witnessBeacon contractcourt.WitnessBeacon
|
|
|
|
|
2016-11-29 06:43:57 +03:00
|
|
|
breachArbiter *breachArbiter
|
2016-07-10 02:36:25 +03:00
|
|
|
|
2019-05-10 19:00:15 +03:00
|
|
|
missionControl *routing.MissionControl
|
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
chanRouter *routing.ChannelRouter
|
2016-07-15 14:02:59 +03:00
|
|
|
|
2019-03-22 12:21:25 +03:00
|
|
|
controlTower routing.ControlTower
|
|
|
|
|
2017-08-22 09:54:10 +03:00
|
|
|
authGossiper *discovery.AuthenticatedGossiper
|
2017-03-20 00:06:10 +03:00
|
|
|
|
2019-09-19 12:02:46 +03:00
|
|
|
localChanMgr *localchans.Manager
|
|
|
|
|
2016-09-12 22:37:51 +03:00
|
|
|
utxoNursery *utxoNursery
|
|
|
|
|
2018-10-23 13:08:03 +03:00
|
|
|
sweeper *sweep.UtxoSweeper
|
|
|
|
|
2018-01-17 07:25:34 +03:00
|
|
|
chainArb *contractcourt.ChainArbitrator
|
|
|
|
|
2019-09-05 14:35:39 +03:00
|
|
|
sphinx *hop.OnionProcessor
|
2016-09-21 03:15:26 +03:00
|
|
|
|
2019-06-14 03:29:47 +03:00
|
|
|
towerClient wtclient.Client
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
connMgr *connmgr.ConnManager
|
|
|
|
|
multi: replace per channel sigPool with global daemon level sigPool
In this commit, we remove the per channel `sigPool` within the
`lnwallet.LightningChannel` struct. With this change, we ensure that as
the number of channels grows, the number of gouroutines idling in the
sigPool stays constant. It's the case that currently on the daemon, most
channels are likely inactive, with only a hand full actually
consistently carrying out channel updates. As a result, this change
should reduce the amount of idle CPU usage, as we have less active
goroutines in select loops.
In order to make this change, the `SigPool` itself has been publicly
exported such that outside callers can make a `SigPool` and pass it into
newly created channels. Since the sig pool now lives outside the
channel, we were also able to do away with the Stop() method on the
channel all together.
Finally, the server is the sub-system that is currently responsible for
managing the `SigPool` within lnd.
2018-12-15 03:35:07 +03:00
|
|
|
sigPool *lnwallet.SigPool
|
|
|
|
|
2019-02-22 07:10:51 +03:00
|
|
|
writePool *pool.Write
|
2019-02-01 06:21:36 +03:00
|
|
|
|
2019-02-22 07:11:33 +03:00
|
|
|
readPool *pool.Read
|
2019-02-01 06:21:36 +03:00
|
|
|
|
2019-11-08 16:29:29 +03:00
|
|
|
// featureMgr dispatches feature vectors for various contexts within the
|
|
|
|
// daemon.
|
|
|
|
featureMgr *feature.Manager
|
2017-02-16 15:39:38 +03:00
|
|
|
|
2017-08-05 04:32:25 +03:00
|
|
|
// currentNodeAnn is the node announcement that has been broadcast to
|
|
|
|
// the network upon startup, if the attributes of the node (us) has
|
|
|
|
// changed since last start.
|
|
|
|
currentNodeAnn *lnwire.NodeAnnouncement
|
|
|
|
|
2018-12-10 07:08:32 +03:00
|
|
|
// chansToRestore is the set of channels that upon starting, the server
|
|
|
|
// should attempt to restore/recover.
|
|
|
|
chansToRestore walletunlocker.ChannelsToRecover
|
|
|
|
|
2019-02-09 06:45:39 +03:00
|
|
|
// chanSubSwapper is a sub-system that will ensure our on-disk channel
|
|
|
|
// backups are consistent at all times. It interacts with the
|
|
|
|
// channelNotifier to be notified of newly opened and closed channels.
|
|
|
|
chanSubSwapper *chanbackup.SubSwapper
|
|
|
|
|
2019-08-08 20:39:38 +03:00
|
|
|
// chanEventStore tracks the behaviour of channels and their remote peers to
|
|
|
|
// provide insights into their health and performance.
|
|
|
|
chanEventStore *chanfitness.ChannelEventStore
|
|
|
|
|
2017-08-11 07:18:57 +03:00
|
|
|
quit chan struct{}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
wg sync.WaitGroup
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2018-04-27 23:59:19 +03:00
|
|
|
// parseAddr parses an address from its string format to a net.Addr.
|
2020-05-14 15:18:11 +03:00
|
|
|
func parseAddr(address string, netCfg tor.Net) (net.Addr, error) {
|
2018-04-27 23:59:19 +03:00
|
|
|
var (
|
|
|
|
host string
|
|
|
|
port int
|
|
|
|
)
|
|
|
|
|
|
|
|
// Split the address into its host and port components.
|
|
|
|
h, p, err := net.SplitHostPort(address)
|
|
|
|
if err != nil {
|
|
|
|
// If a port wasn't specified, we'll assume the address only
|
|
|
|
// contains the host so we'll use the default port.
|
|
|
|
host = address
|
|
|
|
port = defaultPeerPort
|
|
|
|
} else {
|
|
|
|
// Otherwise, we'll note both the host and ports.
|
|
|
|
host = h
|
|
|
|
portNum, err := strconv.Atoi(p)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
port = portNum
|
|
|
|
}
|
|
|
|
|
|
|
|
if tor.IsOnionHost(host) {
|
|
|
|
return &tor.OnionAddr{OnionService: host, Port: port}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the host is part of a TCP address, we'll use the network
|
|
|
|
// specific ResolveTCPAddr function in order to resolve these
|
|
|
|
// addresses over Tor in order to prevent leaking your real IP
|
|
|
|
// address.
|
|
|
|
hostPort := net.JoinHostPort(host, strconv.Itoa(port))
|
2020-05-14 15:18:11 +03:00
|
|
|
return netCfg.ResolveTCPAddr("tcp", hostPort)
|
2018-04-27 23:59:19 +03:00
|
|
|
}
|
|
|
|
|
2018-08-09 22:13:05 +03:00
|
|
|
// noiseDial is a factory function which creates a connmgr compliant dialing
|
|
|
|
// function by returning a closure which includes the server's identity key.
|
2020-04-28 11:06:29 +03:00
|
|
|
func noiseDial(idKey keychain.SingleKeyECDH,
|
2020-05-14 15:18:11 +03:00
|
|
|
netCfg tor.Net) func(net.Addr) (net.Conn, error) {
|
|
|
|
|
2018-08-09 22:13:05 +03:00
|
|
|
return func(a net.Addr) (net.Conn, error) {
|
|
|
|
lnAddr := a.(*lnwire.NetAddress)
|
2020-04-28 11:06:29 +03:00
|
|
|
return brontide.Dial(idKey, lnAddr, netCfg.Dial)
|
2018-08-09 22:13:05 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// newServer creates a new instance of the server which is to listen using the
|
|
|
|
// passed listener address.
|
2020-05-14 15:18:11 +03:00
|
|
|
func newServer(cfg *Config, listenAddrs []net.Addr, chanDB *channeldb.DB,
|
2019-06-14 03:29:47 +03:00
|
|
|
towerClientDB *wtdb.ClientDB, cc *chainControl,
|
2020-04-28 11:06:35 +03:00
|
|
|
nodeKeyDesc *keychain.KeyDescriptor,
|
2019-08-08 05:18:23 +03:00
|
|
|
chansToRestore walletunlocker.ChannelsToRecover,
|
2020-03-17 17:58:36 +03:00
|
|
|
chanPredicate chanacceptor.ChannelAcceptor,
|
|
|
|
torController *tor.Controller) (*server, error) {
|
2017-06-06 01:18:06 +03:00
|
|
|
|
2020-04-28 11:06:27 +03:00
|
|
|
var (
|
|
|
|
err error
|
2020-04-28 11:06:28 +03:00
|
|
|
nodeKeyECDH = keychain.NewPubKeyECDH(*nodeKeyDesc, cc.keyRing)
|
2020-04-28 11:06:27 +03:00
|
|
|
nodeKeySigner = keychain.NewPubKeyDigestSigner(
|
|
|
|
*nodeKeyDesc, cc.keyRing,
|
|
|
|
)
|
|
|
|
)
|
2016-01-17 06:07:44 +03:00
|
|
|
|
|
|
|
listeners := make([]net.Listener, len(listenAddrs))
|
2018-05-23 16:38:19 +03:00
|
|
|
for i, listenAddr := range listenAddrs {
|
2018-03-11 00:02:19 +03:00
|
|
|
// Note: though brontide.NewListener uses ResolveTCPAddr, it
|
|
|
|
// doesn't need to call the general lndResolveTCP function
|
|
|
|
// since we are resolving a local address.
|
2018-05-23 16:38:19 +03:00
|
|
|
listeners[i], err = brontide.NewListener(
|
2020-04-28 11:06:29 +03:00
|
|
|
nodeKeyECDH, listenAddr.String(),
|
2018-05-23 16:38:19 +03:00
|
|
|
)
|
2016-01-17 06:07:44 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-11 21:37:54 +03:00
|
|
|
globalFeatures := lnwire.NewRawFeatureVector()
|
2019-08-06 07:34:46 +03:00
|
|
|
|
|
|
|
// Only if we're not being forced to use the legacy onion format, will
|
|
|
|
// we signal our knowledge of the new TLV onion format.
|
2020-03-06 18:11:48 +03:00
|
|
|
if !cfg.ProtocolOptions.LegacyOnion() {
|
2019-08-06 07:34:46 +03:00
|
|
|
globalFeatures.Set(lnwire.TLVOnionPayloadOptional)
|
|
|
|
}
|
2017-10-11 21:37:54 +03:00
|
|
|
|
2020-03-06 18:11:48 +03:00
|
|
|
// Similarly, we default to supporting the new modern commitment format
|
|
|
|
// where the remote key is static unless the protocol config is set to
|
|
|
|
// keep using the older format.
|
|
|
|
if !cfg.ProtocolOptions.NoStaticRemoteKey() {
|
2019-09-11 15:44:31 +03:00
|
|
|
globalFeatures.Set(lnwire.StaticRemoteKeyOptional)
|
|
|
|
}
|
|
|
|
|
2020-03-06 18:11:48 +03:00
|
|
|
// We only signal that we support the experimental anchor commitments
|
|
|
|
// if explicitly enabled in the config.
|
|
|
|
if cfg.ProtocolOptions.AnchorCommitments() {
|
|
|
|
globalFeatures.Set(lnwire.AnchorsOptional)
|
|
|
|
}
|
|
|
|
|
2018-05-08 06:00:32 +03:00
|
|
|
var serializedPubKey [33]byte
|
2020-04-28 11:06:29 +03:00
|
|
|
copy(serializedPubKey[:], nodeKeyECDH.PubKey().SerializeCompressed())
|
2018-02-24 04:33:05 +03:00
|
|
|
|
|
|
|
// Initialize the sphinx router, placing it's persistent replay log in
|
|
|
|
// the same directory as the channel graph database.
|
2020-03-13 19:06:58 +03:00
|
|
|
sharedSecretPath := filepath.Join(cfg.localDatabaseDir(), "sphinxreplay.db")
|
2018-03-26 20:10:47 +03:00
|
|
|
replayLog := htlcswitch.NewDecayedLog(sharedSecretPath, cc.chainNotifier)
|
2020-04-28 11:06:28 +03:00
|
|
|
sphinxRouter := sphinx.NewRouter(
|
|
|
|
nodeKeyECDH, activeNetParams.Params, replayLog,
|
|
|
|
)
|
2019-02-22 07:11:33 +03:00
|
|
|
|
2019-02-16 06:31:24 +03:00
|
|
|
writeBufferPool := pool.NewWriteBuffer(
|
|
|
|
pool.DefaultWriteBufferGCInterval,
|
|
|
|
pool.DefaultWriteBufferExpiryInterval,
|
2019-02-01 06:21:36 +03:00
|
|
|
)
|
2018-02-24 04:33:05 +03:00
|
|
|
|
2019-02-22 07:10:51 +03:00
|
|
|
writePool := pool.NewWrite(
|
2019-03-14 06:32:39 +03:00
|
|
|
writeBufferPool, cfg.Workers.Write, pool.DefaultWorkerTimeout,
|
2019-02-22 07:10:51 +03:00
|
|
|
)
|
2018-02-24 04:33:05 +03:00
|
|
|
|
2019-02-22 07:11:33 +03:00
|
|
|
readBufferPool := pool.NewReadBuffer(
|
|
|
|
pool.DefaultReadBufferGCInterval,
|
|
|
|
pool.DefaultReadBufferExpiryInterval,
|
|
|
|
)
|
|
|
|
|
|
|
|
readPool := pool.NewRead(
|
2019-03-14 06:32:39 +03:00
|
|
|
readBufferPool, cfg.Workers.Read, pool.DefaultWorkerTimeout,
|
2019-02-22 07:11:33 +03:00
|
|
|
)
|
|
|
|
|
2019-11-08 16:29:29 +03:00
|
|
|
featureMgr, err := feature.NewManager(feature.Config{
|
2020-03-06 18:11:48 +03:00
|
|
|
NoTLVOnion: cfg.ProtocolOptions.LegacyOnion(),
|
2020-03-06 18:11:48 +03:00
|
|
|
NoStaticRemoteKey: cfg.ProtocolOptions.NoStaticRemoteKey(),
|
|
|
|
NoAnchors: !cfg.ProtocolOptions.AnchorCommitments(),
|
2019-11-08 16:29:29 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-12-04 20:47:53 +03:00
|
|
|
registryConfig := invoices.RegistryConfig{
|
2020-05-13 16:47:45 +03:00
|
|
|
FinalCltvRejectDelta: lncfg.DefaultFinalCltvRejectDelta,
|
2019-09-03 13:23:39 +03:00
|
|
|
HtlcHoldDuration: invoices.DefaultHtlcHoldDuration,
|
2019-11-25 13:46:29 +03:00
|
|
|
Clock: clock.NewDefaultClock(),
|
2019-12-05 14:55:17 +03:00
|
|
|
AcceptKeySend: cfg.AcceptKeySend,
|
2019-12-04 20:47:53 +03:00
|
|
|
}
|
|
|
|
|
2016-01-17 06:07:44 +03:00
|
|
|
s := &server{
|
2020-05-14 15:18:11 +03:00
|
|
|
cfg: cfg,
|
2018-12-10 07:08:32 +03:00
|
|
|
chanDB: chanDB,
|
|
|
|
cc: cc,
|
|
|
|
sigPool: lnwallet.NewSigPool(cfg.Workers.Sig, cc.signer),
|
|
|
|
writePool: writePool,
|
|
|
|
readPool: readPool,
|
|
|
|
chansToRestore: chansToRestore,
|
2017-05-18 21:55:25 +03:00
|
|
|
|
2019-12-09 19:50:11 +03:00
|
|
|
invoices: invoices.NewRegistry(
|
|
|
|
chanDB, invoices.NewInvoiceExpiryWatcher(clock.NewDefaultClock()),
|
|
|
|
®istryConfig,
|
|
|
|
),
|
2016-11-29 06:43:57 +03:00
|
|
|
|
2018-10-22 06:36:56 +03:00
|
|
|
channelNotifier: channelnotifier.New(chanDB),
|
|
|
|
|
2020-04-28 11:06:29 +03:00
|
|
|
identityECDH: nodeKeyECDH,
|
2020-04-28 11:06:27 +03:00
|
|
|
nodeSigner: netann.NewNodeSigner(nodeKeySigner),
|
2016-11-29 06:43:57 +03:00
|
|
|
|
2018-04-28 00:02:05 +03:00
|
|
|
listenAddrs: listenAddrs,
|
|
|
|
|
2016-09-21 03:15:26 +03:00
|
|
|
// TODO(roasbeef): derive proper onion key based on rotation
|
|
|
|
// schedule
|
2019-09-05 14:35:39 +03:00
|
|
|
sphinx: hop.NewOnionProcessor(sphinxRouter),
|
2016-11-29 06:43:57 +03:00
|
|
|
|
2020-03-17 17:58:36 +03:00
|
|
|
torController: torController,
|
|
|
|
|
2019-02-15 06:17:52 +03:00
|
|
|
persistentPeers: make(map[string]bool),
|
2018-05-08 06:18:15 +03:00
|
|
|
persistentPeersBackoff: make(map[string]time.Duration),
|
|
|
|
persistentConnReqs: make(map[string][]*connmgr.ConnReq),
|
|
|
|
persistentRetryCancels: make(map[string]chan struct{}),
|
2020-03-17 09:22:35 +03:00
|
|
|
peerErrors: make(map[string]*queue.CircularBuffer),
|
2018-05-08 06:18:15 +03:00
|
|
|
ignorePeerTermination: make(map[*peer]struct{}),
|
|
|
|
scheduledPeerConnection: make(map[string]func()),
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2019-01-10 07:16:27 +03:00
|
|
|
peersByPub: make(map[string]*peer),
|
|
|
|
inboundPeers: make(map[string]*peer),
|
|
|
|
outboundPeers: make(map[string]*peer),
|
|
|
|
peerConnectedListeners: make(map[string][]chan<- lnpeer.Peer),
|
|
|
|
peerDisconnectedListeners: make(map[string][]chan<- struct{}),
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2019-11-08 16:29:29 +03:00
|
|
|
featureMgr: featureMgr,
|
|
|
|
quit: make(chan struct{}),
|
2016-01-17 06:07:44 +03:00
|
|
|
}
|
|
|
|
|
2018-01-20 04:41:08 +03:00
|
|
|
s.witnessBeacon = &preimageBeacon{
|
|
|
|
wCache: chanDB.NewWitnessCache(),
|
2018-02-07 06:11:11 +03:00
|
|
|
subscribers: make(map[uint64]*preimageSubscriber),
|
2018-01-20 04:41:08 +03:00
|
|
|
}
|
2018-01-17 07:25:34 +03:00
|
|
|
|
2018-06-01 06:31:40 +03:00
|
|
|
_, currentHeight, err := s.cc.chainIO.GetBestBlock()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2020-02-19 18:34:47 +03:00
|
|
|
s.htlcNotifier = htlcswitch.NewHtlcNotifier(time.Now)
|
|
|
|
|
2018-05-08 06:00:32 +03:00
|
|
|
s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{
|
2019-06-11 11:24:19 +03:00
|
|
|
DB: chanDB,
|
2017-05-02 23:04:58 +03:00
|
|
|
LocalChannelClose: func(pubKey []byte,
|
|
|
|
request *htlcswitch.ChanClose) {
|
2017-08-04 02:40:08 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
peer, err := s.FindPeerByPubStr(string(pubKey))
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to close channel, peer"+
|
|
|
|
" with %v id can't be found: %v",
|
|
|
|
pubKey, err,
|
|
|
|
)
|
2017-05-02 23:04:58 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
select {
|
|
|
|
case peer.localCloseChanReqs <- request:
|
2017-08-11 07:17:04 +03:00
|
|
|
srvrLog.Infof("Local close channel request "+
|
|
|
|
"delivered to peer: %x", pubKey[:])
|
2017-08-09 02:51:41 +03:00
|
|
|
case <-peer.quit:
|
2017-08-11 07:17:04 +03:00
|
|
|
srvrLog.Errorf("Unable to deliver local close "+
|
|
|
|
"channel request to peer %x, err: %v",
|
|
|
|
pubKey[:], err)
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
2017-05-02 23:04:58 +03:00
|
|
|
},
|
2018-05-08 06:00:32 +03:00
|
|
|
FwdingLog: chanDB.ForwardingLog(),
|
|
|
|
SwitchPackager: channeldb.NewSwitchPackager(),
|
|
|
|
ExtractErrorEncrypter: s.sphinx.ExtractErrorEncrypter,
|
2018-08-22 10:32:44 +03:00
|
|
|
FetchLastChannelUpdate: s.fetchLastChanUpdate(),
|
2018-06-01 06:31:40 +03:00
|
|
|
Notifier: s.cc.chainNotifier,
|
2020-02-19 18:34:47 +03:00
|
|
|
HtlcNotifier: s.htlcNotifier,
|
2019-05-30 19:26:24 +03:00
|
|
|
FwdEventTicker: ticker.New(htlcswitch.DefaultFwdEventInterval),
|
|
|
|
LogEventTicker: ticker.New(htlcswitch.DefaultLogInterval),
|
|
|
|
AckEventTicker: ticker.New(htlcswitch.DefaultAckInterval),
|
2020-01-30 11:01:18 +03:00
|
|
|
AllowCircularRoute: cfg.AllowCircularRoute,
|
2018-11-19 20:37:50 +03:00
|
|
|
RejectHTLC: cfg.RejectHTLC,
|
2020-04-14 20:49:26 +03:00
|
|
|
Clock: clock.NewDefaultClock(),
|
|
|
|
HTLCExpiry: htlcswitch.DefaultHTLCExpiry,
|
2018-06-01 06:31:40 +03:00
|
|
|
}, uint32(currentHeight))
|
2018-02-28 11:14:03 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-05-02 23:04:58 +03:00
|
|
|
|
2019-02-15 04:13:44 +03:00
|
|
|
chanStatusMgrCfg := &netann.ChanStatusConfig{
|
|
|
|
ChanStatusSampleInterval: cfg.ChanStatusSampleInterval,
|
|
|
|
ChanEnableTimeout: cfg.ChanEnableTimeout,
|
|
|
|
ChanDisableTimeout: cfg.ChanDisableTimeout,
|
2020-04-28 11:06:29 +03:00
|
|
|
OurPubKey: nodeKeyECDH.PubKey(),
|
2019-02-15 04:13:44 +03:00
|
|
|
MessageSigner: s.nodeSigner,
|
|
|
|
IsChannelActive: s.htlcSwitch.HasActiveLink,
|
|
|
|
ApplyChannelUpdate: s.applyChannelUpdate,
|
|
|
|
DB: chanDB,
|
|
|
|
Graph: chanDB.ChannelGraph(),
|
|
|
|
}
|
|
|
|
|
|
|
|
chanStatusMgr, err := netann.NewChanStatusManager(chanStatusMgrCfg)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
s.chanStatusMgr = chanStatusMgr
|
|
|
|
|
2018-01-21 20:22:13 +03:00
|
|
|
// If enabled, use either UPnP or NAT-PMP to automatically configure
|
2018-04-20 08:00:49 +03:00
|
|
|
// port forwarding for users behind a NAT.
|
|
|
|
if cfg.NAT {
|
|
|
|
srvrLog.Info("Scanning local network for a UPnP enabled device")
|
|
|
|
|
|
|
|
discoveryTimeout := time.Duration(10 * time.Second)
|
2017-12-25 02:15:40 +03:00
|
|
|
|
2018-04-20 08:00:49 +03:00
|
|
|
ctx, cancel := context.WithTimeout(
|
|
|
|
context.Background(), discoveryTimeout,
|
|
|
|
)
|
|
|
|
defer cancel()
|
|
|
|
upnp, err := nat.DiscoverUPnP(ctx)
|
2018-01-21 20:22:13 +03:00
|
|
|
if err == nil {
|
2018-04-20 08:00:49 +03:00
|
|
|
s.natTraversal = upnp
|
|
|
|
} else {
|
|
|
|
// If we were not able to discover a UPnP enabled device
|
|
|
|
// on the local network, we'll fall back to attempting
|
|
|
|
// to discover a NAT-PMP enabled device.
|
|
|
|
srvrLog.Errorf("Unable to discover a UPnP enabled "+
|
|
|
|
"device on the local network: %v", err)
|
|
|
|
|
|
|
|
srvrLog.Info("Scanning local network for a NAT-PMP " +
|
|
|
|
"enabled device")
|
|
|
|
|
|
|
|
pmp, err := nat.DiscoverPMP(discoveryTimeout)
|
|
|
|
if err != nil {
|
2020-04-14 20:56:05 +03:00
|
|
|
err := fmt.Errorf("unable to discover a "+
|
2018-04-20 08:00:49 +03:00
|
|
|
"NAT-PMP enabled device on the local "+
|
|
|
|
"network: %v", err)
|
|
|
|
srvrLog.Error(err)
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-12-25 02:15:40 +03:00
|
|
|
|
2018-04-20 08:00:49 +03:00
|
|
|
s.natTraversal = pmp
|
|
|
|
}
|
2018-01-20 21:58:06 +03:00
|
|
|
}
|
|
|
|
|
2018-04-20 08:00:49 +03:00
|
|
|
// If we were requested to automatically configure port forwarding,
|
|
|
|
// we'll use the ports that the server will be listening on.
|
2018-07-31 11:29:12 +03:00
|
|
|
externalIPStrings := make([]string, len(cfg.ExternalIPs))
|
2018-05-23 16:38:19 +03:00
|
|
|
for idx, ip := range cfg.ExternalIPs {
|
2018-07-31 11:29:12 +03:00
|
|
|
externalIPStrings[idx] = ip.String()
|
2018-05-23 16:38:19 +03:00
|
|
|
}
|
2018-04-20 08:00:49 +03:00
|
|
|
if s.natTraversal != nil {
|
|
|
|
listenPorts := make([]uint16, 0, len(listenAddrs))
|
|
|
|
for _, listenAddr := range listenAddrs {
|
|
|
|
// At this point, the listen addresses should have
|
|
|
|
// already been normalized, so it's safe to ignore the
|
|
|
|
// errors.
|
2018-05-23 16:38:19 +03:00
|
|
|
_, portStr, _ := net.SplitHostPort(listenAddr.String())
|
2018-04-20 08:00:49 +03:00
|
|
|
port, _ := strconv.Atoi(portStr)
|
|
|
|
|
|
|
|
listenPorts = append(listenPorts, uint16(port))
|
2017-12-25 02:15:40 +03:00
|
|
|
}
|
|
|
|
|
2018-04-20 08:00:49 +03:00
|
|
|
ips, err := s.configurePortForwarding(listenPorts...)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("Unable to automatically set up port "+
|
|
|
|
"forwarding using %s: %v",
|
|
|
|
s.natTraversal.Name(), err)
|
|
|
|
} else {
|
|
|
|
srvrLog.Infof("Automatically set up port forwarding "+
|
|
|
|
"using %s to advertise external IP",
|
|
|
|
s.natTraversal.Name())
|
2018-07-31 11:29:12 +03:00
|
|
|
externalIPStrings = append(externalIPStrings, ips...)
|
2018-04-20 08:00:49 +03:00
|
|
|
}
|
2017-12-25 02:15:40 +03:00
|
|
|
}
|
|
|
|
|
2017-02-23 03:24:22 +03:00
|
|
|
// If external IP addresses have been specified, add those to the list
|
2018-04-27 23:59:19 +03:00
|
|
|
// of this server's addresses.
|
2018-05-23 16:41:16 +03:00
|
|
|
externalIPs, err := lncfg.NormalizeAddresses(
|
2018-07-31 11:29:12 +03:00
|
|
|
externalIPStrings, strconv.Itoa(defaultPeerPort),
|
2018-06-28 01:05:11 +03:00
|
|
|
cfg.net.ResolveTCPAddr,
|
2018-04-20 08:00:49 +03:00
|
|
|
)
|
2018-05-23 16:38:19 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-04-14 18:55:26 +03:00
|
|
|
|
2018-04-20 08:00:49 +03:00
|
|
|
selfAddrs := make([]net.Addr, 0, len(externalIPs))
|
2020-04-14 18:55:26 +03:00
|
|
|
selfAddrs = append(selfAddrs, externalIPs...)
|
2016-10-05 23:47:02 +03:00
|
|
|
|
2017-03-27 20:25:44 +03:00
|
|
|
chanGraph := chanDB.ChannelGraph()
|
2017-03-28 22:08:14 +03:00
|
|
|
|
2018-09-21 05:28:11 +03:00
|
|
|
// We'll now reconstruct a node announcement based on our current
|
|
|
|
// configuration so we can send it out as a sort of heart beat within
|
|
|
|
// the network.
|
|
|
|
//
|
|
|
|
// We'll start by parsing the node color from configuration.
|
2017-12-16 02:06:20 +03:00
|
|
|
color, err := parseHexColor(cfg.Color)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to parse color: %v\n", err)
|
|
|
|
return nil, err
|
2017-12-03 05:38:14 +03:00
|
|
|
}
|
|
|
|
|
2018-09-21 05:28:11 +03:00
|
|
|
// If no alias is provided, default to first 10 characters of public
|
|
|
|
// key.
|
2017-12-16 02:06:20 +03:00
|
|
|
alias := cfg.Alias
|
|
|
|
if alias == "" {
|
|
|
|
alias = hex.EncodeToString(serializedPubKey[:10])
|
|
|
|
}
|
|
|
|
nodeAlias, err := lnwire.NewNodeAlias(alias)
|
2017-08-22 09:54:10 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-08-05 04:32:25 +03:00
|
|
|
selfNode := &channeldb.LightningNode{
|
2017-07-14 22:40:26 +03:00
|
|
|
HaveNodeAnnouncement: true,
|
|
|
|
LastUpdate: time.Now(),
|
|
|
|
Addresses: selfAddrs,
|
2017-12-16 02:06:20 +03:00
|
|
|
Alias: nodeAlias.String(),
|
2019-11-08 16:29:29 +03:00
|
|
|
Features: s.featureMgr.Get(feature.SetNodeAnn),
|
2017-12-16 02:06:20 +03:00
|
|
|
Color: color,
|
2016-12-27 08:42:23 +03:00
|
|
|
}
|
2020-04-28 11:06:29 +03:00
|
|
|
copy(selfNode.PubKeyBytes[:], nodeKeyECDH.PubKey().SerializeCompressed())
|
2017-03-27 20:25:44 +03:00
|
|
|
|
2018-09-21 05:27:12 +03:00
|
|
|
// Based on the disk representation of the node announcement generated
|
|
|
|
// above, we'll generate a node announcement that can go out on the
|
|
|
|
// network so we can properly sign it.
|
|
|
|
nodeAnn, err := selfNode.NodeAnnouncement(false)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to gen self node ann: %v", err)
|
2017-08-05 04:32:25 +03:00
|
|
|
}
|
2018-09-21 05:28:11 +03:00
|
|
|
|
|
|
|
// With the announcement generated, we'll sign it to properly
|
|
|
|
// authenticate the message on the network.
|
2020-03-18 02:23:01 +03:00
|
|
|
authSig, err := netann.SignAnnouncement(
|
2020-04-28 11:06:21 +03:00
|
|
|
s.nodeSigner, s.identityECDH.PubKey(), nodeAnn,
|
2017-04-01 15:33:17 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
2017-03-27 20:25:44 +03:00
|
|
|
return nil, fmt.Errorf("unable to generate signature for "+
|
|
|
|
"self node announcement: %v", err)
|
|
|
|
}
|
2018-01-31 07:30:00 +03:00
|
|
|
selfNode.AuthSigBytes = authSig.Serialize()
|
2018-09-21 05:28:11 +03:00
|
|
|
nodeAnn.Signature, err = lnwire.NewSigFromRawSignature(
|
|
|
|
selfNode.AuthSigBytes,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2018-01-31 07:30:00 +03:00
|
|
|
|
2018-09-21 05:28:11 +03:00
|
|
|
// Finally, we'll update the representation on disk, and update our
|
|
|
|
// cached in-memory version as well.
|
2017-08-05 04:32:25 +03:00
|
|
|
if err := chanGraph.SetSourceNode(selfNode); err != nil {
|
2017-03-27 20:25:44 +03:00
|
|
|
return nil, fmt.Errorf("can't set self node: %v", err)
|
2016-10-05 23:47:02 +03:00
|
|
|
}
|
2018-09-21 05:28:11 +03:00
|
|
|
s.currentNodeAnn = nodeAnn
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2019-05-16 16:27:28 +03:00
|
|
|
// The router will get access to the payment ID sequencer, such that it
|
|
|
|
// can generate unique payment IDs.
|
|
|
|
sequencer, err := htlcswitch.NewPersistentSequencer(chanDB)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-05-23 21:05:30 +03:00
|
|
|
queryBandwidth := func(edge *channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi {
|
|
|
|
cid := lnwire.NewChanIDFromOutPoint(&edge.ChannelPoint)
|
|
|
|
link, err := s.htlcSwitch.GetLink(cid)
|
|
|
|
if err != nil {
|
|
|
|
// If the link isn't online, then we'll report
|
|
|
|
// that it has zero bandwidth to the router.
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the link is found within the switch, but it isn't
|
|
|
|
// yet eligible to forward any HTLCs, then we'll treat
|
|
|
|
// it as if it isn't online in the first place.
|
|
|
|
if !link.EligibleToForward() {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll return the current best estimate
|
|
|
|
// for the available bandwidth for the link.
|
|
|
|
return link.Bandwidth()
|
|
|
|
}
|
|
|
|
|
2019-05-22 12:56:04 +03:00
|
|
|
// Instantiate mission control with config from the sub server.
|
|
|
|
//
|
|
|
|
// TODO(joostjager): When we are further in the process of moving to sub
|
|
|
|
// servers, the mission control instance itself can be moved there too.
|
2019-06-18 19:30:56 +03:00
|
|
|
routingConfig := routerrpc.GetRoutingConfig(cfg.SubRPCServers.RouterRPC)
|
|
|
|
|
2019-06-26 14:00:35 +03:00
|
|
|
s.missionControl, err = routing.NewMissionControl(
|
2020-01-10 05:47:38 +03:00
|
|
|
chanDB,
|
2019-06-18 19:30:56 +03:00
|
|
|
&routing.MissionControlConfig{
|
2020-03-23 21:59:16 +03:00
|
|
|
AprioriHopProbability: routingConfig.AprioriHopProbability,
|
|
|
|
PenaltyHalfLife: routingConfig.PenaltyHalfLife,
|
|
|
|
MaxMcHistory: routingConfig.MaxMcHistory,
|
|
|
|
AprioriWeight: routingConfig.AprioriWeight,
|
|
|
|
SelfNode: selfNode.PubKeyBytes,
|
|
|
|
MinFailureRelaxInterval: routing.DefaultMinFailureRelaxInterval,
|
2019-06-18 19:30:56 +03:00
|
|
|
},
|
2019-05-23 21:05:30 +03:00
|
|
|
)
|
2019-06-26 14:00:35 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("can't create mission control: %v", err)
|
|
|
|
}
|
2019-05-23 21:05:30 +03:00
|
|
|
|
2019-06-18 19:30:56 +03:00
|
|
|
srvrLog.Debugf("Instantiating payment session source with config: "+
|
|
|
|
"PaymentAttemptPenalty=%v, MinRouteProbability=%v",
|
2019-07-14 00:26:26 +03:00
|
|
|
int64(routingConfig.AttemptCost),
|
2019-06-18 19:30:56 +03:00
|
|
|
routingConfig.MinRouteProbability)
|
|
|
|
|
2019-06-20 13:03:45 +03:00
|
|
|
pathFindingConfig := routing.PathFindingConfig{
|
2019-07-14 00:26:26 +03:00
|
|
|
PaymentAttemptPenalty: lnwire.NewMSatFromSatoshis(
|
|
|
|
routingConfig.AttemptCost,
|
|
|
|
),
|
|
|
|
MinProbability: routingConfig.MinRouteProbability,
|
2019-06-20 13:03:45 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
paymentSessionSource := &routing.SessionSource{
|
|
|
|
Graph: chanGraph,
|
|
|
|
MissionControl: s.missionControl,
|
|
|
|
QueryBandwidth: queryBandwidth,
|
|
|
|
PathFindingConfig: pathFindingConfig,
|
2019-06-18 19:30:56 +03:00
|
|
|
}
|
|
|
|
|
2019-05-29 09:57:04 +03:00
|
|
|
paymentControl := channeldb.NewPaymentControl(chanDB)
|
|
|
|
|
2019-03-22 12:21:25 +03:00
|
|
|
s.controlTower = routing.NewControlTower(paymentControl)
|
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
s.chanRouter, err = routing.New(routing.Config{
|
2019-05-16 16:27:28 +03:00
|
|
|
Graph: chanGraph,
|
|
|
|
Chain: cc.chainIO,
|
|
|
|
ChainView: cc.chainView,
|
|
|
|
Payer: s.htlcSwitch,
|
2019-03-22 12:21:25 +03:00
|
|
|
Control: s.controlTower,
|
2019-05-10 19:00:15 +03:00
|
|
|
MissionControl: s.missionControl,
|
2019-06-18 19:30:56 +03:00
|
|
|
SessionSource: paymentSessionSource,
|
2019-03-27 23:07:13 +03:00
|
|
|
ChannelPruneExpiry: routing.DefaultChannelPruneExpiry,
|
2017-10-05 05:39:38 +03:00
|
|
|
GraphPruneInterval: time.Duration(time.Hour),
|
2019-05-23 21:05:30 +03:00
|
|
|
QueryBandwidth: queryBandwidth,
|
2018-08-30 05:05:27 +03:00
|
|
|
AssumeChannelValid: cfg.Routing.UseAssumeChannelValid(),
|
2019-05-16 16:27:28 +03:00
|
|
|
NextPaymentID: sequencer.NextID,
|
2019-06-20 13:03:45 +03:00
|
|
|
PathFindingConfig: pathFindingConfig,
|
2020-03-05 15:54:03 +03:00
|
|
|
Clock: clock.NewDefaultClock(),
|
2016-12-27 08:42:23 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
2017-03-27 20:25:44 +03:00
|
|
|
return nil, fmt.Errorf("can't create router: %v", err)
|
2016-12-27 08:42:23 +03:00
|
|
|
}
|
2016-07-15 14:02:59 +03:00
|
|
|
|
2019-03-23 05:56:33 +03:00
|
|
|
chanSeries := discovery.NewChanSeries(s.chanDB.ChannelGraph())
|
2019-02-06 04:18:27 +03:00
|
|
|
gossipMessageStore, err := discovery.NewMessageStore(s.chanDB)
|
2017-03-20 00:06:10 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-02-06 04:18:34 +03:00
|
|
|
waitingProofStore, err := channeldb.NewWaitingProofStore(s.chanDB)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-02-06 04:18:27 +03:00
|
|
|
|
2019-02-06 04:18:34 +03:00
|
|
|
s.authGossiper = discovery.New(discovery.Config{
|
2019-09-10 15:37:18 +03:00
|
|
|
Router: s.chanRouter,
|
|
|
|
Notifier: s.cc.chainNotifier,
|
|
|
|
ChainHash: *activeNetParams.GenesisHash,
|
|
|
|
Broadcast: s.BroadcastMessage,
|
|
|
|
ChanSeries: chanSeries,
|
|
|
|
NotifyWhenOnline: s.NotifyWhenOnline,
|
|
|
|
NotifyWhenOffline: s.NotifyWhenOffline,
|
|
|
|
SelfNodeAnnouncement: func(refresh bool) (lnwire.NodeAnnouncement, error) {
|
|
|
|
return s.genNodeAnnouncement(refresh)
|
|
|
|
},
|
2019-07-31 03:26:03 +03:00
|
|
|
ProofMatureDelta: 0,
|
|
|
|
TrickleDelay: time.Millisecond * time.Duration(cfg.TrickleDelay),
|
2019-09-10 15:37:17 +03:00
|
|
|
RetransmitTicker: ticker.New(time.Minute * 30),
|
2019-09-10 15:37:17 +03:00
|
|
|
RebroadcastInterval: time.Hour * 24,
|
2019-07-31 03:26:03 +03:00
|
|
|
WaitingProofStore: waitingProofStore,
|
|
|
|
MessageStore: gossipMessageStore,
|
|
|
|
AnnSigner: s.nodeSigner,
|
|
|
|
RotateTicker: ticker.New(discovery.DefaultSyncerRotationInterval),
|
|
|
|
HistoricalSyncTicker: ticker.New(cfg.HistoricalSyncInterval),
|
|
|
|
NumActiveSyncers: cfg.NumGraphSyncPeers,
|
|
|
|
MinimumBatchSize: 10,
|
|
|
|
SubBatchDelay: time.Second * 5,
|
|
|
|
IgnoreHistoricalFilters: cfg.IgnoreHistoricalGossipFilters,
|
2017-08-22 09:54:10 +03:00
|
|
|
},
|
2020-04-28 11:06:21 +03:00
|
|
|
s.identityECDH.PubKey(),
|
2017-08-22 09:54:10 +03:00
|
|
|
)
|
2017-03-20 00:06:10 +03:00
|
|
|
|
2019-09-19 12:02:46 +03:00
|
|
|
s.localChanMgr = &localchans.Manager{
|
|
|
|
ForAllOutgoingChannels: s.chanRouter.ForAllOutgoingChannels,
|
|
|
|
PropagateChanPolicyUpdate: s.authGossiper.PropagateChanPolicyUpdate,
|
|
|
|
UpdateForwardingPolicies: s.htlcSwitch.UpdateForwardingPolicies,
|
2019-08-20 03:53:21 +03:00
|
|
|
FetchChannel: s.chanDB.FetchChannel,
|
2019-09-19 12:02:46 +03:00
|
|
|
}
|
|
|
|
|
2018-03-15 03:07:01 +03:00
|
|
|
utxnStore, err := newNurseryStore(activeNetParams.GenesisHash, chanDB)
|
2017-10-02 06:39:40 +03:00
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to create nursery store: %v", err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-10-23 13:08:03 +03:00
|
|
|
srvrLog.Tracef("Sweeper batch window duration: %v",
|
|
|
|
sweep.DefaultBatchWindowDuration)
|
|
|
|
|
|
|
|
sweeperStore, err := sweep.NewSweeperStore(
|
2020-01-10 05:47:38 +03:00
|
|
|
chanDB, activeNetParams.GenesisHash,
|
2018-10-23 13:08:03 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to create sweeper store: %v", err)
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
s.sweeper = sweep.New(&sweep.UtxoSweeperConfig{
|
2019-12-10 17:32:57 +03:00
|
|
|
FeeEstimator: cc.feeEstimator,
|
|
|
|
GenSweepScript: newSweepPkScriptGen(cc.wallet),
|
|
|
|
Signer: cc.wallet.Cfg.Signer,
|
|
|
|
Wallet: cc.wallet,
|
2018-10-23 13:08:03 +03:00
|
|
|
NewBatchTimer: func() <-chan time.Time {
|
|
|
|
return time.NewTimer(sweep.DefaultBatchWindowDuration).C
|
|
|
|
},
|
|
|
|
Notifier: cc.chainNotifier,
|
|
|
|
Store: sweeperStore,
|
|
|
|
MaxInputsPerTx: sweep.DefaultMaxInputsPerTx,
|
|
|
|
MaxSweepAttempts: sweep.DefaultMaxSweepAttempts,
|
|
|
|
NextAttemptDeltaFunc: sweep.DefaultNextAttemptDeltaFunc,
|
2019-05-02 02:06:19 +03:00
|
|
|
MaxFeeRate: sweep.DefaultMaxFeeRate,
|
|
|
|
FeeRateBucketSize: sweep.DefaultFeeRateBucketSize,
|
2018-09-26 08:03:16 +03:00
|
|
|
})
|
|
|
|
|
2017-10-02 06:39:40 +03:00
|
|
|
s.utxoNursery = newUtxoNursery(&NurseryConfig{
|
2018-09-12 23:23:06 +03:00
|
|
|
ChainIO: cc.chainIO,
|
|
|
|
ConfDepth: 1,
|
|
|
|
FetchClosedChannels: chanDB.FetchClosedChannels,
|
|
|
|
FetchClosedChannel: chanDB.FetchClosedChannel,
|
2018-09-26 08:03:16 +03:00
|
|
|
Notifier: cc.chainNotifier,
|
|
|
|
PublishTransaction: cc.wallet.PublishTransaction,
|
|
|
|
Store: utxnStore,
|
2018-12-19 14:49:01 +03:00
|
|
|
SweepInput: s.sweeper.SweepInput,
|
2017-10-02 06:39:40 +03:00
|
|
|
})
|
|
|
|
|
2017-09-01 13:12:02 +03:00
|
|
|
// Construct a closure that wraps the htlcswitch's CloseLink method.
|
|
|
|
closeLink := func(chanPoint *wire.OutPoint,
|
|
|
|
closureType htlcswitch.ChannelCloseType) {
|
|
|
|
// TODO(conner): Properly respect the update and error channels
|
|
|
|
// returned by CloseLink.
|
2019-12-09 16:44:00 +03:00
|
|
|
|
|
|
|
// Instruct the switch to close the channel. Provide no close out
|
|
|
|
// delivery script or target fee per kw because user input is not
|
|
|
|
// available when the remote peer closes the channel.
|
|
|
|
s.htlcSwitch.CloseLink(chanPoint, closureType, 0, nil)
|
2017-09-01 13:12:02 +03:00
|
|
|
}
|
|
|
|
|
2018-04-18 15:01:18 +03:00
|
|
|
// We will use the following channel to reliably hand off contract
|
|
|
|
// breach events from the ChannelArbitrator to the breachArbiter,
|
|
|
|
contractBreaches := make(chan *ContractBreachEvent, 1)
|
|
|
|
|
2018-01-17 07:25:34 +03:00
|
|
|
s.chainArb = contractcourt.NewChainArbitrator(contractcourt.ChainArbitratorConfig{
|
2019-04-03 13:18:19 +03:00
|
|
|
ChainHash: *activeNetParams.GenesisHash,
|
2020-05-13 16:47:45 +03:00
|
|
|
IncomingBroadcastDelta: lncfg.DefaultIncomingBroadcastDelta,
|
|
|
|
OutgoingBroadcastDelta: lncfg.DefaultOutgoingBroadcastDelta,
|
2019-06-14 03:30:22 +03:00
|
|
|
NewSweepAddr: newSweepPkScriptGen(cc.wallet),
|
|
|
|
PublishTx: cc.wallet.PublishTransaction,
|
2018-01-17 07:25:34 +03:00
|
|
|
DeliverResolutionMsg: func(msgs ...contractcourt.ResolutionMsg) error {
|
|
|
|
for _, msg := range msgs {
|
|
|
|
err := s.htlcSwitch.ProcessContractResolution(msg)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
},
|
|
|
|
IncubateOutputs: func(chanPoint wire.OutPoint,
|
|
|
|
outHtlcRes *lnwallet.OutgoingHtlcResolution,
|
2018-09-05 16:43:01 +03:00
|
|
|
inHtlcRes *lnwallet.IncomingHtlcResolution,
|
|
|
|
broadcastHeight uint32) error {
|
2018-01-17 07:25:34 +03:00
|
|
|
|
|
|
|
var (
|
|
|
|
inRes []lnwallet.IncomingHtlcResolution
|
|
|
|
outRes []lnwallet.OutgoingHtlcResolution
|
|
|
|
)
|
|
|
|
if inHtlcRes != nil {
|
|
|
|
inRes = append(inRes, *inHtlcRes)
|
|
|
|
}
|
|
|
|
if outHtlcRes != nil {
|
|
|
|
outRes = append(outRes, *outHtlcRes)
|
|
|
|
}
|
|
|
|
|
|
|
|
return s.utxoNursery.IncubateOutputs(
|
2019-10-30 13:44:39 +03:00
|
|
|
chanPoint, outRes, inRes,
|
2018-09-05 16:49:35 +03:00
|
|
|
broadcastHeight,
|
2018-01-17 07:25:34 +03:00
|
|
|
)
|
|
|
|
},
|
|
|
|
PreimageDB: s.witnessBeacon,
|
|
|
|
Notifier: cc.chainNotifier,
|
|
|
|
Signer: cc.wallet.Cfg.Signer,
|
|
|
|
FeeEstimator: cc.feeEstimator,
|
|
|
|
ChainIO: cc.chainIO,
|
|
|
|
MarkLinkInactive: func(chanPoint wire.OutPoint) error {
|
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(&chanPoint)
|
2018-07-13 03:39:59 +03:00
|
|
|
s.htlcSwitch.RemoveLink(chanID)
|
|
|
|
return nil
|
2018-01-17 07:25:34 +03:00
|
|
|
},
|
2018-09-28 06:58:46 +03:00
|
|
|
IsOurAddress: cc.wallet.IsOurAddress,
|
2018-04-18 15:01:18 +03:00
|
|
|
ContractBreach: func(chanPoint wire.OutPoint,
|
|
|
|
breachRet *lnwallet.BreachRetribution) error {
|
|
|
|
event := &ContractBreachEvent{
|
|
|
|
ChanPoint: chanPoint,
|
|
|
|
ProcessACK: make(chan error, 1),
|
|
|
|
BreachRetribution: breachRet,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Send the contract breach event to the breachArbiter.
|
|
|
|
select {
|
|
|
|
case contractBreaches <- event:
|
|
|
|
case <-s.quit:
|
|
|
|
return ErrServerShuttingDown
|
|
|
|
}
|
|
|
|
|
|
|
|
// Wait for the breachArbiter to ACK the event.
|
|
|
|
select {
|
|
|
|
case err := <-event.ProcessACK:
|
|
|
|
return err
|
|
|
|
case <-s.quit:
|
|
|
|
return ErrServerShuttingDown
|
|
|
|
}
|
|
|
|
},
|
2020-02-03 13:52:22 +03:00
|
|
|
DisableChannel: s.chanStatusMgr.RequestDisable,
|
|
|
|
Sweeper: s.sweeper,
|
|
|
|
Registry: s.invoices,
|
|
|
|
NotifyClosedChannel: s.channelNotifier.NotifyClosedChannelEvent,
|
|
|
|
OnionProcessor: s.sphinx,
|
|
|
|
PaymentsExpirationGracePeriod: cfg.PaymentsExpirationGracePeriod,
|
|
|
|
IsForwardedHTLC: s.htlcSwitch.IsForwardedHTLC,
|
|
|
|
Clock: clock.NewDefaultClock(),
|
2018-01-17 07:25:34 +03:00
|
|
|
}, chanDB)
|
|
|
|
|
2017-09-01 13:12:02 +03:00
|
|
|
s.breachArbiter = newBreachArbiter(&BreachConfig{
|
2019-06-14 03:30:22 +03:00
|
|
|
CloseLink: closeLink,
|
|
|
|
DB: chanDB,
|
|
|
|
Estimator: s.cc.feeEstimator,
|
|
|
|
GenSweepScript: newSweepPkScriptGen(cc.wallet),
|
2018-01-21 07:25:54 +03:00
|
|
|
Notifier: cc.chainNotifier,
|
|
|
|
PublishTransaction: cc.wallet.PublishTransaction,
|
2018-04-18 15:01:18 +03:00
|
|
|
ContractBreaches: contractBreaches,
|
|
|
|
Signer: cc.wallet.Cfg.Signer,
|
|
|
|
Store: newRetributionStore(chanDB),
|
2017-09-01 13:12:02 +03:00
|
|
|
})
|
2017-01-13 06:40:38 +03:00
|
|
|
|
2018-08-02 02:02:47 +03:00
|
|
|
// Select the configuration and furnding parameters for Bitcoin or
|
|
|
|
// Litecoin, depending on the primary registered chain.
|
2020-05-14 14:37:32 +03:00
|
|
|
primaryChain := cfg.registeredChains.PrimaryChain()
|
2018-08-02 02:02:47 +03:00
|
|
|
chainCfg := cfg.Bitcoin
|
|
|
|
minRemoteDelay := minBtcRemoteDelay
|
|
|
|
maxRemoteDelay := maxBtcRemoteDelay
|
|
|
|
if primaryChain == litecoinChain {
|
|
|
|
chainCfg = cfg.Litecoin
|
|
|
|
minRemoteDelay = minLtcRemoteDelay
|
|
|
|
maxRemoteDelay = maxLtcRemoteDelay
|
|
|
|
}
|
|
|
|
|
|
|
|
var chanIDSeed [32]byte
|
|
|
|
if _, err := rand.Read(chanIDSeed[:]); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-08-08 05:18:23 +03:00
|
|
|
|
2018-08-02 02:02:47 +03:00
|
|
|
s.fundingMgr, err = newFundingManager(fundingConfig{
|
2020-04-28 11:06:29 +03:00
|
|
|
IDKey: nodeKeyECDH.PubKey(),
|
2018-08-02 02:02:47 +03:00
|
|
|
Wallet: cc.wallet,
|
|
|
|
PublishTransaction: cc.wallet.PublishTransaction,
|
|
|
|
Notifier: cc.chainNotifier,
|
|
|
|
FeeEstimator: cc.feeEstimator,
|
|
|
|
SignMessage: func(pubKey *btcec.PublicKey,
|
2020-04-06 03:06:38 +03:00
|
|
|
msg []byte) (input.Signature, error) {
|
2018-08-02 02:02:47 +03:00
|
|
|
|
2020-04-28 11:06:29 +03:00
|
|
|
if pubKey.IsEqual(nodeKeyECDH.PubKey()) {
|
2019-01-09 03:18:39 +03:00
|
|
|
return s.nodeSigner.SignMessage(pubKey, msg)
|
2018-08-02 02:02:47 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return cc.msgSigner.SignMessage(pubKey, msg)
|
|
|
|
},
|
|
|
|
CurrentNodeAnnouncement: func() (lnwire.NodeAnnouncement, error) {
|
|
|
|
return s.genNodeAnnouncement(true)
|
|
|
|
},
|
2019-04-17 23:26:11 +03:00
|
|
|
SendAnnouncement: func(msg lnwire.Message,
|
|
|
|
optionalFields ...discovery.OptionalMsgField) chan error {
|
|
|
|
|
2018-08-20 15:28:11 +03:00
|
|
|
return s.authGossiper.ProcessLocalAnnouncement(
|
2020-04-28 11:06:29 +03:00
|
|
|
msg, nodeKeyECDH.PubKey(), optionalFields...,
|
2018-08-02 02:02:47 +03:00
|
|
|
)
|
|
|
|
},
|
|
|
|
NotifyWhenOnline: s.NotifyWhenOnline,
|
|
|
|
TempChanIDSeed: chanIDSeed,
|
2018-09-26 12:12:57 +03:00
|
|
|
FindChannel: func(chanID lnwire.ChannelID) (
|
|
|
|
*channeldb.OpenChannel, error) {
|
|
|
|
|
2018-08-02 02:02:47 +03:00
|
|
|
dbChannels, err := chanDB.FetchAllChannels()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, channel := range dbChannels {
|
|
|
|
if chanID.IsChanPoint(&channel.FundingOutpoint) {
|
2018-09-26 12:12:57 +03:00
|
|
|
return channel, nil
|
2018-08-02 02:02:47 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil, fmt.Errorf("unable to find channel")
|
|
|
|
},
|
|
|
|
DefaultRoutingPolicy: cc.routingPolicy,
|
2019-11-15 12:09:27 +03:00
|
|
|
DefaultMinHtlcIn: cc.minHtlcIn,
|
2018-08-02 02:02:47 +03:00
|
|
|
NumRequiredConfs: func(chanAmt btcutil.Amount,
|
|
|
|
pushAmt lnwire.MilliSatoshi) uint16 {
|
|
|
|
// For large channels we increase the number
|
|
|
|
// of confirmations we require for the
|
|
|
|
// channel to be considered open. As it is
|
|
|
|
// always the responder that gets to choose
|
|
|
|
// value, the pushAmt is value being pushed
|
|
|
|
// to us. This means we have more to lose
|
|
|
|
// in the case this gets re-orged out, and
|
|
|
|
// we will require more confirmations before
|
|
|
|
// we consider it open.
|
|
|
|
// TODO(halseth): Use Litecoin params in case
|
|
|
|
// of LTC channels.
|
|
|
|
|
|
|
|
// In case the user has explicitly specified
|
|
|
|
// a default value for the number of
|
|
|
|
// confirmations, we use it.
|
|
|
|
defaultConf := uint16(chainCfg.DefaultNumChanConfs)
|
|
|
|
if defaultConf != 0 {
|
|
|
|
return defaultConf
|
|
|
|
}
|
|
|
|
|
|
|
|
// If not we return a value scaled linearly
|
|
|
|
// between 3 and 6, depending on channel size.
|
|
|
|
// TODO(halseth): Use 1 as minimum?
|
|
|
|
minConf := uint64(3)
|
|
|
|
maxConf := uint64(6)
|
|
|
|
maxChannelSize := uint64(
|
2019-05-15 10:02:53 +03:00
|
|
|
lnwire.NewMSatFromSatoshis(MaxFundingAmount))
|
2018-08-02 02:02:47 +03:00
|
|
|
stake := lnwire.NewMSatFromSatoshis(chanAmt) + pushAmt
|
|
|
|
conf := maxConf * uint64(stake) / maxChannelSize
|
|
|
|
if conf < minConf {
|
|
|
|
conf = minConf
|
|
|
|
}
|
|
|
|
if conf > maxConf {
|
|
|
|
conf = maxConf
|
|
|
|
}
|
|
|
|
return uint16(conf)
|
|
|
|
},
|
|
|
|
RequiredRemoteDelay: func(chanAmt btcutil.Amount) uint16 {
|
|
|
|
// We scale the remote CSV delay (the time the
|
|
|
|
// remote have to claim funds in case of a unilateral
|
|
|
|
// close) linearly from minRemoteDelay blocks
|
|
|
|
// for small channels, to maxRemoteDelay blocks
|
2019-05-15 10:02:53 +03:00
|
|
|
// for channels of size MaxFundingAmount.
|
2018-08-02 02:02:47 +03:00
|
|
|
// TODO(halseth): Litecoin parameter for LTC.
|
|
|
|
|
|
|
|
// In case the user has explicitly specified
|
|
|
|
// a default value for the remote delay, we
|
|
|
|
// use it.
|
|
|
|
defaultDelay := uint16(chainCfg.DefaultRemoteDelay)
|
|
|
|
if defaultDelay > 0 {
|
|
|
|
return defaultDelay
|
|
|
|
}
|
|
|
|
|
|
|
|
// If not we scale according to channel size.
|
|
|
|
delay := uint16(btcutil.Amount(maxRemoteDelay) *
|
2019-05-15 10:02:53 +03:00
|
|
|
chanAmt / MaxFundingAmount)
|
2018-08-02 02:02:47 +03:00
|
|
|
if delay < minRemoteDelay {
|
|
|
|
delay = minRemoteDelay
|
|
|
|
}
|
|
|
|
if delay > maxRemoteDelay {
|
|
|
|
delay = maxRemoteDelay
|
|
|
|
}
|
|
|
|
return delay
|
|
|
|
},
|
|
|
|
WatchNewChannel: func(channel *channeldb.OpenChannel,
|
|
|
|
peerKey *btcec.PublicKey) error {
|
|
|
|
|
|
|
|
// First, we'll mark this new peer as a persistent peer
|
2019-02-15 06:17:52 +03:00
|
|
|
// for re-connection purposes. If the peer is not yet
|
|
|
|
// tracked or the user hasn't requested it to be perm,
|
|
|
|
// we'll set false to prevent the server from continuing
|
|
|
|
// to connect to this peer even if the number of
|
|
|
|
// channels with this peer is zero.
|
2018-08-02 02:02:47 +03:00
|
|
|
s.mu.Lock()
|
|
|
|
pubStr := string(peerKey.SerializeCompressed())
|
2019-02-15 06:17:52 +03:00
|
|
|
if _, ok := s.persistentPeers[pubStr]; !ok {
|
|
|
|
s.persistentPeers[pubStr] = false
|
|
|
|
}
|
2018-08-02 02:02:47 +03:00
|
|
|
s.mu.Unlock()
|
|
|
|
|
|
|
|
// With that taken care of, we'll send this channel to
|
|
|
|
// the chain arb so it can react to on-chain events.
|
|
|
|
return s.chainArb.WatchNewChannel(channel)
|
|
|
|
},
|
|
|
|
ReportShortChanID: func(chanPoint wire.OutPoint) error {
|
|
|
|
cid := lnwire.NewChanIDFromOutPoint(&chanPoint)
|
|
|
|
return s.htlcSwitch.UpdateShortChanID(cid)
|
|
|
|
},
|
|
|
|
RequiredRemoteChanReserve: func(chanAmt,
|
|
|
|
dustLimit btcutil.Amount) btcutil.Amount {
|
|
|
|
|
|
|
|
// By default, we'll require the remote peer to maintain
|
|
|
|
// at least 1% of the total channel capacity at all
|
|
|
|
// times. If this value ends up dipping below the dust
|
|
|
|
// limit, then we'll use the dust limit itself as the
|
|
|
|
// reserve as required by BOLT #2.
|
|
|
|
reserve := chanAmt / 100
|
|
|
|
if reserve < dustLimit {
|
|
|
|
reserve = dustLimit
|
|
|
|
}
|
|
|
|
|
|
|
|
return reserve
|
|
|
|
},
|
|
|
|
RequiredRemoteMaxValue: func(chanAmt btcutil.Amount) lnwire.MilliSatoshi {
|
|
|
|
// By default, we'll allow the remote peer to fully
|
|
|
|
// utilize the full bandwidth of the channel, minus our
|
|
|
|
// required reserve.
|
|
|
|
reserve := lnwire.NewMSatFromSatoshis(chanAmt / 100)
|
|
|
|
return lnwire.NewMSatFromSatoshis(chanAmt) - reserve
|
|
|
|
},
|
|
|
|
RequiredRemoteMaxHTLCs: func(chanAmt btcutil.Amount) uint16 {
|
|
|
|
// By default, we'll permit them to utilize the full
|
|
|
|
// channel bandwidth.
|
2019-01-16 17:47:43 +03:00
|
|
|
return uint16(input.MaxHTLCNumber / 2)
|
2018-08-02 02:02:47 +03:00
|
|
|
},
|
2019-09-29 13:13:01 +03:00
|
|
|
ZombieSweeperInterval: 1 * time.Minute,
|
|
|
|
ReservationTimeout: 10 * time.Minute,
|
|
|
|
MinChanSize: btcutil.Amount(cfg.MinChanSize),
|
|
|
|
MaxPendingChannels: cfg.MaxPendingChannels,
|
|
|
|
RejectPush: cfg.RejectPush,
|
|
|
|
NotifyOpenChannelEvent: s.channelNotifier.NotifyOpenChannelEvent,
|
|
|
|
OpenChannelPredicate: chanPredicate,
|
|
|
|
NotifyPendingOpenChannelEvent: s.channelNotifier.NotifyPendingOpenChannelEvent,
|
2020-05-14 14:53:33 +03:00
|
|
|
EnableUpfrontShutdown: cfg.EnableUpfrontShutdown,
|
|
|
|
RegisteredChains: cfg.registeredChains,
|
2018-08-02 02:02:47 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-02-09 06:45:39 +03:00
|
|
|
// Next, we'll assemble the sub-system that will maintain an on-disk
|
|
|
|
// static backup of the latest channel state.
|
|
|
|
chanNotifier := &channelNotifier{
|
|
|
|
chanNotifier: s.channelNotifier,
|
|
|
|
addrs: s.chanDB,
|
|
|
|
}
|
|
|
|
backupFile := chanbackup.NewMultiFile(cfg.BackupFilePath)
|
|
|
|
startingChans, err := chanbackup.FetchStaticChanBackups(s.chanDB)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
s.chanSubSwapper, err = chanbackup.NewSubSwapper(
|
|
|
|
startingChans, chanNotifier, s.cc.keyRing, backupFile,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-07-29 17:59:48 +03:00
|
|
|
// Assemble a peer notifier which will provide clients with subscriptions
|
|
|
|
// to peer online and offline events.
|
|
|
|
s.peerNotifier = peernotifier.New()
|
|
|
|
|
2019-08-08 20:39:38 +03:00
|
|
|
// Create a channel event store which monitors all open channels.
|
|
|
|
s.chanEventStore = chanfitness.NewChannelEventStore(&chanfitness.Config{
|
|
|
|
SubscribeChannelEvents: s.channelNotifier.SubscribeChannelEvents,
|
|
|
|
SubscribePeerEvents: s.peerNotifier.SubscribePeerEvents,
|
|
|
|
GetOpenChannels: s.chanDB.FetchAllOpenChannels,
|
|
|
|
})
|
|
|
|
|
2019-07-04 05:54:28 +03:00
|
|
|
if cfg.WtClient.Active {
|
2019-06-14 03:29:47 +03:00
|
|
|
policy := wtpolicy.DefaultPolicy()
|
|
|
|
|
|
|
|
if cfg.WtClient.SweepFeeRate != 0 {
|
|
|
|
// We expose the sweep fee rate in sat/byte, but the
|
|
|
|
// tower protocol operations on sat/kw.
|
2019-10-31 05:43:05 +03:00
|
|
|
sweepRateSatPerByte := chainfee.SatPerKVByte(
|
2019-06-14 03:29:47 +03:00
|
|
|
1000 * cfg.WtClient.SweepFeeRate,
|
|
|
|
)
|
|
|
|
policy.SweepFeeRate = sweepRateSatPerByte.FeePerKWeight()
|
|
|
|
}
|
|
|
|
|
2019-06-14 03:38:50 +03:00
|
|
|
if err := policy.Validate(); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-06-14 03:29:47 +03:00
|
|
|
s.towerClient, err = wtclient.New(&wtclient.Config{
|
2019-06-14 03:30:22 +03:00
|
|
|
Signer: cc.wallet.Cfg.Signer,
|
|
|
|
NewAddress: newSweepPkScriptGen(cc.wallet),
|
2019-06-14 03:29:47 +03:00
|
|
|
SecretKeyRing: s.cc.keyRing,
|
|
|
|
Dial: cfg.net.Dial,
|
|
|
|
AuthDial: wtclient.AuthDial,
|
|
|
|
DB: towerClientDB,
|
2019-07-11 04:21:14 +03:00
|
|
|
Policy: policy,
|
2019-06-14 03:29:47 +03:00
|
|
|
ChainHash: *activeNetParams.GenesisHash,
|
|
|
|
MinBackoff: 10 * time.Second,
|
|
|
|
MaxBackoff: 5 * time.Minute,
|
|
|
|
ForceQuitDelay: wtclient.DefaultForceQuitDelay,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
// Create the connection manager which will be responsible for
|
|
|
|
// maintaining persistent outbound connections and also accepting new
|
|
|
|
// incoming connections
|
|
|
|
cmgr, err := connmgr.New(&connmgr.Config{
|
|
|
|
Listeners: listeners,
|
2017-08-09 02:51:41 +03:00
|
|
|
OnAccept: s.InboundPeerConnected,
|
2016-12-15 05:11:31 +03:00
|
|
|
RetryDuration: time.Second * 5,
|
|
|
|
TargetOutbound: 100,
|
2020-04-28 11:06:21 +03:00
|
|
|
Dial: noiseDial(s.identityECDH, s.cfg.net),
|
2017-08-09 02:51:41 +03:00
|
|
|
OnConnection: s.OutboundPeerConnected,
|
2016-12-15 05:11:31 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
s.connMgr = cmgr
|
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
return s, nil
|
|
|
|
}
|
|
|
|
|
2017-08-03 06:55:51 +03:00
|
|
|
// Started returns true if the server has been started, and false otherwise.
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2017-08-03 06:55:51 +03:00
|
|
|
func (s *server) Started() bool {
|
2019-03-12 02:12:15 +03:00
|
|
|
return atomic.LoadInt32(&s.active) != 0
|
2017-08-03 06:55:51 +03:00
|
|
|
}
|
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
// Start starts the main daemon server, all requested listeners, and any helper
|
|
|
|
// goroutines.
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2017-04-24 05:21:32 +03:00
|
|
|
func (s *server) Start() error {
|
2019-03-12 02:12:15 +03:00
|
|
|
var startErr error
|
|
|
|
s.start.Do(func() {
|
|
|
|
if s.torController != nil {
|
2020-03-17 17:58:36 +03:00
|
|
|
if err := s.createNewHiddenService(); err != nil {
|
2019-03-12 02:12:15 +03:00
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
2018-04-28 00:02:05 +03:00
|
|
|
}
|
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
if s.natTraversal != nil {
|
|
|
|
s.wg.Add(1)
|
|
|
|
go s.watchExternalIP()
|
|
|
|
}
|
2018-04-20 08:00:49 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
// Start the notification server. This is used so channel
|
|
|
|
// management goroutines can be notified when a funding
|
|
|
|
// transaction reaches a sufficient number of confirmations, or
|
|
|
|
// when the input for the funding transaction is spent in an
|
|
|
|
// attempt at an uncooperative close by the counterparty.
|
|
|
|
if err := s.sigPool.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.writePool.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.readPool.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.cc.chainNotifier.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.channelNotifier.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
2019-07-29 17:59:48 +03:00
|
|
|
if err := s.peerNotifier.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
2020-02-19 18:34:47 +03:00
|
|
|
if err := s.htlcNotifier.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
2019-03-12 02:12:15 +03:00
|
|
|
if err := s.sphinx.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
2019-06-14 03:29:47 +03:00
|
|
|
if s.towerClient != nil {
|
|
|
|
if err := s.towerClient.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
2019-03-12 02:12:15 +03:00
|
|
|
if err := s.htlcSwitch.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.sweeper.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.utxoNursery.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.chainArb.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.breachArbiter.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.authGossiper.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.chanRouter.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.fundingMgr.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.invoices.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.chanStatusMgr.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
2018-04-25 07:08:46 +03:00
|
|
|
|
2019-08-08 20:39:38 +03:00
|
|
|
if err := s.chanEventStore.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
// Before we start the connMgr, we'll check to see if we have
|
|
|
|
// any backups to recover. We do this now as we want to ensure
|
|
|
|
// that have all the information we need to handle channel
|
|
|
|
// recovery _before_ we even accept connections from any peers.
|
|
|
|
chanRestorer := &chanDBRestorer{
|
|
|
|
db: s.chanDB,
|
|
|
|
secretKeys: s.cc.keyRing,
|
|
|
|
chainArb: s.chainArb,
|
2018-12-10 07:08:32 +03:00
|
|
|
}
|
2019-03-12 02:12:15 +03:00
|
|
|
if len(s.chansToRestore.PackedSingleChanBackups) != 0 {
|
|
|
|
err := chanbackup.UnpackAndRecoverSingles(
|
|
|
|
s.chansToRestore.PackedSingleChanBackups,
|
|
|
|
s.cc.keyRing, chanRestorer, s,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
startErr = fmt.Errorf("unable to unpack single "+
|
|
|
|
"backups: %v", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if len(s.chansToRestore.PackedMultiChanBackup) != 0 {
|
|
|
|
err := chanbackup.UnpackAndRecoverMulti(
|
|
|
|
s.chansToRestore.PackedMultiChanBackup,
|
|
|
|
s.cc.keyRing, chanRestorer, s,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
startErr = fmt.Errorf("unable to unpack chan "+
|
|
|
|
"backup: %v", err)
|
|
|
|
return
|
|
|
|
}
|
2018-12-10 07:08:32 +03:00
|
|
|
}
|
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
if err := s.chanSubSwapper.Start(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
2019-02-09 06:45:39 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
s.connMgr.Start()
|
2018-12-10 07:08:32 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
// With all the relevant sub-systems started, we'll now attempt
|
|
|
|
// to establish persistent connections to our direct channel
|
|
|
|
// collaborators within the network. Before doing so however,
|
|
|
|
// we'll prune our set of link nodes found within the database
|
|
|
|
// to ensure we don't reconnect to any nodes we no longer have
|
|
|
|
// open channels with.
|
|
|
|
if err := s.chanDB.PruneLinkNodes(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err := s.establishPersistentConnections(); err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
2017-04-24 05:21:32 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
// If network bootstrapping hasn't been disabled, then we'll
|
|
|
|
// configure the set of active bootstrappers, and launch a
|
|
|
|
// dedicated goroutine to maintain a set of persistent
|
|
|
|
// connections.
|
2020-05-14 15:18:11 +03:00
|
|
|
if !s.cfg.NoNetBootstrap &&
|
|
|
|
!(s.cfg.Bitcoin.SimNet || s.cfg.Litecoin.SimNet) &&
|
|
|
|
!(s.cfg.Bitcoin.RegTest || s.cfg.Litecoin.RegTest) {
|
2018-03-15 03:07:01 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
bootstrappers, err := initNetworkBootstrappers(s)
|
|
|
|
if err != nil {
|
|
|
|
startErr = err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
s.wg.Add(1)
|
|
|
|
go s.peerBootstrapper(defaultMinPeers, bootstrappers)
|
|
|
|
} else {
|
|
|
|
srvrLog.Infof("Auto peer bootstrapping is disabled")
|
2017-09-04 02:58:14 +03:00
|
|
|
}
|
2018-03-15 03:07:01 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
// Set the active flag now that we've completed the full
|
|
|
|
// startup.
|
|
|
|
atomic.StoreInt32(&s.active, 1)
|
|
|
|
})
|
2017-09-04 02:58:14 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
return startErr
|
2017-04-24 05:21:32 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Stop gracefully shutsdown the main daemon server. This function will signal
|
|
|
|
// any active goroutines, or helper objects to exit, then blocks until they've
|
|
|
|
// all successfully exited. Additionally, any/all listeners are closed.
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2017-04-24 05:21:32 +03:00
|
|
|
func (s *server) Stop() error {
|
2019-03-12 02:12:15 +03:00
|
|
|
s.stop.Do(func() {
|
2019-06-19 05:38:20 +03:00
|
|
|
atomic.StoreInt32(&s.stopping, 1)
|
2017-04-24 05:21:32 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
close(s.quit)
|
2017-08-11 07:18:57 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
// Shutdown the wallet, funding manager, and the rpc server.
|
|
|
|
s.chanStatusMgr.Stop()
|
|
|
|
s.cc.chainNotifier.Stop()
|
|
|
|
s.chanRouter.Stop()
|
|
|
|
s.htlcSwitch.Stop()
|
|
|
|
s.sphinx.Stop()
|
|
|
|
s.utxoNursery.Stop()
|
|
|
|
s.breachArbiter.Stop()
|
|
|
|
s.authGossiper.Stop()
|
|
|
|
s.chainArb.Stop()
|
|
|
|
s.sweeper.Stop()
|
|
|
|
s.channelNotifier.Stop()
|
2019-07-29 17:59:48 +03:00
|
|
|
s.peerNotifier.Stop()
|
2020-02-19 18:34:47 +03:00
|
|
|
s.htlcNotifier.Stop()
|
2019-03-12 02:12:15 +03:00
|
|
|
s.cc.wallet.Shutdown()
|
|
|
|
s.cc.chainView.Stop()
|
|
|
|
s.connMgr.Stop()
|
|
|
|
s.cc.feeEstimator.Stop()
|
|
|
|
s.invoices.Stop()
|
|
|
|
s.fundingMgr.Stop()
|
2019-04-02 10:58:13 +03:00
|
|
|
s.chanSubSwapper.Stop()
|
2019-08-08 20:39:38 +03:00
|
|
|
s.chanEventStore.Stop()
|
2019-03-12 02:12:15 +03:00
|
|
|
|
|
|
|
// Disconnect from each active peers to ensure that
|
|
|
|
// peerTerminationWatchers signal completion to each peer.
|
|
|
|
for _, peer := range s.Peers() {
|
|
|
|
s.DisconnectPeer(peer.addr.IdentityKey)
|
|
|
|
}
|
2017-04-24 05:21:32 +03:00
|
|
|
|
2019-06-14 03:29:47 +03:00
|
|
|
// Now that all connections have been torn down, stop the tower
|
|
|
|
// client which will reliably flush all queued states to the
|
|
|
|
// tower. If this is halted for any reason, the force quit timer
|
|
|
|
// will kick in and abort to allow this method to return.
|
|
|
|
if s.towerClient != nil {
|
|
|
|
s.towerClient.Stop()
|
|
|
|
}
|
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
// Wait for all lingering goroutines to quit.
|
|
|
|
s.wg.Wait()
|
2017-04-24 05:21:32 +03:00
|
|
|
|
2019-03-12 02:12:15 +03:00
|
|
|
s.sigPool.Stop()
|
|
|
|
s.writePool.Stop()
|
|
|
|
s.readPool.Stop()
|
|
|
|
})
|
2019-02-22 07:10:51 +03:00
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// Stopped returns true if the server has been instructed to shutdown.
|
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) Stopped() bool {
|
2019-03-12 02:12:15 +03:00
|
|
|
return atomic.LoadInt32(&s.stopping) != 0
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
|
|
|
|
2018-09-06 11:48:46 +03:00
|
|
|
// configurePortForwarding attempts to set up port forwarding for the different
|
2018-04-20 08:00:49 +03:00
|
|
|
// ports that the server will be listening on.
|
|
|
|
//
|
|
|
|
// NOTE: This should only be used when using some kind of NAT traversal to
|
|
|
|
// automatically set up forwarding rules.
|
|
|
|
func (s *server) configurePortForwarding(ports ...uint16) ([]string, error) {
|
|
|
|
ip, err := s.natTraversal.ExternalIP()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
s.lastDetectedIP = ip
|
|
|
|
|
|
|
|
externalIPs := make([]string, 0, len(ports))
|
|
|
|
for _, port := range ports {
|
|
|
|
if err := s.natTraversal.AddPortMapping(port); err != nil {
|
|
|
|
srvrLog.Debugf("Unable to forward port %d: %v", port, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
hostIP := fmt.Sprintf("%v:%d", ip, port)
|
|
|
|
externalIPs = append(externalIPs, hostIP)
|
|
|
|
}
|
|
|
|
|
|
|
|
return externalIPs, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// removePortForwarding attempts to clear the forwarding rules for the different
|
|
|
|
// ports the server is currently listening on.
|
|
|
|
//
|
|
|
|
// NOTE: This should only be used when using some kind of NAT traversal to
|
|
|
|
// automatically set up forwarding rules.
|
|
|
|
func (s *server) removePortForwarding() {
|
|
|
|
forwardedPorts := s.natTraversal.ForwardedPorts()
|
|
|
|
for _, port := range forwardedPorts {
|
|
|
|
if err := s.natTraversal.DeletePortMapping(port); err != nil {
|
|
|
|
srvrLog.Errorf("Unable to remove forwarding rules for "+
|
|
|
|
"port %d: %v", port, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-06 11:48:46 +03:00
|
|
|
// watchExternalIP continuously checks for an updated external IP address every
|
2018-04-20 08:00:49 +03:00
|
|
|
// 15 minutes. Once a new IP address has been detected, it will automatically
|
|
|
|
// handle port forwarding rules and send updated node announcements to the
|
|
|
|
// currently connected peers.
|
|
|
|
//
|
|
|
|
// NOTE: This MUST be run as a goroutine.
|
|
|
|
func (s *server) watchExternalIP() {
|
|
|
|
defer s.wg.Done()
|
|
|
|
|
|
|
|
// Before exiting, we'll make sure to remove the forwarding rules set
|
|
|
|
// up by the server.
|
|
|
|
defer s.removePortForwarding()
|
|
|
|
|
|
|
|
// Keep track of the external IPs set by the user to avoid replacing
|
|
|
|
// them when detecting a new IP.
|
|
|
|
ipsSetByUser := make(map[string]struct{})
|
2020-05-14 15:18:11 +03:00
|
|
|
for _, ip := range s.cfg.ExternalIPs {
|
2018-05-23 16:38:19 +03:00
|
|
|
ipsSetByUser[ip.String()] = struct{}{}
|
2018-04-20 08:00:49 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
forwardedPorts := s.natTraversal.ForwardedPorts()
|
|
|
|
|
|
|
|
ticker := time.NewTicker(15 * time.Minute)
|
|
|
|
defer ticker.Stop()
|
|
|
|
out:
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-ticker.C:
|
|
|
|
// We'll start off by making sure a new IP address has
|
|
|
|
// been detected.
|
|
|
|
ip, err := s.natTraversal.ExternalIP()
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Debugf("Unable to retrieve the "+
|
|
|
|
"external IP address: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-04-13 16:49:31 +03:00
|
|
|
// Periodically renew the NAT port forwarding.
|
|
|
|
for _, port := range forwardedPorts {
|
|
|
|
err := s.natTraversal.AddPortMapping(port)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Warnf("Unable to automatically "+
|
|
|
|
"re-create port forwarding using %s: %v",
|
|
|
|
s.natTraversal.Name(), err)
|
|
|
|
} else {
|
|
|
|
srvrLog.Debugf("Automatically re-created "+
|
|
|
|
"forwarding for port %d using %s to "+
|
|
|
|
"advertise external IP",
|
|
|
|
port, s.natTraversal.Name())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-04-20 08:00:49 +03:00
|
|
|
if ip.Equal(s.lastDetectedIP) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
srvrLog.Infof("Detected new external IP address %s", ip)
|
|
|
|
|
|
|
|
// Next, we'll craft the new addresses that will be
|
|
|
|
// included in the new node announcement and advertised
|
|
|
|
// to the network. Each address will consist of the new
|
|
|
|
// IP detected and one of the currently advertised
|
|
|
|
// ports.
|
|
|
|
var newAddrs []net.Addr
|
|
|
|
for _, port := range forwardedPorts {
|
|
|
|
hostIP := fmt.Sprintf("%v:%d", ip, port)
|
|
|
|
addr, err := net.ResolveTCPAddr("tcp", hostIP)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Debugf("Unable to resolve "+
|
|
|
|
"host %v: %v", addr, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
newAddrs = append(newAddrs, addr)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Skip the update if we weren't able to resolve any of
|
|
|
|
// the new addresses.
|
|
|
|
if len(newAddrs) == 0 {
|
|
|
|
srvrLog.Debug("Skipping node announcement " +
|
|
|
|
"update due to not being able to " +
|
|
|
|
"resolve any new addresses")
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now, we'll need to update the addresses in our node's
|
2018-09-06 11:48:46 +03:00
|
|
|
// announcement in order to propagate the update
|
2018-04-20 08:00:49 +03:00
|
|
|
// throughout the network. We'll only include addresses
|
|
|
|
// that have a different IP from the previous one, as
|
|
|
|
// the previous IP is no longer valid.
|
|
|
|
currentNodeAnn, err := s.genNodeAnnouncement(false)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Debugf("Unable to retrieve current "+
|
|
|
|
"node announcement: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, addr := range currentNodeAnn.Addresses {
|
|
|
|
host, _, err := net.SplitHostPort(addr.String())
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Debugf("Unable to determine "+
|
|
|
|
"host from address %v: %v",
|
|
|
|
addr, err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll also make sure to include external IPs
|
|
|
|
// set manually by the user.
|
|
|
|
_, setByUser := ipsSetByUser[addr.String()]
|
|
|
|
if setByUser || host != s.lastDetectedIP.String() {
|
|
|
|
newAddrs = append(newAddrs, addr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Then, we'll generate a new timestamped node
|
|
|
|
// announcement with the updated addresses and broadcast
|
|
|
|
// it to our peers.
|
|
|
|
newNodeAnn, err := s.genNodeAnnouncement(
|
2020-03-18 02:25:02 +03:00
|
|
|
true, netann.NodeAnnSetAddrs(newAddrs),
|
2018-04-20 08:00:49 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Debugf("Unable to generate new node "+
|
|
|
|
"announcement: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
err = s.BroadcastMessage(nil, &newNodeAnn)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Debugf("Unable to broadcast new node "+
|
|
|
|
"announcement to peers: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, update the last IP seen to the current one.
|
|
|
|
s.lastDetectedIP = ip
|
|
|
|
case <-s.quit:
|
|
|
|
break out
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-09-04 02:58:14 +03:00
|
|
|
// initNetworkBootstrappers initializes a set of network peer bootstrappers
|
|
|
|
// based on the server, and currently active bootstrap mechanisms as defined
|
|
|
|
// within the current configuration.
|
|
|
|
func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, error) {
|
2018-02-07 06:11:11 +03:00
|
|
|
srvrLog.Infof("Initializing peer network bootstrappers!")
|
2017-09-04 02:58:14 +03:00
|
|
|
|
|
|
|
var bootStrappers []discovery.NetworkPeerBootstrapper
|
|
|
|
|
|
|
|
// First, we'll create an instance of the ChannelGraphBootstrapper as
|
|
|
|
// this can be used by default if we've already partially seeded the
|
|
|
|
// network.
|
|
|
|
chanGraph := autopilot.ChannelGraphFromDatabase(s.chanDB.ChannelGraph())
|
|
|
|
graphBootstrapper, err := discovery.NewGraphBootstrapper(chanGraph)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
bootStrappers = append(bootStrappers, graphBootstrapper)
|
|
|
|
|
|
|
|
// If this isn't simnet mode, then one of our additional bootstrapping
|
|
|
|
// sources will be the set of running DNS seeds.
|
2020-05-14 15:18:11 +03:00
|
|
|
if !s.cfg.Bitcoin.SimNet || !s.cfg.Litecoin.SimNet {
|
2017-12-03 05:05:43 +03:00
|
|
|
dnsSeeds, ok := chainDNSSeeds[*activeNetParams.GenesisHash]
|
2017-09-04 02:58:14 +03:00
|
|
|
|
|
|
|
// If we have a set of DNS seeds for this chain, then we'll add
|
2018-02-07 06:11:11 +03:00
|
|
|
// it as an additional bootstrapping source.
|
2017-09-04 02:58:14 +03:00
|
|
|
if ok {
|
2018-02-07 06:11:11 +03:00
|
|
|
srvrLog.Infof("Creating DNS peer bootstrapper with "+
|
2017-09-04 02:58:14 +03:00
|
|
|
"seeds: %v", dnsSeeds)
|
|
|
|
|
2018-04-29 07:44:55 +03:00
|
|
|
dnsBootStrapper := discovery.NewDNSSeedBootstrapper(
|
2020-05-14 15:18:11 +03:00
|
|
|
dnsSeeds, s.cfg.net,
|
2017-09-04 02:58:14 +03:00
|
|
|
)
|
|
|
|
bootStrappers = append(bootStrappers, dnsBootStrapper)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return bootStrappers, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// peerBootstrapper is a goroutine which is tasked with attempting to establish
|
2018-05-08 07:45:36 +03:00
|
|
|
// and maintain a target minimum number of outbound connections. With this
|
2017-09-04 02:58:14 +03:00
|
|
|
// invariant, we ensure that our node is connected to a diverse set of peers
|
|
|
|
// and that nodes newly joining the network receive an up to date network view
|
|
|
|
// as soon as possible.
|
|
|
|
func (s *server) peerBootstrapper(numTargetPeers uint32,
|
2018-05-08 07:45:36 +03:00
|
|
|
bootstrappers []discovery.NetworkPeerBootstrapper) {
|
2017-09-04 02:58:14 +03:00
|
|
|
|
|
|
|
defer s.wg.Done()
|
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
// ignore is a set used to keep track of peers already retrieved from
|
|
|
|
// our bootstrappers in order to avoid duplicates.
|
|
|
|
ignore := make(map[autopilot.NodeID]struct{})
|
2017-09-04 02:58:14 +03:00
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
// We'll start off by aggressively attempting connections to peers in
|
|
|
|
// order to be a part of the network as soon as possible.
|
|
|
|
s.initialPeerBootstrap(ignore, numTargetPeers, bootstrappers)
|
2017-09-04 02:58:14 +03:00
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
// Once done, we'll attempt to maintain our target minimum number of
|
|
|
|
// peers.
|
|
|
|
//
|
|
|
|
// We'll use a 15 second backoff, and double the time every time an
|
|
|
|
// epoch fails up to a ceiling.
|
2017-09-04 02:58:14 +03:00
|
|
|
backOff := time.Second * 15
|
|
|
|
|
|
|
|
// We'll create a new ticker to wake us up every 15 seconds so we can
|
|
|
|
// see if we've reached our minimum number of peers.
|
|
|
|
sampleTicker := time.NewTicker(backOff)
|
|
|
|
defer sampleTicker.Stop()
|
|
|
|
|
|
|
|
// We'll use the number of attempts and errors to determine if we need
|
|
|
|
// to increase the time between discovery epochs.
|
2018-06-01 01:41:41 +03:00
|
|
|
var epochErrors uint32 // To be used atomically.
|
|
|
|
var epochAttempts uint32
|
2017-09-04 02:58:14 +03:00
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
// The ticker has just woken us up, so we'll need to check if
|
|
|
|
// we need to attempt to connect our to any more peers.
|
|
|
|
case <-sampleTicker.C:
|
2017-10-11 05:16:43 +03:00
|
|
|
// Obtain the current number of peers, so we can gauge
|
|
|
|
// if we need to sample more peers or not.
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RLock()
|
2017-10-11 05:16:43 +03:00
|
|
|
numActivePeers := uint32(len(s.peersByPub))
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RUnlock()
|
2017-10-11 05:16:43 +03:00
|
|
|
|
|
|
|
// If we have enough peers, then we can loop back
|
|
|
|
// around to the next round as we're done here.
|
|
|
|
if numActivePeers >= numTargetPeers {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-09-04 02:58:14 +03:00
|
|
|
// If all of our attempts failed during this last back
|
|
|
|
// off period, then will increase our backoff to 5
|
|
|
|
// minute ceiling to avoid an excessive number of
|
|
|
|
// queries
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): add reverse policy too?
|
2017-10-11 05:16:43 +03:00
|
|
|
|
2017-09-04 02:58:14 +03:00
|
|
|
if epochAttempts > 0 &&
|
|
|
|
atomic.LoadUint32(&epochErrors) >= epochAttempts {
|
|
|
|
|
|
|
|
sampleTicker.Stop()
|
|
|
|
|
|
|
|
backOff *= 2
|
2019-06-28 04:50:28 +03:00
|
|
|
if backOff > bootstrapBackOffCeiling {
|
|
|
|
backOff = bootstrapBackOffCeiling
|
2017-09-04 02:58:14 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
srvrLog.Debugf("Backing off peer bootstrapper to "+
|
|
|
|
"%v", backOff)
|
|
|
|
sampleTicker = time.NewTicker(backOff)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
atomic.StoreUint32(&epochErrors, 0)
|
|
|
|
epochAttempts = 0
|
|
|
|
|
|
|
|
// Since we know need more peers, we'll compute the
|
|
|
|
// exact number we need to reach our threshold.
|
|
|
|
numNeeded := numTargetPeers - numActivePeers
|
|
|
|
|
2017-10-05 00:52:48 +03:00
|
|
|
srvrLog.Debugf("Attempting to obtain %v more network "+
|
2017-09-04 02:58:14 +03:00
|
|
|
"peers", numNeeded)
|
|
|
|
|
|
|
|
// With the number of peers we need calculated, we'll
|
|
|
|
// query the network bootstrappers to sample a set of
|
|
|
|
// random addrs for us.
|
2018-01-29 01:50:15 +03:00
|
|
|
s.mu.RLock()
|
2017-09-04 02:58:14 +03:00
|
|
|
ignoreList := make(map[autopilot.NodeID]struct{})
|
|
|
|
for _, peer := range s.peersByPub {
|
|
|
|
nID := autopilot.NewNodeID(peer.addr.IdentityKey)
|
|
|
|
ignoreList[nID] = struct{}{}
|
|
|
|
}
|
2018-01-29 01:50:15 +03:00
|
|
|
s.mu.RUnlock()
|
2017-09-04 02:58:14 +03:00
|
|
|
|
|
|
|
peerAddrs, err := discovery.MultiSourceBootstrap(
|
2018-05-08 07:45:36 +03:00
|
|
|
ignoreList, numNeeded*2, bootstrappers...,
|
2017-09-04 02:58:14 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("Unable to retrieve bootstrap "+
|
|
|
|
"peers: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, we'll launch a new goroutine for each
|
|
|
|
// prospective peer candidates.
|
|
|
|
for _, addr := range peerAddrs {
|
2017-09-04 03:04:53 +03:00
|
|
|
epochAttempts++
|
2017-09-04 02:58:14 +03:00
|
|
|
|
|
|
|
go func(a *lnwire.NetAddress) {
|
|
|
|
// TODO(roasbeef): can do AS, subnet,
|
|
|
|
// country diversity, etc
|
2018-05-08 07:45:36 +03:00
|
|
|
errChan := make(chan error, 1)
|
|
|
|
s.connectToPeer(a, errChan)
|
|
|
|
select {
|
|
|
|
case err := <-errChan:
|
2018-08-08 23:13:52 +03:00
|
|
|
if err == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
srvrLog.Errorf("Unable to "+
|
|
|
|
"connect to %v: %v",
|
|
|
|
a, err)
|
2017-09-04 02:58:14 +03:00
|
|
|
atomic.AddUint32(&epochErrors, 1)
|
2018-05-08 07:45:36 +03:00
|
|
|
case <-s.quit:
|
2017-09-04 02:58:14 +03:00
|
|
|
}
|
|
|
|
}(addr)
|
|
|
|
}
|
|
|
|
case <-s.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-28 04:50:28 +03:00
|
|
|
// bootstrapBackOffCeiling is the maximum amount of time we'll wait between
|
|
|
|
// failed attempts to locate a set of bootstrap peers. We'll slowly double our
|
|
|
|
// query back off each time we encounter a failure.
|
|
|
|
const bootstrapBackOffCeiling = time.Minute * 5
|
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
// initialPeerBootstrap attempts to continuously connect to peers on startup
|
|
|
|
// until the target number of peers has been reached. This ensures that nodes
|
|
|
|
// receive an up to date network view as soon as possible.
|
|
|
|
func (s *server) initialPeerBootstrap(ignore map[autopilot.NodeID]struct{},
|
|
|
|
numTargetPeers uint32, bootstrappers []discovery.NetworkPeerBootstrapper) {
|
|
|
|
|
2019-06-28 05:04:11 +03:00
|
|
|
// We'll start off by waiting 2 seconds between failed attempts, then
|
|
|
|
// double each time we fail until we hit the bootstrapBackOffCeiling.
|
|
|
|
var delaySignal <-chan time.Time
|
|
|
|
delayTime := time.Second * 2
|
2018-05-08 07:45:36 +03:00
|
|
|
|
2019-06-28 05:04:11 +03:00
|
|
|
// As want to be more aggressive, we'll use a lower back off celling
|
|
|
|
// then the main peer bootstrap logic.
|
|
|
|
backOffCeiling := bootstrapBackOffCeiling / 5
|
|
|
|
|
|
|
|
for attempts := 0; ; attempts++ {
|
2018-05-08 07:45:36 +03:00
|
|
|
// Check if the server has been requested to shut down in order
|
|
|
|
// to prevent blocking.
|
|
|
|
if s.Stopped() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// We can exit our aggressive initial peer bootstrapping stage
|
|
|
|
// if we've reached out target number of peers.
|
|
|
|
s.mu.RLock()
|
|
|
|
numActivePeers := uint32(len(s.peersByPub))
|
|
|
|
s.mu.RUnlock()
|
|
|
|
|
|
|
|
if numActivePeers >= numTargetPeers {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-06-28 05:04:11 +03:00
|
|
|
if attempts > 0 {
|
|
|
|
srvrLog.Debugf("Waiting %v before trying to locate "+
|
|
|
|
"bootstrap peers (attempt #%v)", delayTime,
|
|
|
|
attempts)
|
|
|
|
|
|
|
|
// We've completed at least one iterating and haven't
|
|
|
|
// finished, so we'll start to insert a delay period
|
|
|
|
// between each attempt.
|
|
|
|
delaySignal = time.After(delayTime)
|
|
|
|
select {
|
|
|
|
case <-delaySignal:
|
|
|
|
case <-s.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// After our delay, we'll double the time we wait up to
|
|
|
|
// the max back off period.
|
|
|
|
delayTime *= 2
|
|
|
|
if delayTime > backOffCeiling {
|
|
|
|
delayTime = backOffCeiling
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll request for the remaining number of peers
|
|
|
|
// in order to reach our target.
|
2018-05-08 07:45:36 +03:00
|
|
|
peersNeeded := numTargetPeers - numActivePeers
|
|
|
|
bootstrapAddrs, err := discovery.MultiSourceBootstrap(
|
|
|
|
ignore, peersNeeded, bootstrappers...,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("Unable to retrieve initial bootstrap "+
|
|
|
|
"peers: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Then, we'll attempt to establish a connection to the
|
|
|
|
// different peer addresses retrieved by our bootstrappers.
|
2019-06-28 04:49:48 +03:00
|
|
|
var wg sync.WaitGroup
|
2018-05-08 07:45:36 +03:00
|
|
|
for _, bootstrapAddr := range bootstrapAddrs {
|
|
|
|
wg.Add(1)
|
|
|
|
go func(addr *lnwire.NetAddress) {
|
|
|
|
defer wg.Done()
|
|
|
|
|
|
|
|
errChan := make(chan error, 1)
|
|
|
|
go s.connectToPeer(addr, errChan)
|
|
|
|
|
|
|
|
// We'll only allow this connection attempt to
|
|
|
|
// take up to 3 seconds. This allows us to move
|
|
|
|
// quickly by discarding peers that are slowing
|
|
|
|
// us down.
|
|
|
|
select {
|
|
|
|
case err := <-errChan:
|
2018-08-08 23:13:52 +03:00
|
|
|
if err == nil {
|
|
|
|
return
|
|
|
|
}
|
2018-05-08 07:45:36 +03:00
|
|
|
srvrLog.Errorf("Unable to connect to "+
|
|
|
|
"%v: %v", addr, err)
|
|
|
|
// TODO: tune timeout? 3 seconds might be *too*
|
|
|
|
// aggressive but works well.
|
|
|
|
case <-time.After(3 * time.Second):
|
|
|
|
srvrLog.Tracef("Skipping peer %v due "+
|
|
|
|
"to not establishing a "+
|
|
|
|
"connection within 3 seconds",
|
|
|
|
addr)
|
|
|
|
case <-s.quit:
|
|
|
|
}
|
|
|
|
}(bootstrapAddr)
|
|
|
|
}
|
|
|
|
|
|
|
|
wg.Wait()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-03-17 17:58:36 +03:00
|
|
|
// createNewHiddenService automatically sets up a v2 or v3 onion service in
|
|
|
|
// order to listen for inbound connections over Tor.
|
|
|
|
func (s *server) createNewHiddenService() error {
|
2018-04-28 00:02:05 +03:00
|
|
|
// Determine the different ports the server is listening on. The onion
|
|
|
|
// service's virtual port will map to these ports and one will be picked
|
|
|
|
// at random when the onion service is being accessed.
|
2018-07-01 05:26:11 +03:00
|
|
|
listenPorts := make([]int, 0, len(s.listenAddrs))
|
2018-04-28 00:02:05 +03:00
|
|
|
for _, listenAddr := range s.listenAddrs {
|
2018-07-01 05:26:11 +03:00
|
|
|
port := listenAddr.(*net.TCPAddr).Port
|
|
|
|
listenPorts = append(listenPorts, port)
|
2018-04-28 00:02:05 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Once the port mapping has been set, we can go ahead and automatically
|
|
|
|
// create our onion service. The service's private key will be saved to
|
|
|
|
// disk in order to regain access to this service when restarting `lnd`.
|
2018-07-01 05:26:11 +03:00
|
|
|
onionCfg := tor.AddOnionConfig{
|
2020-03-07 03:26:51 +03:00
|
|
|
VirtualPort: defaultPeerPort,
|
|
|
|
TargetPorts: listenPorts,
|
2020-05-14 15:18:11 +03:00
|
|
|
Store: tor.NewOnionFile(s.cfg.Tor.PrivateKeyPath, 0600),
|
2018-07-01 05:26:11 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
switch {
|
2020-05-14 15:18:11 +03:00
|
|
|
case s.cfg.Tor.V2:
|
2018-07-01 05:26:11 +03:00
|
|
|
onionCfg.Type = tor.V2
|
2020-05-14 15:18:11 +03:00
|
|
|
case s.cfg.Tor.V3:
|
2018-07-01 05:26:11 +03:00
|
|
|
onionCfg.Type = tor.V3
|
|
|
|
}
|
|
|
|
|
|
|
|
addr, err := s.torController.AddOnion(onionCfg)
|
2018-04-28 00:02:05 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-07-01 05:26:11 +03:00
|
|
|
// Now that the onion service has been created, we'll add the onion
|
|
|
|
// address it can be reached at to our list of advertised addresses.
|
2018-09-21 05:27:12 +03:00
|
|
|
newNodeAnn, err := s.genNodeAnnouncement(
|
|
|
|
true, func(currentAnn *lnwire.NodeAnnouncement) {
|
|
|
|
currentAnn.Addresses = append(currentAnn.Addresses, addr)
|
|
|
|
},
|
|
|
|
)
|
|
|
|
if err != nil {
|
2020-04-14 20:56:05 +03:00
|
|
|
return fmt.Errorf("unable to generate new node "+
|
2018-09-21 05:27:12 +03:00
|
|
|
"announcement: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, we'll update the on-disk version of our announcement so it
|
|
|
|
// will eventually propagate to nodes in the network.
|
|
|
|
selfNode := &channeldb.LightningNode{
|
|
|
|
HaveNodeAnnouncement: true,
|
|
|
|
LastUpdate: time.Unix(int64(newNodeAnn.Timestamp), 0),
|
|
|
|
Addresses: newNodeAnn.Addresses,
|
|
|
|
Alias: newNodeAnn.Alias.String(),
|
|
|
|
Features: lnwire.NewFeatureVector(
|
2019-11-08 16:32:00 +03:00
|
|
|
newNodeAnn.Features, lnwire.Features,
|
2018-09-21 05:27:12 +03:00
|
|
|
),
|
|
|
|
Color: newNodeAnn.RGBColor,
|
|
|
|
AuthSigBytes: newNodeAnn.Signature.ToSignatureBytes(),
|
|
|
|
}
|
2020-04-28 11:06:21 +03:00
|
|
|
copy(selfNode.PubKeyBytes[:], s.identityECDH.PubKey().SerializeCompressed())
|
2018-09-21 05:27:12 +03:00
|
|
|
if err := s.chanDB.ChannelGraph().SetSourceNode(selfNode); err != nil {
|
|
|
|
return fmt.Errorf("can't set self node: %v", err)
|
|
|
|
}
|
2018-04-28 00:02:05 +03:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-08-05 04:32:25 +03:00
|
|
|
// genNodeAnnouncement generates and returns the current fully signed node
|
|
|
|
// announcement. If refresh is true, then the time stamp of the announcement
|
|
|
|
// will be updated in order to ensure it propagates through the network.
|
2018-04-23 19:43:50 +03:00
|
|
|
func (s *server) genNodeAnnouncement(refresh bool,
|
2020-03-18 02:25:02 +03:00
|
|
|
modifiers ...netann.NodeAnnModifier) (lnwire.NodeAnnouncement, error) {
|
2017-08-09 02:51:41 +03:00
|
|
|
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2017-08-05 04:32:25 +03:00
|
|
|
|
2018-09-21 05:25:28 +03:00
|
|
|
// If we don't need to refresh the announcement, then we can return a
|
|
|
|
// copy of our cached version.
|
2017-08-05 04:32:25 +03:00
|
|
|
if !refresh {
|
2017-08-08 15:47:22 +03:00
|
|
|
return *s.currentNodeAnn, nil
|
2017-08-05 04:32:25 +03:00
|
|
|
}
|
|
|
|
|
2020-03-18 02:25:02 +03:00
|
|
|
// Always update the timestamp when refreshing to ensure the update
|
|
|
|
// propagates.
|
|
|
|
modifiers = append(modifiers, netann.NodeAnnSetTimestamp)
|
2018-09-21 05:25:28 +03:00
|
|
|
|
2020-03-18 02:25:02 +03:00
|
|
|
// Otherwise, we'll sign a new update after applying all of the passed
|
|
|
|
// modifiers.
|
|
|
|
err := netann.SignNodeAnnouncement(
|
2020-04-28 11:06:21 +03:00
|
|
|
s.nodeSigner, s.identityECDH.PubKey(), s.currentNodeAnn,
|
2020-03-18 02:25:02 +03:00
|
|
|
modifiers...,
|
2017-08-05 04:32:25 +03:00
|
|
|
)
|
2018-01-31 07:30:00 +03:00
|
|
|
if err != nil {
|
|
|
|
return lnwire.NodeAnnouncement{}, err
|
|
|
|
}
|
2017-08-05 04:32:25 +03:00
|
|
|
|
2018-01-31 07:30:00 +03:00
|
|
|
return *s.currentNodeAnn, nil
|
2017-08-05 04:32:25 +03:00
|
|
|
}
|
|
|
|
|
2017-08-11 07:20:51 +03:00
|
|
|
type nodeAddresses struct {
|
|
|
|
pubKey *btcec.PublicKey
|
2018-02-03 09:24:43 +03:00
|
|
|
addresses []net.Addr
|
2017-08-11 07:20:51 +03:00
|
|
|
}
|
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
// establishPersistentConnections attempts to establish persistent connections
|
2018-09-07 01:41:23 +03:00
|
|
|
// to all our direct channel collaborators. In order to promote liveness of our
|
|
|
|
// active channels, we instruct the connection manager to attempt to establish
|
|
|
|
// and maintain persistent connections to all our direct channel counterparties.
|
2017-04-24 05:21:32 +03:00
|
|
|
func (s *server) establishPersistentConnections() error {
|
2018-09-07 01:41:23 +03:00
|
|
|
// nodeAddrsMap stores the combination of node public keys and addresses
|
|
|
|
// that we'll attempt to reconnect to. PubKey strings are used as keys
|
|
|
|
// since other PubKey forms can't be compared.
|
2017-03-25 11:40:33 +03:00
|
|
|
nodeAddrsMap := map[string]*nodeAddresses{}
|
|
|
|
|
|
|
|
// Iterate through the list of LinkNodes to find addresses we should
|
|
|
|
// attempt to connect to based on our set of previous connections. Set
|
|
|
|
// the reconnection port to the default peer port.
|
2016-12-15 05:11:31 +03:00
|
|
|
linkNodes, err := s.chanDB.FetchAllLinkNodes()
|
|
|
|
if err != nil && err != channeldb.ErrLinkNodesNotFound {
|
2017-04-24 05:21:32 +03:00
|
|
|
return err
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
for _, node := range linkNodes {
|
|
|
|
pubStr := string(node.IdentityPub.SerializeCompressed())
|
2017-03-25 11:40:33 +03:00
|
|
|
nodeAddrs := &nodeAddresses{
|
|
|
|
pubKey: node.IdentityPub,
|
|
|
|
addresses: node.Addresses,
|
|
|
|
}
|
|
|
|
nodeAddrsMap[pubStr] = nodeAddrs
|
|
|
|
}
|
2017-02-25 02:46:02 +03:00
|
|
|
|
2017-03-25 11:40:33 +03:00
|
|
|
// After checking our previous connections for addresses to connect to,
|
|
|
|
// iterate through the nodes in our channel graph to find addresses
|
|
|
|
// that have been added via NodeAnnouncement messages.
|
2017-04-24 05:21:32 +03:00
|
|
|
chanGraph := s.chanDB.ChannelGraph()
|
2017-03-25 11:40:33 +03:00
|
|
|
sourceNode, err := chanGraph.SourceNode()
|
|
|
|
if err != nil {
|
2017-04-24 05:21:32 +03:00
|
|
|
return err
|
2017-03-25 11:40:33 +03:00
|
|
|
}
|
2019-02-04 02:01:31 +03:00
|
|
|
|
2017-05-05 01:24:45 +03:00
|
|
|
// TODO(roasbeef): instead iterate over link nodes and query graph for
|
|
|
|
// each of the nodes.
|
2020-04-28 11:06:21 +03:00
|
|
|
selfPub := s.identityECDH.PubKey().SerializeCompressed()
|
2017-08-09 02:51:41 +03:00
|
|
|
err = sourceNode.ForEachChannel(nil, func(
|
2020-01-10 05:47:38 +03:00
|
|
|
tx kvdb.ReadTx,
|
2019-02-04 02:01:31 +03:00
|
|
|
chanInfo *channeldb.ChannelEdgeInfo,
|
2017-08-22 09:54:10 +03:00
|
|
|
policy, _ *channeldb.ChannelEdgePolicy) error {
|
2017-04-14 23:17:51 +03:00
|
|
|
|
2019-02-04 02:01:31 +03:00
|
|
|
// If the remote party has announced the channel to us, but we
|
|
|
|
// haven't yet, then we won't have a policy. However, we don't
|
|
|
|
// need this to connect to the peer, so we'll log it and move on.
|
|
|
|
if policy == nil {
|
|
|
|
srvrLog.Warnf("No channel policy found for "+
|
|
|
|
"ChannelPoint(%v): ", chanInfo.ChannelPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll now fetch the peer opposite from us within this
|
|
|
|
// channel so we can queue up a direct connection to them.
|
|
|
|
channelPeer, err := chanInfo.FetchOtherNode(tx, selfPub)
|
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("unable to fetch channel peer for "+
|
|
|
|
"ChannelPoint(%v): %v", chanInfo.ChannelPoint,
|
|
|
|
err)
|
|
|
|
}
|
|
|
|
|
|
|
|
pubStr := string(channelPeer.PubKeyBytes[:])
|
2017-03-25 11:40:33 +03:00
|
|
|
|
2019-02-04 02:01:31 +03:00
|
|
|
// Add all unique addresses from channel
|
|
|
|
// graph/NodeAnnouncements to the list of addresses we'll
|
|
|
|
// connect to for this peer.
|
2018-09-07 01:41:23 +03:00
|
|
|
addrSet := make(map[string]net.Addr)
|
2019-02-04 02:01:31 +03:00
|
|
|
for _, addr := range channelPeer.Addresses {
|
2018-09-04 03:16:56 +03:00
|
|
|
switch addr.(type) {
|
2018-09-07 01:41:23 +03:00
|
|
|
case *net.TCPAddr:
|
2018-09-04 03:16:56 +03:00
|
|
|
addrSet[addr.String()] = addr
|
2018-09-07 01:41:23 +03:00
|
|
|
|
|
|
|
// We'll only attempt to connect to Tor addresses if Tor
|
|
|
|
// outbound support is enabled.
|
|
|
|
case *tor.OnionAddr:
|
2020-05-14 15:18:11 +03:00
|
|
|
if s.cfg.Tor.Active {
|
2018-09-07 01:41:23 +03:00
|
|
|
addrSet[addr.String()] = addr
|
|
|
|
}
|
2018-09-04 03:16:56 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If this peer is also recorded as a link node, we'll add any
|
|
|
|
// additional addresses that have not already been selected.
|
2017-03-25 11:40:33 +03:00
|
|
|
linkNodeAddrs, ok := nodeAddrsMap[pubStr]
|
|
|
|
if ok {
|
|
|
|
for _, lnAddress := range linkNodeAddrs.addresses {
|
2018-09-04 03:16:56 +03:00
|
|
|
switch lnAddress.(type) {
|
2018-09-07 01:41:23 +03:00
|
|
|
case *net.TCPAddr:
|
2018-09-04 03:16:56 +03:00
|
|
|
addrSet[lnAddress.String()] = lnAddress
|
2018-09-07 01:41:23 +03:00
|
|
|
|
|
|
|
// We'll only attempt to connect to Tor
|
|
|
|
// addresses if Tor outbound support is enabled.
|
|
|
|
case *tor.OnionAddr:
|
2020-05-14 15:18:11 +03:00
|
|
|
if s.cfg.Tor.Active {
|
2018-09-07 01:41:23 +03:00
|
|
|
addrSet[lnAddress.String()] = lnAddress
|
|
|
|
}
|
2017-03-25 11:40:33 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-04 03:16:56 +03:00
|
|
|
// Construct a slice of the deduped addresses.
|
|
|
|
var addrs []net.Addr
|
|
|
|
for _, addr := range addrSet {
|
|
|
|
addrs = append(addrs, addr)
|
|
|
|
}
|
|
|
|
|
2018-01-31 07:30:00 +03:00
|
|
|
n := &nodeAddresses{
|
2017-03-25 11:40:33 +03:00
|
|
|
addresses: addrs,
|
|
|
|
}
|
2019-02-04 02:01:31 +03:00
|
|
|
n.pubKey, err = channelPeer.PubKey()
|
2018-01-31 07:30:00 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2017-03-25 11:40:33 +03:00
|
|
|
|
2018-01-31 07:30:00 +03:00
|
|
|
nodeAddrsMap[pubStr] = n
|
2017-03-25 11:40:33 +03:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil && err != channeldb.ErrGraphNoEdgesFound {
|
2017-04-24 05:21:32 +03:00
|
|
|
return err
|
2017-03-25 11:40:33 +03:00
|
|
|
}
|
|
|
|
|
2018-01-23 03:04:40 +03:00
|
|
|
// Acquire and hold server lock until all persistent connection requests
|
|
|
|
// have been recorded and sent to the connection manager.
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2017-03-25 11:40:33 +03:00
|
|
|
// Iterate through the combined list of addresses from prior links and
|
|
|
|
// node announcements and attempt to reconnect to each node.
|
2019-04-04 12:25:31 +03:00
|
|
|
var numOutboundConns int
|
2017-03-25 11:40:33 +03:00
|
|
|
for pubStr, nodeAddr := range nodeAddrsMap {
|
2017-04-24 05:21:32 +03:00
|
|
|
// Add this peer to the set of peers we should maintain a
|
2019-02-15 06:17:52 +03:00
|
|
|
// persistent connection with. We set the value to false to
|
|
|
|
// indicate that we should not continue to reconnect if the
|
|
|
|
// number of channels returns to zero, since this peer has not
|
|
|
|
// been requested as perm by the user.
|
|
|
|
s.persistentPeers[pubStr] = false
|
2018-02-01 11:48:38 +03:00
|
|
|
if _, ok := s.persistentPeersBackoff[pubStr]; !ok {
|
2020-05-14 15:18:11 +03:00
|
|
|
s.persistentPeersBackoff[pubStr] = s.cfg.MinBackoff
|
2018-02-01 11:48:38 +03:00
|
|
|
}
|
2017-04-24 05:21:32 +03:00
|
|
|
|
2017-03-25 11:40:33 +03:00
|
|
|
for _, address := range nodeAddr.addresses {
|
|
|
|
// Create a wrapper address which couples the IP and
|
|
|
|
// the pubkey so the brontide authenticated connection
|
|
|
|
// can be established.
|
2017-02-25 02:46:02 +03:00
|
|
|
lnAddr := &lnwire.NetAddress{
|
2017-03-25 11:40:33 +03:00
|
|
|
IdentityKey: nodeAddr.pubKey,
|
2017-02-25 02:46:02 +03:00
|
|
|
Address: address,
|
|
|
|
}
|
2017-03-25 11:40:33 +03:00
|
|
|
srvrLog.Debugf("Attempting persistent connection to "+
|
|
|
|
"channel peer %v", lnAddr)
|
2017-04-01 15:33:17 +03:00
|
|
|
|
|
|
|
// Send the persistent connection request to the
|
|
|
|
// connection manager, saving the request itself so we
|
|
|
|
// can cancel/restart the process as needed.
|
2017-02-25 02:46:02 +03:00
|
|
|
connReq := &connmgr.ConnReq{
|
|
|
|
Addr: lnAddr,
|
|
|
|
Permanent: true,
|
|
|
|
}
|
2017-03-25 11:40:33 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
s.persistentConnReqs[pubStr] = append(
|
|
|
|
s.persistentConnReqs[pubStr], connReq)
|
2017-04-24 05:21:32 +03:00
|
|
|
|
2019-04-04 12:25:31 +03:00
|
|
|
// We'll connect to the first 10 peers immediately, then
|
|
|
|
// randomly stagger any remaining connections if the
|
|
|
|
// stagger initial reconnect flag is set. This ensures
|
|
|
|
// that mobile nodes or nodes with a small number of
|
|
|
|
// channels obtain connectivity quickly, but larger
|
|
|
|
// nodes are able to disperse the costs of connecting to
|
|
|
|
// all peers at once.
|
|
|
|
if numOutboundConns < numInstantInitReconnect ||
|
2020-05-14 15:18:11 +03:00
|
|
|
!s.cfg.StaggerInitialReconnect {
|
2019-04-04 12:25:31 +03:00
|
|
|
|
|
|
|
go s.connMgr.Connect(connReq)
|
|
|
|
} else {
|
|
|
|
go s.delayInitialReconnect(connReq)
|
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
2019-04-04 12:25:31 +03:00
|
|
|
|
|
|
|
numOutboundConns++
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-04-04 12:25:31 +03:00
|
|
|
// delayInitialReconnect will attempt a reconnection using the passed connreq
|
|
|
|
// after sampling a value for the delay between 0s and the
|
|
|
|
// maxInitReconnectDelay.
|
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
|
|
|
func (s *server) delayInitialReconnect(connReq *connmgr.ConnReq) {
|
|
|
|
delay := time.Duration(prand.Intn(maxInitReconnectDelay)) * time.Second
|
|
|
|
select {
|
|
|
|
case <-time.After(delay):
|
|
|
|
s.connMgr.Connect(connReq)
|
|
|
|
case <-s.quit:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-06-15 22:33:24 +03:00
|
|
|
// prunePersistentPeerConnection removes all internal state related to
|
|
|
|
// persistent connections to a peer within the server. This is used to avoid
|
|
|
|
// persistent connection retries to peers we do not have any open channels with.
|
|
|
|
func (s *server) prunePersistentPeerConnection(compressedPubKey [33]byte) {
|
|
|
|
pubKeyStr := string(compressedPubKey[:])
|
|
|
|
|
|
|
|
s.mu.Lock()
|
2019-02-15 06:17:52 +03:00
|
|
|
if perm, ok := s.persistentPeers[pubKeyStr]; ok && !perm {
|
|
|
|
delete(s.persistentPeers, pubKeyStr)
|
|
|
|
delete(s.persistentPeersBackoff, pubKeyStr)
|
|
|
|
s.cancelConnReqs(pubKeyStr, nil)
|
|
|
|
s.mu.Unlock()
|
|
|
|
|
|
|
|
srvrLog.Infof("Pruned peer %x from persistent connections, "+
|
|
|
|
"peer has no open channels", compressedPubKey)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
2018-06-15 22:33:24 +03:00
|
|
|
s.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// BroadcastMessage sends a request to the server to broadcast a set of
|
2018-06-06 13:18:44 +03:00
|
|
|
// messages to all peers other than the one specified by the `skips` parameter.
|
2019-03-06 04:09:14 +03:00
|
|
|
// All messages sent via BroadcastMessage will be queued for lazy delivery to
|
|
|
|
// the target peers.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2019-04-05 18:36:11 +03:00
|
|
|
func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
|
2017-10-17 00:53:38 +03:00
|
|
|
msgs ...lnwire.Message) error {
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
srvrLog.Debugf("Broadcasting %v messages", len(msgs))
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2018-06-06 13:18:44 +03:00
|
|
|
// Filter out peers found in the skips map. We synchronize access to
|
|
|
|
// peersByPub throughout this process to ensure we deliver messages to
|
|
|
|
// exact set of peers present at the time of invocation.
|
|
|
|
s.mu.RLock()
|
|
|
|
peers := make([]*peer, 0, len(s.peersByPub))
|
2017-12-26 18:25:35 +03:00
|
|
|
for _, sPeer := range s.peersByPub {
|
|
|
|
if skips != nil {
|
|
|
|
if _, ok := skips[sPeer.pubKeyBytes]; ok {
|
|
|
|
srvrLog.Tracef("Skipping %x in broadcast",
|
|
|
|
sPeer.pubKeyBytes[:])
|
|
|
|
continue
|
|
|
|
}
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
|
|
|
|
2018-06-06 13:18:44 +03:00
|
|
|
peers = append(peers, sPeer)
|
|
|
|
}
|
|
|
|
s.mu.RUnlock()
|
|
|
|
|
|
|
|
// Iterate over all known peers, dispatching a go routine to enqueue
|
|
|
|
// all messages to each of peers.
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for _, sPeer := range peers {
|
2017-10-17 00:53:38 +03:00
|
|
|
// Dispatch a go routine to enqueue all messages to this peer.
|
|
|
|
wg.Add(1)
|
|
|
|
s.wg.Add(1)
|
2019-03-06 04:09:14 +03:00
|
|
|
go func(p lnpeer.Peer) {
|
|
|
|
defer s.wg.Done()
|
|
|
|
defer wg.Done()
|
|
|
|
|
|
|
|
p.SendMessageLazy(false, msgs...)
|
|
|
|
}(sPeer)
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
2017-08-11 07:20:51 +03:00
|
|
|
|
2017-10-17 00:53:38 +03:00
|
|
|
// Wait for all messages to have been dispatched before returning to
|
|
|
|
// caller.
|
|
|
|
wg.Wait()
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
return nil
|
2016-12-27 08:42:23 +03:00
|
|
|
}
|
|
|
|
|
2017-09-13 15:38:06 +03:00
|
|
|
// NotifyWhenOnline can be called by other subsystems to get notified when a
|
2018-07-06 04:03:04 +03:00
|
|
|
// particular peer comes online. The peer itself is sent across the peerChan.
|
2017-09-13 15:38:06 +03:00
|
|
|
//
|
|
|
|
// NOTE: This function is safe for concurrent access.
|
2019-05-31 00:37:30 +03:00
|
|
|
func (s *server) NotifyWhenOnline(peerKey [33]byte,
|
2018-07-06 04:03:04 +03:00
|
|
|
peerChan chan<- lnpeer.Peer) {
|
|
|
|
|
2017-09-13 15:38:06 +03:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
// Compute the target peer's identifier.
|
2019-05-31 00:37:30 +03:00
|
|
|
pubStr := string(peerKey[:])
|
2017-09-13 15:38:06 +03:00
|
|
|
|
|
|
|
// Check if peer is connected.
|
2018-07-06 04:03:04 +03:00
|
|
|
peer, ok := s.peersByPub[pubStr]
|
2017-09-13 15:38:06 +03:00
|
|
|
if ok {
|
|
|
|
// Connected, can return early.
|
2019-05-31 00:37:30 +03:00
|
|
|
srvrLog.Debugf("Notifying that peer %x is online", peerKey)
|
2018-07-06 04:03:04 +03:00
|
|
|
|
|
|
|
select {
|
|
|
|
case peerChan <- peer:
|
|
|
|
case <-s.quit:
|
|
|
|
}
|
|
|
|
|
2017-09-13 15:38:06 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Not connected, store this listener such that it can be notified when
|
|
|
|
// the peer comes online.
|
|
|
|
s.peerConnectedListeners[pubStr] = append(
|
2018-07-06 04:03:04 +03:00
|
|
|
s.peerConnectedListeners[pubStr], peerChan,
|
|
|
|
)
|
2017-09-13 15:38:06 +03:00
|
|
|
}
|
|
|
|
|
2019-01-10 07:16:27 +03:00
|
|
|
// NotifyWhenOffline delivers a notification to the caller of when the peer with
|
|
|
|
// the given public key has been disconnected. The notification is signaled by
|
|
|
|
// closing the channel returned.
|
|
|
|
func (s *server) NotifyWhenOffline(peerPubKey [33]byte) <-chan struct{} {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
c := make(chan struct{})
|
|
|
|
|
|
|
|
// If the peer is already offline, we can immediately trigger the
|
|
|
|
// notification.
|
|
|
|
peerPubKeyStr := string(peerPubKey[:])
|
|
|
|
if _, ok := s.peersByPub[peerPubKeyStr]; !ok {
|
|
|
|
srvrLog.Debugf("Notifying that peer %x is offline", peerPubKey)
|
|
|
|
close(c)
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, the peer is online, so we'll keep track of the channel to
|
|
|
|
// trigger the notification once the server detects the peer
|
|
|
|
// disconnects.
|
|
|
|
s.peerDisconnectedListeners[peerPubKeyStr] = append(
|
|
|
|
s.peerDisconnectedListeners[peerPubKeyStr], c,
|
|
|
|
)
|
|
|
|
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// FindPeer will return the peer that corresponds to the passed in public key.
|
2017-01-13 06:40:38 +03:00
|
|
|
// This function is used by the funding manager, allowing it to update the
|
|
|
|
// daemon's local representation of the remote peer.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer, error) {
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
pubStr := string(peerKey.SerializeCompressed())
|
2017-01-13 06:40:38 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
return s.findPeerByPubStr(pubStr)
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// FindPeerByPubStr will return the peer that corresponds to the passed peerID,
|
|
|
|
// which should be a string representation of the peer's serialized, compressed
|
|
|
|
// public key.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2017-08-31 11:15:39 +03:00
|
|
|
func (s *server) FindPeerByPubStr(pubStr string) (*peer, error) {
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2017-01-13 06:40:38 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
return s.findPeerByPubStr(pubStr)
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// findPeerByPubStr is an internal method that retrieves the specified peer from
|
|
|
|
// the server's internal state using.
|
|
|
|
func (s *server) findPeerByPubStr(pubStr string) (*peer, error) {
|
|
|
|
peer, ok := s.peersByPub[pubStr]
|
|
|
|
if !ok {
|
2018-04-15 23:19:15 +03:00
|
|
|
return nil, ErrPeerNotConnected
|
2017-01-13 06:40:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return peer, nil
|
|
|
|
}
|
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
// nextPeerBackoff computes the next backoff duration for a peer's pubkey using
|
|
|
|
// exponential backoff. If no previous backoff was known, the default is
|
|
|
|
// returned.
|
2018-08-02 10:23:03 +03:00
|
|
|
func (s *server) nextPeerBackoff(pubStr string,
|
|
|
|
startTime time.Time) time.Duration {
|
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
// Now, determine the appropriate backoff to use for the retry.
|
|
|
|
backoff, ok := s.persistentPeersBackoff[pubStr]
|
|
|
|
if !ok {
|
|
|
|
// If an existing backoff was unknown, use the default.
|
2020-05-14 15:18:11 +03:00
|
|
|
return s.cfg.MinBackoff
|
2018-03-31 02:19:52 +03:00
|
|
|
}
|
|
|
|
|
2018-08-02 10:23:03 +03:00
|
|
|
// If the peer failed to start properly, we'll just use the previous
|
|
|
|
// backoff to compute the subsequent randomized exponential backoff
|
|
|
|
// duration. This will roughly double on average.
|
|
|
|
if startTime.IsZero() {
|
2020-05-14 15:18:11 +03:00
|
|
|
return computeNextBackoff(backoff, s.cfg.MaxBackoff)
|
2018-08-02 10:23:03 +03:00
|
|
|
}
|
|
|
|
|
2018-09-02 00:09:16 +03:00
|
|
|
// The peer succeeded in starting. If the connection didn't last long
|
|
|
|
// enough to be considered stable, we'll continue to back off retries
|
|
|
|
// with this peer.
|
2020-04-14 18:57:44 +03:00
|
|
|
connDuration := time.Since(startTime)
|
2018-09-02 00:09:16 +03:00
|
|
|
if connDuration < defaultStableConnDuration {
|
2020-05-14 15:18:11 +03:00
|
|
|
return computeNextBackoff(backoff, s.cfg.MaxBackoff)
|
2018-09-02 00:09:16 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// The peer succeed in starting and this was stable peer, so we'll
|
|
|
|
// reduce the timeout duration by the length of the connection after
|
|
|
|
// applying randomized exponential backoff. We'll only apply this in the
|
|
|
|
// case that:
|
2019-01-04 05:38:36 +03:00
|
|
|
// reb(curBackoff) - connDuration > cfg.MinBackoff
|
2020-05-14 15:18:11 +03:00
|
|
|
relaxedBackoff := computeNextBackoff(backoff, s.cfg.MaxBackoff) - connDuration
|
|
|
|
if relaxedBackoff > s.cfg.MinBackoff {
|
2018-09-02 00:09:16 +03:00
|
|
|
return relaxedBackoff
|
2018-08-02 10:23:03 +03:00
|
|
|
}
|
|
|
|
|
2019-01-04 05:38:36 +03:00
|
|
|
// Lastly, if reb(currBackoff) - connDuration <= cfg.MinBackoff, meaning
|
2018-09-02 00:09:16 +03:00
|
|
|
// the stable connection lasted much longer than our previous backoff.
|
|
|
|
// To reward such good behavior, we'll reconnect after the default
|
|
|
|
// timeout.
|
2020-05-14 15:18:11 +03:00
|
|
|
return s.cfg.MinBackoff
|
2018-03-31 02:19:52 +03:00
|
|
|
}
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
// shouldDropConnection determines if our local connection to a remote peer
|
|
|
|
// should be dropped in the case of concurrent connection establishment. In
|
|
|
|
// order to deterministically decide which connection should be dropped, we'll
|
|
|
|
// utilize the ordering of the local and remote public key. If we didn't use
|
|
|
|
// such a tie breaker, then we risk _both_ connections erroneously being
|
|
|
|
// dropped.
|
|
|
|
func shouldDropLocalConnection(local, remote *btcec.PublicKey) bool {
|
|
|
|
localPubBytes := local.SerializeCompressed()
|
|
|
|
remotePubPbytes := remote.SerializeCompressed()
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// The connection that comes from the node with a "smaller" pubkey
|
|
|
|
// should be kept. Therefore, if our pubkey is "greater" than theirs, we
|
|
|
|
// should drop our established connection.
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
return bytes.Compare(localPubBytes, remotePubPbytes) > 0
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// InboundPeerConnected initializes a new peer in response to a new inbound
|
2016-12-15 05:11:31 +03:00
|
|
|
// connection.
|
2017-08-09 02:51:41 +03:00
|
|
|
//
|
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) InboundPeerConnected(conn net.Conn) {
|
|
|
|
// Exit early if we have already been instructed to shutdown, this
|
|
|
|
// prevents any delayed callbacks from accidentally registering peers.
|
|
|
|
if s.Stopped() {
|
|
|
|
return
|
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2017-05-11 03:42:53 +03:00
|
|
|
nodePub := conn.(*brontide.Conn).RemotePub()
|
|
|
|
pubStr := string(nodePub.SerializeCompressed())
|
2017-08-09 02:51:41 +03:00
|
|
|
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2018-06-09 06:34:31 +03:00
|
|
|
// If we already have an outbound connection to this peer, then ignore
|
2017-08-09 02:51:41 +03:00
|
|
|
// this new connection.
|
2018-06-09 06:34:31 +03:00
|
|
|
if _, ok := s.outboundPeers[pubStr]; ok {
|
2018-06-30 03:22:03 +03:00
|
|
|
srvrLog.Debugf("Already have outbound connection for %x, "+
|
|
|
|
"ignoring inbound connection",
|
|
|
|
nodePub.SerializeCompressed())
|
2018-06-09 06:34:31 +03:00
|
|
|
|
2017-05-11 03:42:53 +03:00
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-05-08 06:18:15 +03:00
|
|
|
// If we already have a valid connection that is scheduled to take
|
|
|
|
// precedence once the prior peer has finished disconnecting, we'll
|
|
|
|
// ignore this connection.
|
|
|
|
if _, ok := s.scheduledPeerConnection[pubStr]; ok {
|
|
|
|
srvrLog.Debugf("Ignoring connection, peer already scheduled")
|
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-14 00:41:54 +03:00
|
|
|
srvrLog.Infof("New inbound connection from %v", conn.RemoteAddr())
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2017-10-06 02:14:07 +03:00
|
|
|
// Check to see if we already have a connection with this peer. If so,
|
|
|
|
// we may need to drop our existing connection. This prevents us from
|
|
|
|
// having duplicate connections to the same peer. We forgo adding a
|
|
|
|
// default case as we expect these to be the only error values returned
|
|
|
|
// from findPeerByPubStr.
|
2017-08-31 11:15:39 +03:00
|
|
|
connectedPeer, err := s.findPeerByPubStr(pubStr)
|
|
|
|
switch err {
|
2018-04-15 23:19:15 +03:00
|
|
|
case ErrPeerNotConnected:
|
2017-08-31 11:15:39 +03:00
|
|
|
// We were unable to locate an existing connection with the
|
|
|
|
// target peer, proceed to connect.
|
2018-06-09 07:16:21 +03:00
|
|
|
s.cancelConnReqs(pubStr, nil)
|
2018-04-03 08:16:04 +03:00
|
|
|
s.peerConnected(conn, nil, true)
|
2017-08-31 11:15:39 +03:00
|
|
|
|
|
|
|
case nil:
|
2017-10-06 02:14:07 +03:00
|
|
|
// We already have a connection with the incoming peer. If the
|
2019-03-27 02:41:30 +03:00
|
|
|
// connection we've already established should be kept and is
|
|
|
|
// not of the same type of the new connection (inbound), then
|
|
|
|
// we'll close out the new connection s.t there's only a single
|
|
|
|
// connection between us.
|
2020-04-28 11:06:21 +03:00
|
|
|
localPub := s.identityECDH.PubKey()
|
2019-03-27 02:41:30 +03:00
|
|
|
if !connectedPeer.inbound &&
|
|
|
|
!shouldDropLocalConnection(localPub, nodePub) {
|
|
|
|
|
|
|
|
srvrLog.Warnf("Received inbound connection from "+
|
|
|
|
"peer %v, but already have outbound "+
|
|
|
|
"connection, dropping conn", connectedPeer)
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, if we should drop the connection, then we'll
|
2017-10-06 02:14:07 +03:00
|
|
|
// disconnect our already connected peer.
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
srvrLog.Debugf("Disconnecting stale connection to %v",
|
|
|
|
connectedPeer)
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2018-06-09 07:16:21 +03:00
|
|
|
s.cancelConnReqs(pubStr, nil)
|
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// Remove the current peer from the server's internal state and
|
|
|
|
// signal that the peer termination watcher does not need to
|
|
|
|
// execute for this peer.
|
2017-08-09 02:51:41 +03:00
|
|
|
s.removePeer(connectedPeer)
|
2017-08-31 11:15:39 +03:00
|
|
|
s.ignorePeerTermination[connectedPeer] = struct{}{}
|
2018-05-08 06:18:15 +03:00
|
|
|
s.scheduledPeerConnection[pubStr] = func() {
|
2018-04-03 08:16:04 +03:00
|
|
|
s.peerConnected(conn, nil, true)
|
2018-05-08 06:18:15 +03:00
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// OutboundPeerConnected initializes a new peer in response to a new outbound
|
2016-12-15 05:11:31 +03:00
|
|
|
// connection.
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) {
|
|
|
|
// Exit early if we have already been instructed to shutdown, this
|
|
|
|
// prevents any delayed callbacks from accidentally registering peers.
|
|
|
|
if s.Stopped() {
|
|
|
|
return
|
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
|
|
|
|
nodePub := conn.(*brontide.Conn).RemotePub()
|
|
|
|
pubStr := string(nodePub.SerializeCompressed())
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2018-06-09 06:34:31 +03:00
|
|
|
// If we already have an inbound connection to this peer, then ignore
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
// this new connection.
|
2018-06-09 06:34:31 +03:00
|
|
|
if _, ok := s.inboundPeers[pubStr]; ok {
|
2018-06-30 03:22:03 +03:00
|
|
|
srvrLog.Debugf("Already have inbound connection for %x, "+
|
2018-06-09 06:34:31 +03:00
|
|
|
"ignoring outbound connection",
|
|
|
|
nodePub.SerializeCompressed())
|
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
if connReq != nil {
|
|
|
|
s.connMgr.Remove(connReq.ID())
|
|
|
|
}
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if _, ok := s.persistentConnReqs[pubStr]; !ok && connReq != nil {
|
2019-10-03 18:22:43 +03:00
|
|
|
srvrLog.Debugf("Ignoring canceled outbound connection")
|
2018-03-31 02:19:52 +03:00
|
|
|
s.connMgr.Remove(connReq.ID())
|
2017-01-24 07:32:49 +03:00
|
|
|
conn.Close()
|
2016-12-15 05:11:31 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-05-08 06:18:15 +03:00
|
|
|
// If we already have a valid connection that is scheduled to take
|
|
|
|
// precedence once the prior peer has finished disconnecting, we'll
|
|
|
|
// ignore this connection.
|
|
|
|
if _, ok := s.scheduledPeerConnection[pubStr]; ok {
|
|
|
|
srvrLog.Debugf("Ignoring connection, peer already scheduled")
|
2018-06-09 06:36:41 +03:00
|
|
|
|
|
|
|
if connReq != nil {
|
|
|
|
s.connMgr.Remove(connReq.ID())
|
|
|
|
}
|
|
|
|
|
2018-05-08 06:18:15 +03:00
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-03-27 02:41:13 +03:00
|
|
|
srvrLog.Infof("Established connection to: %x@%v", pubStr,
|
|
|
|
conn.RemoteAddr())
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
if connReq != nil {
|
|
|
|
// A successful connection was returned by the connmgr.
|
|
|
|
// Immediately cancel all pending requests, excluding the
|
|
|
|
// outbound connection we just established.
|
|
|
|
ignore := connReq.ID()
|
|
|
|
s.cancelConnReqs(pubStr, &ignore)
|
|
|
|
} else {
|
|
|
|
// This was a successful connection made by some other
|
|
|
|
// subsystem. Remove all requests being managed by the connmgr.
|
|
|
|
s.cancelConnReqs(pubStr, nil)
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
}
|
|
|
|
|
2017-10-06 02:14:07 +03:00
|
|
|
// If we already have a connection with this peer, decide whether or not
|
|
|
|
// we need to drop the stale connection. We forgo adding a default case
|
|
|
|
// as we expect these to be the only error values returned from
|
|
|
|
// findPeerByPubStr.
|
2017-08-31 11:15:39 +03:00
|
|
|
connectedPeer, err := s.findPeerByPubStr(pubStr)
|
|
|
|
switch err {
|
2018-04-15 23:19:15 +03:00
|
|
|
case ErrPeerNotConnected:
|
2017-08-31 11:15:39 +03:00
|
|
|
// We were unable to locate an existing connection with the
|
|
|
|
// target peer, proceed to connect.
|
2018-04-03 08:16:04 +03:00
|
|
|
s.peerConnected(conn, connReq, false)
|
2017-08-31 11:15:39 +03:00
|
|
|
|
|
|
|
case nil:
|
2018-10-16 17:39:38 +03:00
|
|
|
// We already have a connection with the incoming peer. If the
|
2019-03-27 02:41:30 +03:00
|
|
|
// connection we've already established should be kept and is
|
|
|
|
// not of the same type of the new connection (outbound), then
|
|
|
|
// we'll close out the new connection s.t there's only a single
|
|
|
|
// connection between us.
|
2020-04-28 11:06:21 +03:00
|
|
|
localPub := s.identityECDH.PubKey()
|
2019-03-27 02:41:30 +03:00
|
|
|
if connectedPeer.inbound &&
|
|
|
|
shouldDropLocalConnection(localPub, nodePub) {
|
|
|
|
|
|
|
|
srvrLog.Warnf("Established outbound connection to "+
|
|
|
|
"peer %v, but already have inbound "+
|
|
|
|
"connection, dropping conn", connectedPeer)
|
2017-08-16 03:49:10 +03:00
|
|
|
if connReq != nil {
|
|
|
|
s.connMgr.Remove(connReq.ID())
|
|
|
|
}
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, _their_ connection should be dropped. So we'll
|
|
|
|
// disconnect the peer and send the now obsolete peer to the
|
|
|
|
// server for garbage collection.
|
|
|
|
srvrLog.Debugf("Disconnecting stale connection to %v",
|
|
|
|
connectedPeer)
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// Remove the current peer from the server's internal state and
|
|
|
|
// signal that the peer termination watcher does not need to
|
|
|
|
// execute for this peer.
|
2017-08-09 02:51:41 +03:00
|
|
|
s.removePeer(connectedPeer)
|
2017-08-31 11:15:39 +03:00
|
|
|
s.ignorePeerTermination[connectedPeer] = struct{}{}
|
2018-05-08 06:18:15 +03:00
|
|
|
s.scheduledPeerConnection[pubStr] = func() {
|
2018-04-03 08:16:04 +03:00
|
|
|
s.peerConnected(conn, connReq, false)
|
2018-05-08 06:18:15 +03:00
|
|
|
}
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
// UnassignedConnID is the default connection ID that a request can have before
|
|
|
|
// it actually is submitted to the connmgr.
|
|
|
|
// TODO(conner): move into connmgr package, or better, add connmgr method for
|
|
|
|
// generating atomic IDs
|
|
|
|
const UnassignedConnID uint64 = 0
|
|
|
|
|
|
|
|
// cancelConnReqs stops all persistent connection requests for a given pubkey.
|
|
|
|
// Any attempts initiated by the peerTerminationWatcher are canceled first.
|
|
|
|
// Afterwards, each connection request removed from the connmgr. The caller can
|
|
|
|
// optionally specify a connection ID to ignore, which prevents us from
|
|
|
|
// canceling a successful request. All persistent connreqs for the provided
|
|
|
|
// pubkey are discarded after the operationjw.
|
|
|
|
func (s *server) cancelConnReqs(pubStr string, skip *uint64) {
|
|
|
|
// First, cancel any lingering persistent retry attempts, which will
|
|
|
|
// prevent retries for any with backoffs that are still maturing.
|
|
|
|
if cancelChan, ok := s.persistentRetryCancels[pubStr]; ok {
|
|
|
|
close(cancelChan)
|
|
|
|
delete(s.persistentRetryCancels, pubStr)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, check to see if we have any outstanding persistent connection
|
|
|
|
// requests to this peer. If so, then we'll remove all of these
|
|
|
|
// connection requests, and also delete the entry from the map.
|
|
|
|
connReqs, ok := s.persistentConnReqs[pubStr]
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, connReq := range connReqs {
|
|
|
|
// Atomically capture the current request identifier.
|
|
|
|
connID := connReq.ID()
|
|
|
|
|
|
|
|
// Skip any zero IDs, this indicates the request has not
|
|
|
|
// yet been schedule.
|
|
|
|
if connID == UnassignedConnID {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// Skip a particular connection ID if instructed.
|
|
|
|
if skip != nil && connID == *skip {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
s.connMgr.Remove(connID)
|
|
|
|
}
|
|
|
|
|
|
|
|
delete(s.persistentConnReqs, pubStr)
|
|
|
|
}
|
|
|
|
|
2018-08-02 03:31:26 +03:00
|
|
|
// peerConnected is a function that handles initialization a newly connected
|
|
|
|
// peer by adding it to the server's global list of all active peers, and
|
|
|
|
// starting all the goroutines the peer needs to function properly. The inbound
|
|
|
|
// boolean should be true if the peer initiated the connection to us.
|
|
|
|
func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
|
|
|
|
inbound bool) {
|
|
|
|
|
|
|
|
brontideConn := conn.(*brontide.Conn)
|
|
|
|
addr := conn.RemoteAddr()
|
|
|
|
pubKey := brontideConn.RemotePub()
|
|
|
|
|
2019-03-27 02:41:13 +03:00
|
|
|
srvrLog.Infof("Finalizing connection to %x@%s, inbound=%v",
|
|
|
|
pubKey.SerializeCompressed(), addr, inbound)
|
2018-08-02 03:31:26 +03:00
|
|
|
|
|
|
|
peerAddr := &lnwire.NetAddress{
|
|
|
|
IdentityKey: pubKey,
|
|
|
|
Address: addr,
|
|
|
|
ChainNet: activeNetParams.Net,
|
|
|
|
}
|
|
|
|
|
2019-11-08 16:29:29 +03:00
|
|
|
// With the brontide connection established, we'll now craft the feature
|
|
|
|
// vectors to advertise to the remote node.
|
|
|
|
initFeatures := s.featureMgr.Get(feature.SetInit)
|
|
|
|
legacyFeatures := s.featureMgr.Get(feature.SetLegacyGlobal)
|
2018-08-02 03:31:26 +03:00
|
|
|
|
2020-03-17 09:22:35 +03:00
|
|
|
// Lookup past error caches for the peer in the server. If no buffer is
|
|
|
|
// found, create a fresh buffer.
|
|
|
|
pkStr := string(peerAddr.IdentityKey.SerializeCompressed())
|
|
|
|
errBuffer, ok := s.peerErrors[pkStr]
|
|
|
|
if !ok {
|
|
|
|
var err error
|
|
|
|
errBuffer, err = queue.NewCircularBuffer(errorBufferSize)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to create peer %v", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-26 14:05:43 +03:00
|
|
|
// Now that we've established a connection, create a peer, and it to the
|
2019-04-03 13:18:19 +03:00
|
|
|
// set of currently active peers. Configure the peer with the incoming
|
|
|
|
// and outgoing broadcast deltas to prevent htlcs from being accepted or
|
|
|
|
// offered that would trigger channel closure. In case of outgoing
|
|
|
|
// htlcs, an extra block is added to prevent the channel from being
|
|
|
|
// closed when the htlc is outstanding and a new block comes in.
|
2019-02-15 04:13:44 +03:00
|
|
|
p, err := newPeer(
|
2020-05-14 15:26:07 +03:00
|
|
|
s.cfg, conn, connReq, s, peerAddr, inbound, initFeatures,
|
2020-05-14 15:18:11 +03:00
|
|
|
legacyFeatures, s.cfg.ChanEnableTimeout,
|
2020-05-13 16:47:45 +03:00
|
|
|
lncfg.DefaultOutgoingCltvRejectDelta, errBuffer,
|
2019-02-15 04:13:44 +03:00
|
|
|
)
|
2018-08-02 03:31:26 +03:00
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to create peer %v", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(roasbeef): update IP address for link-node
|
|
|
|
// * also mark last-seen, do it one single transaction?
|
|
|
|
|
|
|
|
s.addPeer(p)
|
|
|
|
|
2020-03-17 09:22:35 +03:00
|
|
|
// Once we have successfully added the peer to the server, we can
|
|
|
|
// delete the previous error buffer from the server's map of error
|
|
|
|
// buffers.
|
|
|
|
delete(s.peerErrors, pkStr)
|
|
|
|
|
2018-08-02 03:31:26 +03:00
|
|
|
// Dispatch a goroutine to asynchronously start the peer. This process
|
|
|
|
// includes sending and receiving Init messages, which would be a DOS
|
|
|
|
// vector if we held the server's mutex throughout the procedure.
|
|
|
|
s.wg.Add(1)
|
|
|
|
go s.peerInitializer(p)
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// addPeer adds the passed peer to the server's global state of all active
|
|
|
|
// peers.
|
|
|
|
func (s *server) addPeer(p *peer) {
|
|
|
|
if p == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ignore new peers if we're shutting down.
|
2017-08-31 11:15:39 +03:00
|
|
|
if s.Stopped() {
|
|
|
|
p.Disconnect(ErrServerShuttingDown)
|
2016-06-21 21:52:09 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
// Track the new peer in our indexes so we can quickly look it up either
|
2018-02-07 06:13:07 +03:00
|
|
|
// according to its public key, or its peer ID.
|
2016-12-27 08:42:23 +03:00
|
|
|
// TODO(roasbeef): pipe all requests through to the
|
|
|
|
// queryHandler/peerManager
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
2019-07-29 17:59:48 +03:00
|
|
|
pubSer := p.addr.IdentityKey.SerializeCompressed()
|
|
|
|
pubStr := string(pubSer)
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
|
|
|
s.peersByPub[pubStr] = p
|
|
|
|
|
|
|
|
if p.inbound {
|
|
|
|
s.inboundPeers[pubStr] = p
|
|
|
|
} else {
|
|
|
|
s.outboundPeers[pubStr] = p
|
|
|
|
}
|
2019-07-29 17:59:48 +03:00
|
|
|
|
|
|
|
// Inform the peer notifier of a peer online event so that it can be reported
|
|
|
|
// to clients listening for peer events.
|
|
|
|
var pubKey [33]byte
|
|
|
|
copy(pubKey[:], pubSer)
|
|
|
|
|
|
|
|
s.peerNotifier.NotifyPeerOnline(pubKey)
|
2018-07-31 06:58:16 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// peerInitializer asynchronously starts a newly connected peer after it has
|
|
|
|
// been added to the server's peer map. This method sets up a
|
|
|
|
// peerTerminationWatcher for the given peer, and ensures that it executes even
|
|
|
|
// if the peer failed to start. In the event of a successful connection, this
|
|
|
|
// method reads the negotiated, local feature-bits and spawns the appropriate
|
|
|
|
// graph synchronization method. Any registered clients of NotifyWhenOnline will
|
|
|
|
// be signaled of the new peer once the method returns.
|
|
|
|
//
|
|
|
|
// NOTE: This MUST be launched as a goroutine.
|
|
|
|
func (s *server) peerInitializer(p *peer) {
|
|
|
|
defer s.wg.Done()
|
|
|
|
|
|
|
|
// Avoid initializing peers while the server is exiting.
|
|
|
|
if s.Stopped() {
|
|
|
|
return
|
|
|
|
}
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
2018-07-31 06:58:16 +03:00
|
|
|
// Create a channel that will be used to signal a successful start of
|
|
|
|
// the link. This prevents the peer termination watcher from beginning
|
|
|
|
// its duty too early.
|
|
|
|
ready := make(chan struct{})
|
|
|
|
|
|
|
|
// Before starting the peer, launch a goroutine to watch for the
|
|
|
|
// unexpected termination of this peer, which will ensure all resources
|
|
|
|
// are properly cleaned up, and re-establish persistent connections when
|
|
|
|
// necessary. The peer termination watcher will be short circuited if
|
|
|
|
// the peer is ever added to the ignorePeerTermination map, indicating
|
|
|
|
// that the server has already handled the removal of this peer.
|
2017-08-31 11:15:39 +03:00
|
|
|
s.wg.Add(1)
|
2018-07-31 06:58:16 +03:00
|
|
|
go s.peerTerminationWatcher(p, ready)
|
|
|
|
|
2018-09-06 11:48:46 +03:00
|
|
|
// Start the peer! If an error occurs, we Disconnect the peer, which
|
2018-07-31 06:58:16 +03:00
|
|
|
// will unblock the peerTerminationWatcher.
|
|
|
|
if err := p.Start(); err != nil {
|
2018-09-04 03:16:56 +03:00
|
|
|
p.Disconnect(fmt.Errorf("unable to start peer: %v", err))
|
2018-07-31 06:58:16 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, signal to the peerTerminationWatcher that the peer startup
|
|
|
|
// was successful, and to begin watching the peer's wait group.
|
|
|
|
close(ready)
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
2018-07-31 06:58:16 +03:00
|
|
|
pubStr := string(p.addr.IdentityKey.SerializeCompressed())
|
|
|
|
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2017-09-13 15:38:06 +03:00
|
|
|
// Check if there are listeners waiting for this peer to come online.
|
2019-03-27 02:41:13 +03:00
|
|
|
srvrLog.Debugf("Notifying that peer %v is online", p)
|
2018-07-06 04:03:04 +03:00
|
|
|
for _, peerChan := range s.peerConnectedListeners[pubStr] {
|
|
|
|
select {
|
|
|
|
case peerChan <- p:
|
|
|
|
case <-s.quit:
|
|
|
|
return
|
|
|
|
}
|
2017-09-13 15:38:06 +03:00
|
|
|
}
|
|
|
|
delete(s.peerConnectedListeners, pubStr)
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
|
|
|
|
2018-08-02 03:31:26 +03:00
|
|
|
// peerTerminationWatcher waits until a peer has been disconnected unexpectedly,
|
|
|
|
// and then cleans up all resources allocated to the peer, notifies relevant
|
|
|
|
// sub-systems of its demise, and finally handles re-connecting to the peer if
|
|
|
|
// it's persistent. If the server intentionally disconnects a peer, it should
|
|
|
|
// have a corresponding entry in the ignorePeerTermination map which will cause
|
|
|
|
// the cleanup routine to exit early. The passed `ready` chan is used to
|
|
|
|
// synchronize when WaitForDisconnect should begin watching on the peer's
|
|
|
|
// waitgroup. The ready chan should only be signaled if the peer starts
|
|
|
|
// successfully, otherwise the peer should be disconnected instead.
|
|
|
|
//
|
|
|
|
// NOTE: This MUST be launched as a goroutine.
|
|
|
|
func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) {
|
|
|
|
defer s.wg.Done()
|
|
|
|
|
|
|
|
p.WaitForDisconnect(ready)
|
|
|
|
|
|
|
|
srvrLog.Debugf("Peer %v has been disconnected", p)
|
|
|
|
|
|
|
|
// If the server is exiting then we can bail out early ourselves as all
|
|
|
|
// the other sub-systems will already be shutting down.
|
|
|
|
if s.Stopped() {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll cancel all pending funding reservations with this node.
|
|
|
|
// If we tried to initiate any funding flows that haven't yet finished,
|
|
|
|
// then we need to unlock those committed outputs so they're still
|
|
|
|
// available for use.
|
|
|
|
s.fundingMgr.CancelPeerReservations(p.PubKey())
|
|
|
|
|
|
|
|
pubKey := p.addr.IdentityKey
|
|
|
|
|
|
|
|
// We'll also inform the gossiper that this peer is no longer active,
|
|
|
|
// so we don't need to maintain sync state for it any longer.
|
2019-03-23 05:56:33 +03:00
|
|
|
s.authGossiper.PruneSyncState(p.PubKey())
|
2018-08-02 03:31:26 +03:00
|
|
|
|
|
|
|
// Tell the switch to remove all links associated with this peer.
|
|
|
|
// Passing nil as the target link indicates that all links associated
|
|
|
|
// with this interface should be closed.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): instead add a PurgeInterfaceLinks function?
|
|
|
|
links, err := p.server.htlcSwitch.GetLinksByInterface(p.pubKeyBytes)
|
2018-08-29 06:10:35 +03:00
|
|
|
if err != nil && err != htlcswitch.ErrNoLinksFound {
|
2019-03-27 02:41:13 +03:00
|
|
|
srvrLog.Errorf("Unable to get channel links for %v: %v", p, err)
|
2018-08-02 03:31:26 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
for _, link := range links {
|
|
|
|
p.server.htlcSwitch.RemoveLink(link.ChanID())
|
|
|
|
}
|
|
|
|
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2019-01-10 07:16:27 +03:00
|
|
|
// If there were any notification requests for when this peer
|
|
|
|
// disconnected, we can trigger them now.
|
2019-11-06 02:05:04 +03:00
|
|
|
srvrLog.Debugf("Notifying that peer %v is offline", p)
|
2019-01-10 07:16:27 +03:00
|
|
|
pubStr := string(pubKey.SerializeCompressed())
|
|
|
|
for _, offlineChan := range s.peerDisconnectedListeners[pubStr] {
|
|
|
|
close(offlineChan)
|
|
|
|
}
|
|
|
|
delete(s.peerDisconnectedListeners, pubStr)
|
|
|
|
|
2018-08-02 03:31:26 +03:00
|
|
|
// If the server has already removed this peer, we can short circuit the
|
|
|
|
// peer termination watcher and skip cleanup.
|
|
|
|
if _, ok := s.ignorePeerTermination[p]; ok {
|
|
|
|
delete(s.ignorePeerTermination, p)
|
|
|
|
|
|
|
|
pubKey := p.PubKey()
|
|
|
|
pubStr := string(pubKey[:])
|
|
|
|
|
|
|
|
// If a connection callback is present, we'll go ahead and
|
|
|
|
// execute it now that previous peer has fully disconnected. If
|
|
|
|
// the callback is not present, this likely implies the peer was
|
|
|
|
// purposefully disconnected via RPC, and that no reconnect
|
|
|
|
// should be attempted.
|
|
|
|
connCallback, ok := s.scheduledPeerConnection[pubStr]
|
|
|
|
if ok {
|
|
|
|
delete(s.scheduledPeerConnection, pubStr)
|
|
|
|
connCallback()
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// First, cleanup any remaining state the server has regarding the peer
|
|
|
|
// in question.
|
|
|
|
s.removePeer(p)
|
|
|
|
|
|
|
|
// Next, check to see if this is a persistent peer or not.
|
|
|
|
_, ok := s.persistentPeers[pubStr]
|
|
|
|
if ok {
|
|
|
|
// We'll only need to re-launch a connection request if one
|
|
|
|
// isn't already currently pending.
|
|
|
|
if _, ok := s.persistentConnReqs[pubStr]; ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll ensure that we locate an advertised address to use
|
|
|
|
// within the peer's address for reconnection purposes.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): use them all?
|
|
|
|
if p.inbound {
|
2020-04-15 02:16:26 +03:00
|
|
|
advertisedAddr, err := s.fetchNodeAdvertisedAddr(pubKey)
|
|
|
|
switch {
|
|
|
|
// We found an advertised address, so use it.
|
|
|
|
case err == nil:
|
2018-08-02 03:31:26 +03:00
|
|
|
p.addr.Address = advertisedAddr
|
2020-04-15 02:16:26 +03:00
|
|
|
|
|
|
|
// The peer doesn't have an advertised address.
|
|
|
|
case err == errNoAdvertisedAddr:
|
|
|
|
// Fall back to the existing peer address if
|
|
|
|
// we're not accepting connections over Tor.
|
|
|
|
if s.torController == nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we are, the peer's address won't be known
|
|
|
|
// to us (we'll see a private address, which is
|
|
|
|
// the address used by our onion service to dial
|
|
|
|
// to lnd), so we don't have enough information
|
|
|
|
// to attempt a reconnect.
|
|
|
|
srvrLog.Debugf("Ignoring reconnection attempt "+
|
|
|
|
"to inbound peer %v without "+
|
|
|
|
"advertised address", p)
|
|
|
|
return
|
|
|
|
|
|
|
|
// We came across an error retrieving an advertised
|
|
|
|
// address, log it, and fall back to the existing peer
|
|
|
|
// address.
|
|
|
|
default:
|
|
|
|
srvrLog.Errorf("Unable to retrieve advertised "+
|
|
|
|
"address for node %x: %v", p.PubKey(),
|
|
|
|
err)
|
2018-08-02 03:31:26 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll launch a new connection request in order to
|
|
|
|
// attempt to maintain a persistent connection with this peer.
|
|
|
|
connReq := &connmgr.ConnReq{
|
|
|
|
Addr: p.addr,
|
|
|
|
Permanent: true,
|
|
|
|
}
|
|
|
|
s.persistentConnReqs[pubStr] = append(
|
|
|
|
s.persistentConnReqs[pubStr], connReq)
|
|
|
|
|
|
|
|
// Record the computed backoff in the backoff map.
|
|
|
|
backoff := s.nextPeerBackoff(pubStr, p.StartTime())
|
|
|
|
s.persistentPeersBackoff[pubStr] = backoff
|
|
|
|
|
|
|
|
// Initialize a retry canceller for this peer if one does not
|
|
|
|
// exist.
|
|
|
|
cancelChan, ok := s.persistentRetryCancels[pubStr]
|
|
|
|
if !ok {
|
|
|
|
cancelChan = make(chan struct{})
|
|
|
|
s.persistentRetryCancels[pubStr] = cancelChan
|
|
|
|
}
|
|
|
|
|
|
|
|
// We choose not to wait group this go routine since the Connect
|
|
|
|
// call can stall for arbitrarily long if we shutdown while an
|
|
|
|
// outbound connection attempt is being made.
|
|
|
|
go func() {
|
|
|
|
srvrLog.Debugf("Scheduling connection re-establishment to "+
|
|
|
|
"persistent peer %v in %s", p, backoff)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case <-time.After(backoff):
|
|
|
|
case <-cancelChan:
|
|
|
|
return
|
|
|
|
case <-s.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
srvrLog.Debugf("Attempting to re-establish persistent "+
|
|
|
|
"connection to peer %v", p)
|
|
|
|
|
|
|
|
s.connMgr.Connect(connReq)
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// removePeer removes the passed peer from the server's state of all active
|
|
|
|
// peers.
|
|
|
|
func (s *server) removePeer(p *peer) {
|
|
|
|
if p == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
srvrLog.Debugf("removing peer %v", p)
|
|
|
|
|
2017-02-07 02:04:52 +03:00
|
|
|
// As the peer is now finished, ensure that the TCP connection is
|
|
|
|
// closed and all of its related goroutines have exited.
|
2017-08-31 11:15:39 +03:00
|
|
|
p.Disconnect(fmt.Errorf("server: disconnecting peer %v", p))
|
|
|
|
|
|
|
|
// If this peer had an active persistent connection request, remove it.
|
|
|
|
if p.connReq != nil {
|
|
|
|
s.connMgr.Remove(p.connReq.ID())
|
|
|
|
}
|
2017-02-07 02:04:52 +03:00
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// Ignore deleting peers if we're shutting down.
|
2017-08-31 11:15:39 +03:00
|
|
|
if s.Stopped() {
|
2016-06-21 21:52:09 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-07-29 17:59:48 +03:00
|
|
|
pubSer := p.addr.IdentityKey.SerializeCompressed()
|
|
|
|
pubStr := string(pubSer)
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
|
|
|
delete(s.peersByPub, pubStr)
|
|
|
|
|
|
|
|
if p.inbound {
|
|
|
|
delete(s.inboundPeers, pubStr)
|
|
|
|
} else {
|
|
|
|
delete(s.outboundPeers, pubStr)
|
|
|
|
}
|
2019-07-29 17:59:48 +03:00
|
|
|
|
2020-03-17 09:22:35 +03:00
|
|
|
// Copy the peer's error buffer across to the server if it has any items
|
|
|
|
// in it so that we can restore peer errors across connections.
|
|
|
|
if p.errorBuffer.Total() > 0 {
|
|
|
|
s.peerErrors[pubStr] = p.errorBuffer
|
|
|
|
}
|
|
|
|
|
2019-07-29 17:59:48 +03:00
|
|
|
// Inform the peer notifier of a peer offline event so that it can be
|
|
|
|
// reported to clients listening for peer events.
|
|
|
|
var pubKey [33]byte
|
|
|
|
copy(pubKey[:], pubSer)
|
|
|
|
|
|
|
|
s.peerNotifier.NotifyPeerOffline(pubKey)
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// openChanReq is a message sent to the server in order to request the
|
2017-07-31 01:53:53 +03:00
|
|
|
// initiation of a channel funding workflow to the peer with either the
|
|
|
|
// specified relative peer ID, or a global lightning ID.
|
2016-06-21 22:32:32 +03:00
|
|
|
type openChanReq struct {
|
2016-10-28 05:49:10 +03:00
|
|
|
targetPubkey *btcec.PublicKey
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-07-31 01:53:53 +03:00
|
|
|
chainHash chainhash.Hash
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2019-07-11 14:14:38 +03:00
|
|
|
subtractFees bool
|
2019-07-11 14:14:36 +03:00
|
|
|
localFundingAmt btcutil.Amount
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-08-22 09:25:41 +03:00
|
|
|
pushAmt lnwire.MilliSatoshi
|
2017-01-10 06:05:11 +03:00
|
|
|
|
2019-10-31 05:43:05 +03:00
|
|
|
fundingFeePerKw chainfee.SatPerKWeight
|
2017-11-23 22:36:12 +03:00
|
|
|
|
2017-11-14 02:48:54 +03:00
|
|
|
private bool
|
|
|
|
|
2019-11-15 12:09:27 +03:00
|
|
|
// minHtlcIn is the minimum incoming htlc that we accept.
|
|
|
|
minHtlcIn lnwire.MilliSatoshi
|
2017-12-17 02:00:37 +03:00
|
|
|
|
2018-03-14 16:15:01 +03:00
|
|
|
remoteCsvDelay uint16
|
|
|
|
|
2018-08-10 05:26:01 +03:00
|
|
|
// minConfs indicates the minimum number of confirmations that each
|
|
|
|
// output selected to fund the channel should satisfy.
|
|
|
|
minConfs int32
|
|
|
|
|
2019-12-03 12:38:29 +03:00
|
|
|
// shutdownScript is an optional upfront shutdown script for the channel.
|
|
|
|
// This value is optional, so may be nil.
|
|
|
|
shutdownScript lnwire.DeliveryAddress
|
|
|
|
|
2017-07-31 01:53:53 +03:00
|
|
|
// TODO(roasbeef): add ability to specify channel constraints as well
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2019-11-14 07:58:54 +03:00
|
|
|
// chanFunder is an optional channel funder that allows the caller to
|
|
|
|
// control exactly how the channel funding is carried out. If not
|
|
|
|
// specified, then the default chanfunding.WalletAssembler will be
|
|
|
|
// used.
|
|
|
|
chanFunder chanfunding.Assembler
|
|
|
|
|
|
|
|
// pendingChanID is not all zeroes (the default value), then this will
|
|
|
|
// be the pending channel ID used for the funding flow within the wire
|
|
|
|
// protocol.
|
|
|
|
pendingChanID [32]byte
|
|
|
|
|
2016-08-31 02:52:53 +03:00
|
|
|
updates chan *lnrpc.OpenStatusUpdate
|
|
|
|
err chan error
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// ConnectToPeer requests that the server connect to a Lightning Network peer
|
|
|
|
// at the specified address. This function will *block* until either a
|
|
|
|
// connection is established, or the initial handshake process fails.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) ConnectToPeer(addr *lnwire.NetAddress, perm bool) error {
|
|
|
|
targetPub := string(addr.IdentityKey.SerializeCompressed())
|
2016-06-21 21:52:09 +03:00
|
|
|
|
2017-08-11 07:20:51 +03:00
|
|
|
// Acquire mutex, but use explicit unlocking instead of defer for
|
|
|
|
// better granularity. In certain conditions, this method requires
|
|
|
|
// making an outbound connection to a remote peer, which requires the
|
|
|
|
// lock to be released, and subsequently reacquired.
|
2017-08-09 02:51:41 +03:00
|
|
|
s.mu.Lock()
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2017-08-11 07:20:51 +03:00
|
|
|
// Ensure we're not already connected to this peer.
|
2017-08-31 11:15:39 +03:00
|
|
|
peer, err := s.findPeerByPubStr(targetPub)
|
2017-10-06 02:14:07 +03:00
|
|
|
if err == nil {
|
2017-08-31 11:15:39 +03:00
|
|
|
s.mu.Unlock()
|
2019-04-10 05:45:48 +03:00
|
|
|
return &errPeerAlreadyConnected{peer: peer}
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2017-10-06 02:14:07 +03:00
|
|
|
// Peer was not found, continue to pursue connection with peer.
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
// If there's already a pending connection request for this pubkey,
|
|
|
|
// then we ignore this request to ensure we don't create a redundant
|
|
|
|
// connection.
|
2018-03-31 02:19:52 +03:00
|
|
|
if reqs, ok := s.persistentConnReqs[targetPub]; ok {
|
|
|
|
srvrLog.Warnf("Already have %d persistent connection "+
|
2019-03-27 02:41:13 +03:00
|
|
|
"requests for %x@%v, connecting anyway.", len(reqs),
|
|
|
|
targetPub, addr)
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// If there's not already a pending or active connection to this node,
|
|
|
|
// then instruct the connection manager to attempt to establish a
|
|
|
|
// persistent connection to the peer.
|
2019-03-27 02:41:13 +03:00
|
|
|
srvrLog.Debugf("Connecting to %x@%v", targetPub, addr)
|
2017-08-09 02:51:41 +03:00
|
|
|
if perm {
|
2017-05-05 02:47:48 +03:00
|
|
|
connReq := &connmgr.ConnReq{
|
2017-01-10 06:08:52 +03:00
|
|
|
Addr: addr,
|
|
|
|
Permanent: true,
|
2017-05-05 02:47:48 +03:00
|
|
|
}
|
|
|
|
|
2019-02-15 06:17:52 +03:00
|
|
|
// Since the user requested a permanent connection, we'll set
|
|
|
|
// the entry to true which will tell the server to continue
|
|
|
|
// reconnecting even if the number of channels with this peer is
|
|
|
|
// zero.
|
|
|
|
s.persistentPeers[targetPub] = true
|
2018-02-01 11:48:38 +03:00
|
|
|
if _, ok := s.persistentPeersBackoff[targetPub]; !ok {
|
2020-05-14 15:18:11 +03:00
|
|
|
s.persistentPeersBackoff[targetPub] = s.cfg.MinBackoff
|
2018-02-01 11:48:38 +03:00
|
|
|
}
|
2017-08-09 02:51:41 +03:00
|
|
|
s.persistentConnReqs[targetPub] = append(
|
2018-12-10 07:08:32 +03:00
|
|
|
s.persistentConnReqs[targetPub], connReq,
|
|
|
|
)
|
2017-08-09 02:51:41 +03:00
|
|
|
s.mu.Unlock()
|
2017-05-05 02:47:48 +03:00
|
|
|
|
|
|
|
go s.connMgr.Connect(connReq)
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
return nil
|
2017-01-10 06:08:52 +03:00
|
|
|
}
|
2017-08-09 02:51:41 +03:00
|
|
|
s.mu.Unlock()
|
|
|
|
|
|
|
|
// If we're not making a persistent connection, then we'll attempt to
|
|
|
|
// connect to the target peer. If the we can't make the connection, or
|
|
|
|
// the crypto negotiation breaks down, then return an error to the
|
2017-10-25 05:59:32 +03:00
|
|
|
// caller.
|
2018-05-08 07:45:36 +03:00
|
|
|
errChan := make(chan error, 1)
|
|
|
|
s.connectToPeer(addr, errChan)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case err := <-errChan:
|
|
|
|
return err
|
|
|
|
case <-s.quit:
|
|
|
|
return ErrServerShuttingDown
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// connectToPeer establishes a connection to a remote peer. errChan is used to
|
|
|
|
// notify the caller if the connection attempt has failed. Otherwise, it will be
|
|
|
|
// closed.
|
|
|
|
func (s *server) connectToPeer(addr *lnwire.NetAddress, errChan chan<- error) {
|
2020-04-28 11:06:21 +03:00
|
|
|
conn, err := brontide.Dial(s.identityECDH, addr, s.cfg.net.Dial)
|
2017-08-09 02:51:41 +03:00
|
|
|
if err != nil {
|
2018-05-08 07:45:36 +03:00
|
|
|
srvrLog.Errorf("Unable to connect to %v: %v", addr, err)
|
|
|
|
select {
|
|
|
|
case errChan <- err:
|
|
|
|
case <-s.quit:
|
|
|
|
}
|
|
|
|
return
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
close(errChan)
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2018-05-08 07:45:36 +03:00
|
|
|
s.OutboundPeerConnected(nil, conn)
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// DisconnectPeer sends the request to server to close the connection with peer
|
|
|
|
// identified by public key.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) DisconnectPeer(pubKey *btcec.PublicKey) error {
|
|
|
|
pubBytes := pubKey.SerializeCompressed()
|
2017-05-06 02:02:03 +03:00
|
|
|
pubStr := string(pubBytes)
|
2017-05-02 22:31:35 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// Check that were actually connected to this peer. If not, then we'll
|
|
|
|
// exit in an error as we can't disconnect from a peer that we're not
|
2017-10-06 02:14:07 +03:00
|
|
|
// currently connected to.
|
2017-08-31 11:15:39 +03:00
|
|
|
peer, err := s.findPeerByPubStr(pubStr)
|
2018-04-15 23:19:15 +03:00
|
|
|
if err == ErrPeerNotConnected {
|
|
|
|
return fmt.Errorf("peer %x is not connected", pubBytes)
|
2017-05-02 22:31:35 +03:00
|
|
|
}
|
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
srvrLog.Infof("Disconnecting from %v", peer)
|
|
|
|
|
2018-04-01 02:26:19 +03:00
|
|
|
s.cancelConnReqs(pubStr, nil)
|
2018-03-31 02:19:52 +03:00
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// If this peer was formerly a persistent connection, then we'll remove
|
|
|
|
// them from this map so we don't attempt to re-connect after we
|
|
|
|
// disconnect.
|
2017-08-31 11:15:39 +03:00
|
|
|
delete(s.persistentPeers, pubStr)
|
2018-02-01 11:48:38 +03:00
|
|
|
delete(s.persistentPeersBackoff, pubStr)
|
2017-05-02 22:31:35 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// Remove the current peer from the server's internal state and signal
|
|
|
|
// that the peer termination watcher does not need to execute for this
|
|
|
|
// peer.
|
|
|
|
s.removePeer(peer)
|
|
|
|
s.ignorePeerTermination[peer] = struct{}{}
|
2017-05-06 02:02:03 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
return nil
|
2017-05-02 22:31:35 +03:00
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// OpenChannel sends a request to the server to open a channel to the specified
|
2018-02-20 01:55:22 +03:00
|
|
|
// peer identified by nodeKey with the passed channel funding parameters.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2018-08-10 05:17:16 +03:00
|
|
|
func (s *server) OpenChannel(
|
|
|
|
req *openChanReq) (chan *lnrpc.OpenStatusUpdate, chan error) {
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2018-08-10 05:17:16 +03:00
|
|
|
// The updateChan will have a buffer of 2, since we expect a ChanPending
|
|
|
|
// + a ChanOpen update, and we want to make sure the funding process is
|
|
|
|
// not blocked if the caller is not reading the updates.
|
|
|
|
req.updates = make(chan *lnrpc.OpenStatusUpdate, 2)
|
|
|
|
req.err = make(chan error, 1)
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// First attempt to locate the target peer to open a channel with, if
|
|
|
|
// we're unable to locate the peer then this request will fail.
|
2018-08-10 05:17:16 +03:00
|
|
|
pubKeyBytes := req.targetPubkey.SerializeCompressed()
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RLock()
|
2018-08-10 05:17:16 +03:00
|
|
|
peer, ok := s.peersByPub[string(pubKeyBytes)]
|
|
|
|
if !ok {
|
2018-08-19 04:17:55 +03:00
|
|
|
s.mu.RUnlock()
|
|
|
|
|
2018-08-10 05:17:16 +03:00
|
|
|
req.err <- fmt.Errorf("peer %x is not online", pubKeyBytes)
|
|
|
|
return req.updates, req.err
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RUnlock()
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2019-10-02 00:35:23 +03:00
|
|
|
// We'll wait until the peer is active before beginning the channel
|
|
|
|
// opening process.
|
|
|
|
select {
|
|
|
|
case <-peer.activeSignal:
|
|
|
|
case <-peer.quit:
|
|
|
|
req.err <- fmt.Errorf("peer %x disconnected", pubKeyBytes)
|
|
|
|
return req.updates, req.err
|
|
|
|
case <-s.quit:
|
|
|
|
req.err <- ErrServerShuttingDown
|
|
|
|
return req.updates, req.err
|
|
|
|
}
|
|
|
|
|
2018-02-13 17:05:19 +03:00
|
|
|
// If the fee rate wasn't specified, then we'll use a default
|
|
|
|
// confirmation target.
|
2018-08-10 05:17:16 +03:00
|
|
|
if req.fundingFeePerKw == 0 {
|
2017-12-13 04:47:31 +03:00
|
|
|
estimator := s.cc.feeEstimator
|
2018-08-10 05:17:16 +03:00
|
|
|
feeRate, err := estimator.EstimateFeePerKW(6)
|
2017-12-13 04:47:31 +03:00
|
|
|
if err != nil {
|
2018-08-10 05:17:16 +03:00
|
|
|
req.err <- err
|
|
|
|
return req.updates, req.err
|
2017-12-13 04:47:31 +03:00
|
|
|
}
|
2018-08-10 05:17:16 +03:00
|
|
|
req.fundingFeePerKw = feeRate
|
2017-12-13 04:47:31 +03:00
|
|
|
}
|
|
|
|
|
2018-08-10 05:17:16 +03:00
|
|
|
// Spawn a goroutine to send the funding workflow request to the funding
|
|
|
|
// manager. This allows the server to continue handling queries instead
|
|
|
|
// of blocking on this request which is exported as a synchronous
|
|
|
|
// request to the outside world.
|
|
|
|
go s.fundingMgr.initFundingWorkflow(peer, req)
|
|
|
|
|
|
|
|
return req.updates, req.err
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// Peers returns a slice of all active peers.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2016-06-21 21:52:09 +03:00
|
|
|
func (s *server) Peers() []*peer {
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2017-06-05 08:18:12 +03:00
|
|
|
|
2018-02-14 08:48:42 +03:00
|
|
|
peers := make([]*peer, 0, len(s.peersByPub))
|
|
|
|
for _, peer := range s.peersByPub {
|
2017-06-05 08:18:12 +03:00
|
|
|
peers = append(peers, peer)
|
|
|
|
}
|
2016-06-21 21:52:09 +03:00
|
|
|
|
2017-06-05 08:18:12 +03:00
|
|
|
return peers
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
2017-12-16 02:06:20 +03:00
|
|
|
|
|
|
|
// parseHexColor takes a hex string representation of a color in the
|
|
|
|
// form "#RRGGBB", parses the hex color values, and returns a color.RGBA
|
|
|
|
// struct of the same color.
|
|
|
|
func parseHexColor(colorStr string) (color.RGBA, error) {
|
2018-11-04 05:00:19 +03:00
|
|
|
// Check if the hex color string is a valid color representation.
|
|
|
|
if !validColorRegexp.MatchString(colorStr) {
|
|
|
|
return color.RGBA{}, errors.New("Color must be specified " +
|
|
|
|
"using a hexadecimal value in the form #RRGGBB")
|
2017-12-16 02:06:20 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Decode the hex color string to bytes.
|
|
|
|
// The resulting byte array is in the form [R, G, B].
|
|
|
|
colorBytes, err := hex.DecodeString(colorStr[1:])
|
|
|
|
if err != nil {
|
|
|
|
return color.RGBA{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return color.RGBA{R: colorBytes[0], G: colorBytes[1], B: colorBytes[2]}, nil
|
|
|
|
}
|
2018-02-01 11:48:38 +03:00
|
|
|
|
|
|
|
// computeNextBackoff uses a truncated exponential backoff to compute the next
|
|
|
|
// backoff using the value of the exiting backoff. The returned duration is
|
|
|
|
// randomized in either direction by 1/20 to prevent tight loops from
|
|
|
|
// stabilizing.
|
2020-05-14 15:18:11 +03:00
|
|
|
func computeNextBackoff(currBackoff, maxBackoff time.Duration) time.Duration {
|
2018-02-01 11:48:38 +03:00
|
|
|
// Double the current backoff, truncating if it exceeds our maximum.
|
|
|
|
nextBackoff := 2 * currBackoff
|
2020-05-14 15:18:11 +03:00
|
|
|
if nextBackoff > maxBackoff {
|
|
|
|
nextBackoff = maxBackoff
|
2018-02-01 11:48:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Using 1/10 of our duration as a margin, compute a random offset to
|
|
|
|
// avoid the nodes entering connection cycles.
|
|
|
|
margin := nextBackoff / 10
|
|
|
|
|
|
|
|
var wiggle big.Int
|
|
|
|
wiggle.SetUint64(uint64(margin))
|
|
|
|
if _, err := rand.Int(rand.Reader, &wiggle); err != nil {
|
|
|
|
// Randomizing is not mission critical, so we'll just return the
|
|
|
|
// current backoff.
|
|
|
|
return nextBackoff
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise add in our wiggle, but subtract out half of the margin so
|
|
|
|
// that the backoff can tweaked by 1/20 in either direction.
|
|
|
|
return nextBackoff + (time.Duration(wiggle.Uint64()) - margin/2)
|
|
|
|
}
|
2018-03-31 01:59:05 +03:00
|
|
|
|
2020-04-15 02:16:26 +03:00
|
|
|
// errNoAdvertisedAddr is an error returned when we attempt to retrieve the
|
|
|
|
// advertised address of a node, but they don't have one.
|
|
|
|
var errNoAdvertisedAddr = errors.New("no advertised address found")
|
|
|
|
|
2018-05-02 09:30:56 +03:00
|
|
|
// fetchNodeAdvertisedAddr attempts to fetch an advertised address of a node.
|
|
|
|
func (s *server) fetchNodeAdvertisedAddr(pub *btcec.PublicKey) (net.Addr, error) {
|
2019-12-18 14:43:18 +03:00
|
|
|
vertex, err := route.NewVertexFromBytes(pub.SerializeCompressed())
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2019-12-20 12:14:13 +03:00
|
|
|
node, err := s.chanDB.ChannelGraph().FetchLightningNode(nil, vertex)
|
2018-03-31 01:59:05 +03:00
|
|
|
if err != nil {
|
2018-05-02 09:30:56 +03:00
|
|
|
return nil, err
|
2018-03-31 01:59:05 +03:00
|
|
|
}
|
|
|
|
|
2018-05-02 09:30:56 +03:00
|
|
|
if len(node.Addresses) == 0 {
|
2020-04-15 02:16:26 +03:00
|
|
|
return nil, errNoAdvertisedAddr
|
2018-03-31 01:59:05 +03:00
|
|
|
}
|
|
|
|
|
2018-05-02 09:30:56 +03:00
|
|
|
return node.Addresses[0], nil
|
2018-03-31 01:59:05 +03:00
|
|
|
}
|
2018-06-14 05:43:42 +03:00
|
|
|
|
2018-08-22 10:32:44 +03:00
|
|
|
// fetchLastChanUpdate returns a function which is able to retrieve our latest
|
|
|
|
// channel update for a target channel.
|
|
|
|
func (s *server) fetchLastChanUpdate() func(lnwire.ShortChannelID) (
|
|
|
|
*lnwire.ChannelUpdate, error) {
|
|
|
|
|
2020-04-28 11:06:21 +03:00
|
|
|
ourPubKey := s.identityECDH.PubKey().SerializeCompressed()
|
2018-08-22 10:32:44 +03:00
|
|
|
return func(cid lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) {
|
|
|
|
info, edge1, edge2, err := s.chanRouter.GetChannelByID(cid)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2019-02-15 04:13:44 +03:00
|
|
|
|
2019-02-15 04:12:03 +03:00
|
|
|
return netann.ExtractChannelUpdate(
|
|
|
|
ourPubKey[:], info, edge1, edge2,
|
|
|
|
)
|
2018-06-14 05:43:42 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// applyChannelUpdate applies the channel update to the different sub-systems of
|
|
|
|
// the server.
|
|
|
|
func (s *server) applyChannelUpdate(update *lnwire.ChannelUpdate) error {
|
2020-04-28 11:06:21 +03:00
|
|
|
pubKey := s.identityECDH.PubKey()
|
2018-06-14 05:43:42 +03:00
|
|
|
errChan := s.authGossiper.ProcessLocalAnnouncement(update, pubKey)
|
|
|
|
select {
|
|
|
|
case err := <-errChan:
|
|
|
|
return err
|
|
|
|
case <-s.quit:
|
|
|
|
return ErrServerShuttingDown
|
|
|
|
}
|
|
|
|
}
|
2019-06-14 03:30:22 +03:00
|
|
|
|
|
|
|
// newSweepPkScriptGen creates closure that generates a new public key script
|
|
|
|
// which should be used to sweep any funds into the on-chain wallet.
|
|
|
|
// Specifically, the script generated is a version 0, pay-to-witness-pubkey-hash
|
|
|
|
// (p2wkh) output.
|
|
|
|
func newSweepPkScriptGen(
|
|
|
|
wallet lnwallet.WalletController) func() ([]byte, error) {
|
|
|
|
|
|
|
|
return func() ([]byte, error) {
|
|
|
|
sweepAddr, err := wallet.NewAddress(lnwallet.WitnessPubKey, false)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return txscript.PayToAddrScript(sweepAddr)
|
|
|
|
}
|
|
|
|
}
|