package main

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"image/color"
	"math/big"
	"net"
	"path/filepath"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/coreos/bbolt"
	"github.com/go-errors/errors"
	"github.com/lightningnetwork/lightning-onion"
	"github.com/lightningnetwork/lnd/autopilot"
	"github.com/lightningnetwork/lnd/brontide"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/contractcourt"
	"github.com/lightningnetwork/lnd/discovery"
	"github.com/lightningnetwork/lnd/htlcswitch"
	"github.com/lightningnetwork/lnd/lnpeer"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing"
	"github.com/lightningnetwork/lnd/tor"
	"github.com/roasbeef/btcd/btcec"
	"github.com/roasbeef/btcd/chaincfg/chainhash"
	"github.com/roasbeef/btcd/connmgr"
	"github.com/roasbeef/btcd/wire"
	"github.com/roasbeef/btcutil"
)

var (
	// ErrPeerNotConnected signals that the server has no connection to the
	// given peer.
	ErrPeerNotConnected = errors.New("peer is not connected")

	// ErrServerShuttingDown indicates that the server is in the process of
	// gracefully exiting.
	ErrServerShuttingDown = errors.New("server is shutting down")

	// defaultBackoff is the starting point for exponential backoff for
	// reconnecting to persistent peers.
	defaultBackoff = time.Second

	// maximumBackoff is the largest backoff we will permit when
	// reattempting connections to persistent peers.
	maximumBackoff = time.Hour
)

// server is the main server of the Lightning Network Daemon. The server houses
// global state pertaining to the wallet, database, and the rpcserver.
// Additionally, the server is also used as a central messaging bus to interact
// with any of its companion objects.
type server struct {
	started  int32 // atomic
	shutdown int32 // atomic

	// identityPriv is the private key used to authenticate any incoming
	// connections.
	identityPriv *btcec.PrivateKey

	// nodeSigner is an implementation of the MessageSigner interface
	// that's backed by the identity private key of the running lnd node.
	nodeSigner *nodeSigner

	// lightningID is the sha256 of the public key corresponding to our
	// long-term identity private key.
	lightningID [32]byte

	// listenAddrs is the list of addresses the server is currently
	// listening on.
	listenAddrs []string

	// torController is a client that will communicate with a locally
	// running Tor server. This client will handle initiating and
	// authenticating the connection to the Tor server, automatically
	// creating and setting up onion services, etc.
	torController *tor.Controller
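
	// mu guards the peer-tracking state below. It must be held when
	// reading or mutating any of the peer maps.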
	mu         sync.RWMutex
	peersByPub map[string]*peer

	inboundPeers  map[string]*peer
	outboundPeers map[string]*peer

	peerConnectedListeners map[string][]chan<- struct{}
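
	// persistentPeers tracks the set of peers we should always maintain a
	// connection to, persistentPeersBackoff records the current retry
	// backoff for each of those peers, persistentConnReqs holds the
	// outstanding connmgr requests per peer, and persistentRetryCancels
	// lets a pending retry be canceled if the peer reconnects first.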
	persistentPeers        map[string]struct{}
	persistentPeersBackoff map[string]time.Duration
	persistentConnReqs     map[string][]*connmgr.ConnReq
	persistentRetryCancels map[string]chan struct{}

	// ignorePeerTermination tracks peers for which the server has initiated
	// a disconnect. Adding a peer to this map causes the peer termination
	// watcher to short circuit in the event that peers are purposefully
	// disconnected.
	ignorePeerTermination map[*peer]struct{}

	// scheduledPeerConnection maps a pubkey string to a callback that
	// should be executed in the peerTerminationWatcher once the prior peer
	// with the same pubkey exits. This allows the server to wait until the
	// prior peer has cleaned up successfully, before adding the new peer
	// intended to replace it.
	scheduledPeerConnection map[string]func()
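
	// The fields below are handles to the daemon's long-lived subsystems:
	// the chain control backend, funding manager, channel database, HTLC
	// switch, invoice registry, witness beacon, breach arbiter, channel
	// router, authenticated gossiper, utxo nursery, chain arbitrator,
	// onion (sphinx) processor, and connection manager.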
	cc *chainControl

	fundingMgr *fundingManager

	chanDB *channeldb.DB

	htlcSwitch *htlcswitch.Switch

	invoices *invoiceRegistry

	witnessBeacon contractcourt.WitnessBeacon

	breachArbiter *breachArbiter

	chanRouter *routing.ChannelRouter

	authGossiper *discovery.AuthenticatedGossiper

	utxoNursery *utxoNursery

	chainArb *contractcourt.ChainArbitrator

	sphinx *htlcswitch.OnionProcessor

	connMgr *connmgr.ConnManager

	// globalFeatures is the feature vector which affects HTLCs and is
	// advertised to other nodes.
	globalFeatures *lnwire.FeatureVector

	// currentNodeAnn is the node announcement that has been broadcast to
	// the network upon startup, if the attributes of the node (us) have
	// changed since last start.
	currentNodeAnn *lnwire.NodeAnnouncement
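
	// quit is closed when the server begins shutting down, signaling all
	// long-lived goroutines to exit. wg tracks those goroutines so Stop
	// can block until they have finished.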
	quit chan struct{}

	wg sync.WaitGroup
}

// parseAddr parses an address from its string format to a net.Addr.
func parseAddr(address string) (net.Addr, error) {
	var (
		host string
		port int
	)

	// Split the address into its host and port components.
	h, p, err := net.SplitHostPort(address)
	if err != nil {
		// If a port wasn't specified, we'll assume the address only
		// contains the host so we'll use the default port.
		host = address
		port = defaultPeerPort
	} else {
		// Otherwise, we'll note both the host and the port.
		host = h
		portNum, err := strconv.Atoi(p)
		if err != nil {
			return nil, err
		}
		port = portNum
	}

	if tor.IsOnionHost(host) {
		return &tor.OnionAddr{OnionService: host, Port: port}, nil
	}

	// If the host is part of a TCP address, we'll use the network
	// specific ResolveTCPAddr function in order to resolve these
	// addresses over Tor in order to prevent leaking your real IP
	// address.
	hostPort := net.JoinHostPort(host, strconv.Itoa(port))
	return cfg.net.ResolveTCPAddr("tcp", hostPort)
}

// newServer creates a new instance of the server which is to listen using the
// passed listener address.
func newServer(listenAddrs []string, chanDB *channeldb.DB, cc *chainControl,
	privKey *btcec.PrivateKey) (*server, error) {

	var err error

	listeners := make([]net.Listener, len(listenAddrs))
	for i, addr := range listenAddrs {
		// Note: though brontide.NewListener uses ResolveTCPAddr, it
		// doesn't need to call the general lndResolveTCP function
		// since we are resolving a local address.
		listeners[i], err = brontide.NewListener(privKey, addr)
		if err != nil {
			return nil, err
		}
	}

	globalFeatures := lnwire.NewRawFeatureVector()

	var serializedPubKey [33]byte
	copy(serializedPubKey[:], privKey.PubKey().SerializeCompressed())

	// Initialize the sphinx router, placing its persistent replay log in
	// the same directory as the channel graph database.
	graphDir := chanDB.Path()
	sharedSecretPath := filepath.Join(graphDir, "sphinxreplay.db")
	replayLog := htlcswitch.NewDecayedLog(sharedSecretPath, cc.chainNotifier)
	sphinxRouter := sphinx.NewRouter(privKey, activeNetParams.Params, replayLog)
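
	// With the core dependencies in hand, assemble the server instance
	// itself, wiring in the onion processor, global feature vector, and
	// the maps used to track peers and persistent connections.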
	s := &server{
		chanDB: chanDB,
		cc:     cc,

		invoices: newInvoiceRegistry(chanDB),

		identityPriv: privKey,
		nodeSigner:   newNodeSigner(privKey),

		listenAddrs: listenAddrs,

		// TODO(roasbeef): derive proper onion key based on rotation
		// schedule
		sphinx:      htlcswitch.NewOnionProcessor(sphinxRouter),
		lightningID: sha256.Sum256(serializedPubKey[:]),

		persistentPeers:         make(map[string]struct{}),
		persistentPeersBackoff:  make(map[string]time.Duration),
		persistentConnReqs:      make(map[string][]*connmgr.ConnReq),
		persistentRetryCancels:  make(map[string]chan struct{}),
		ignorePeerTermination:   make(map[*peer]struct{}),
		scheduledPeerConnection: make(map[string]func()),

		peersByPub:             make(map[string]*peer),
		inboundPeers:           make(map[string]*peer),
		outboundPeers:          make(map[string]*peer),
		peerConnectedListeners: make(map[string][]chan<- struct{}),

		globalFeatures: lnwire.NewFeatureVector(globalFeatures,
			lnwire.GlobalFeatures),
		quit: make(chan struct{}),
	}

	s.witnessBeacon = &preimageBeacon{
		invoices:    s.invoices,
		wCache:      chanDB.NewWitnessCache(),
		subscribers: make(map[uint64]*preimageSubscriber),
	}

	// If the debug HTLC flag is on, then we create a "master debug"
	// invoice to which all outgoing payments will be sent, and all
	// incoming HTLCs with the debug R-Hash will be immediately settled.
	if cfg.DebugHTLC {
		kiloCoin := btcutil.Amount(btcutil.SatoshiPerBitcoin * 1000)
		s.invoices.AddDebugInvoice(kiloCoin, *debugPre)
		srvrLog.Debugf("Debug HTLC invoice inserted, preimage=%x, hash=%x",
			debugPre[:], debugHash[:])
	}
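
	// Next, construct the HTLC switch, which forwards HTLCs between our
	// channel links and services locally initiated channel close requests
	// by handing them off to the relevant peer.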
	s.htlcSwitch, err = htlcswitch.New(htlcswitch.Config{
		DB:      chanDB,
		SelfKey: s.identityPriv.PubKey(),
		LocalChannelClose: func(pubKey []byte,
			request *htlcswitch.ChanClose) {

			peer, err := s.FindPeerByPubStr(string(pubKey))
			if err != nil {
				srvrLog.Errorf("unable to close channel, peer"+
					" with %v id can't be found: %v",
					pubKey, err,
				)
				return
			}

			select {
			case peer.localCloseChanReqs <- request:
				srvrLog.Infof("Local close channel request "+
					"delivered to peer: %x", pubKey[:])
			case <-peer.quit:
				srvrLog.Errorf("Unable to deliver local close "+
					"channel request to peer %x, err: %v",
					pubKey[:], err)
			}
		},
		FwdingLog:              chanDB.ForwardingLog(),
		SwitchPackager:         channeldb.NewSwitchPackager(),
		ExtractErrorEncrypter:  s.sphinx.ExtractErrorEncrypter,
		FetchLastChannelUpdate: fetchLastChanUpdate(s, serializedPubKey),
	})
	if err != nil {
		return nil, err
	}

	// Gather external IPs from the config, augmenting them with any
	// address discovered via UPnP or NAT-PMP if those options are enabled.
	externalIPs := cfg.ExternalIPs

	if cfg.UpnpSupport {
		externalIP, err := configureUpnp()
		if err == nil {
			externalIPs = append(externalIPs, externalIP)
		}
	}

	if cfg.NatPmp {
		externalIP, err := configureNatPmp()
		if err == nil {
			externalIPs = append(externalIPs, externalIP)
		}
	}

	// If external IP addresses have been specified, add those to the list
	// of this server's addresses.
	selfAddrs := make([]net.Addr, 0, len(externalIPs))
	for _, ip := range externalIPs {
		addr, err := parseAddr(ip)
		if err != nil {
			return nil, err
		}

		selfAddrs = append(selfAddrs, addr)
	}

	// If we were requested to route connections through Tor and to
	// automatically create an onion service, we'll initiate our Tor
	// controller and establish a connection to the Tor server.
	//
	// NOTE: v3 onion services cannot be created automatically yet. In the
	// future, this will be expanded to do so.
	if cfg.Tor.Active && cfg.Tor.V2 {
		s.torController = tor.NewController(cfg.Tor.Control)
	}

	chanGraph := chanDB.ChannelGraph()

	// Parse node color from configuration.
	color, err := parseHexColor(cfg.Color)
	if err != nil {
		srvrLog.Errorf("unable to parse color: %v\n", err)
		return nil, err
	}

	// If no alias is provided, default to the first 10 characters of the
	// public key.
	alias := cfg.Alias
	if alias == "" {
		alias = hex.EncodeToString(serializedPubKey[:10])
	}
	nodeAlias, err := lnwire.NewNodeAlias(alias)
	if err != nil {
		return nil, err
	}
	selfNode := &channeldb.LightningNode{
		HaveNodeAnnouncement: true,
		LastUpdate:           time.Now(),
		Addresses:            selfAddrs,
		Alias:                nodeAlias.String(),
		Features:             s.globalFeatures,
		Color:                color,
	}
	copy(selfNode.PubKeyBytes[:], privKey.PubKey().SerializeCompressed())

	// If our information has changed since our last boot, then we'll
	// re-sign our node announcement so a fresh authenticated version of it
	// can be propagated throughout the network upon startup.
	//
	// TODO(roasbeef): don't always set timestamp above to _now.
	nodeAnn := &lnwire.NodeAnnouncement{
		Timestamp: uint32(selfNode.LastUpdate.Unix()),
		Addresses: selfNode.Addresses,
		NodeID:    selfNode.PubKeyBytes,
		Alias:     nodeAlias,
		Features:  selfNode.Features.RawFeatureVector,
		RGBColor:  color,
	}
	authSig, err := discovery.SignAnnouncement(
		s.nodeSigner, s.identityPriv.PubKey(), nodeAnn,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to generate signature for "+
			"self node announcement: %v", err)
	}

	selfNode.AuthSigBytes = authSig.Serialize()
	s.currentNodeAnn = nodeAnn

	if err := chanGraph.SetSourceNode(selfNode); err != nil {
		return nil, fmt.Errorf("can't set self node: %v", err)
	}

	nodeAnn.Signature, err = lnwire.NewSigFromRawSignature(selfNode.AuthSigBytes)
	if err != nil {
		return nil, err
	}
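
	// With our own node registered in the graph, construct the channel
	// router. It combines graph and chain access with the HTLC switch to
	// find routes and dispatch payments along them.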
	s.chanRouter, err = routing.New(routing.Config{
		Graph:     chanGraph,
		Chain:     cc.chainIO,
		ChainView: cc.chainView,
		SendToSwitch: func(firstHopPub [33]byte,
			htlcAdd *lnwire.UpdateAddHTLC,
			circuit *sphinx.Circuit) ([32]byte, error) {

			// Using the created circuit, initialize the error
			// decrypter so we can parse+decode any failures
			// incurred by this payment within the switch.
			errorDecryptor := &htlcswitch.SphinxErrorDecrypter{
				OnionErrorDecrypter: sphinx.NewOnionErrorDecrypter(circuit),
			}

			return s.htlcSwitch.SendHTLC(firstHopPub, htlcAdd, errorDecryptor)
		},
		ChannelPruneExpiry: time.Duration(time.Hour * 24 * 14),
		GraphPruneInterval: time.Duration(time.Hour),
		QueryBandwidth: func(edge *channeldb.ChannelEdgeInfo) lnwire.MilliSatoshi {
			// If we aren't on either side of this edge, then we'll
			// just thread through the capacity of the edge as we
			// know it.
			if !bytes.Equal(edge.NodeKey1Bytes[:], selfNode.PubKeyBytes[:]) &&
				!bytes.Equal(edge.NodeKey2Bytes[:], selfNode.PubKeyBytes[:]) {

				return lnwire.NewMSatFromSatoshis(edge.Capacity)
			}

			cid := lnwire.NewChanIDFromOutPoint(&edge.ChannelPoint)
			link, err := s.htlcSwitch.GetLink(cid)
			if err != nil {
				// If the link isn't online, then we'll report
				// that it has zero bandwidth to the router.
				return 0
			}

			// If the link is found within the switch, but it isn't
			// yet eligible to forward any HTLCs, then we'll treat
			// it as if it isn't online in the first place.
			if !link.EligibleToForward() {
				return 0
			}

			// Otherwise, we'll return the current best estimate
			// for the available bandwidth for the link.
			return link.Bandwidth()
		},
	})
	if err != nil {
		return nil, fmt.Errorf("can't create router: %v", err)
	}
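
	// Next, spin up the authenticated gossiper, which validates channel
	// and node announcements and broadcasts them to the rest of our peers
	// on our behalf.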
	s.authGossiper, err = discovery.New(discovery.Config{
		Router:     s.chanRouter,
		Notifier:   s.cc.chainNotifier,
		ChainHash:  *activeNetParams.GenesisHash,
		Broadcast:  s.BroadcastMessage,
		ChanSeries: &chanSeries{s.chanDB.ChannelGraph()},
		SendToPeer: s.SendToPeer,
		FindPeer: func(pub *btcec.PublicKey) (lnpeer.Peer, error) {
			return s.FindPeer(pub)
		},
		NotifyWhenOnline: s.NotifyWhenOnline,
		ProofMatureDelta: 0,
		TrickleDelay:     time.Millisecond * time.Duration(cfg.TrickleDelay),
		RetransmitDelay:  time.Minute * 30,
		DB:               chanDB,
		AnnSigner:        s.nodeSigner,
	},
		s.identityPriv.PubKey(),
	)
	if err != nil {
		return nil, err
	}

	utxnStore, err := newNurseryStore(activeNetParams.GenesisHash, chanDB)
	if err != nil {
		srvrLog.Errorf("unable to create nursery store: %v", err)
		return nil, err
	}

	s.utxoNursery = newUtxoNursery(&NurseryConfig{
		ChainIO:   cc.chainIO,
		ConfDepth: 1,
		DB:        chanDB,
		Estimator: cc.feeEstimator,
		GenSweepScript: func() ([]byte, error) {
			return newSweepPkScript(cc.wallet)
		},
		Notifier:           cc.chainNotifier,
		PublishTransaction: cc.wallet.PublishTransaction,
		Signer:             cc.wallet.Cfg.Signer,
		Store:              utxnStore,
	})

	// Construct a closure that wraps the htlcswitch's CloseLink method.
	closeLink := func(chanPoint *wire.OutPoint,
		closureType htlcswitch.ChannelCloseType) {
		// TODO(conner): Properly respect the update and error channels
		// returned by CloseLink.
		s.htlcSwitch.CloseLink(chanPoint, closureType, 0)
	}

	// We will use the following channel to reliably hand off contract
	// breach events from the ChannelArbitrator to the breachArbiter.
	contractBreaches := make(chan *ContractBreachEvent, 1)
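
	// With the breach hand-off channel in place, create the chain
	// arbitrator. It watches on-chain activity for every channel and
	// dispatches incubation, resolution, and breach events to the
	// subsystems wired in below.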
	s.chainArb = contractcourt.NewChainArbitrator(contractcourt.ChainArbitratorConfig{
		ChainHash: *activeNetParams.GenesisHash,
		// TODO(roasbeef): properly configure
		//  * needs to be << or specified final hop time delta
		BroadcastDelta: defaultBroadcastDelta,
		NewSweepAddr: func() ([]byte, error) {
			return newSweepPkScript(cc.wallet)
		},
		PublishTx: cc.wallet.PublishTransaction,
		DeliverResolutionMsg: func(msgs ...contractcourt.ResolutionMsg) error {
			for _, msg := range msgs {
				err := s.htlcSwitch.ProcessContractResolution(msg)
				if err != nil {
					return err
				}
			}
			return nil
		},
		IncubateOutputs: func(chanPoint wire.OutPoint,
			commitRes *lnwallet.CommitOutputResolution,
			outHtlcRes *lnwallet.OutgoingHtlcResolution,
			inHtlcRes *lnwallet.IncomingHtlcResolution) error {

			var (
				inRes  []lnwallet.IncomingHtlcResolution
				outRes []lnwallet.OutgoingHtlcResolution
			)
			if inHtlcRes != nil {
				inRes = append(inRes, *inHtlcRes)
			}
			if outHtlcRes != nil {
				outRes = append(outRes, *outHtlcRes)
			}

			return s.utxoNursery.IncubateOutputs(
				chanPoint, commitRes, outRes, inRes,
			)
		},
		PreimageDB:   s.witnessBeacon,
		Notifier:     cc.chainNotifier,
		Signer:       cc.wallet.Cfg.Signer,
		FeeEstimator: cc.feeEstimator,
		ChainIO:      cc.chainIO,
		MarkLinkInactive: func(chanPoint wire.OutPoint) error {
			chanID := lnwire.NewChanIDFromOutPoint(&chanPoint)
			return s.htlcSwitch.RemoveLink(chanID)
		},
		IsOurAddress: func(addr btcutil.Address) bool {
			_, err := cc.wallet.GetPrivKey(addr)
			return err == nil
		},
		ContractBreach: func(chanPoint wire.OutPoint,
			breachRet *lnwallet.BreachRetribution) error {
			event := &ContractBreachEvent{
				ChanPoint:         chanPoint,
				ProcessACK:        make(chan error, 1),
				BreachRetribution: breachRet,
			}

			// Send the contract breach event to the breachArbiter.
			select {
			case contractBreaches <- event:
			case <-s.quit:
				return ErrServerShuttingDown
			}

			// Wait for the breachArbiter to ACK the event.
			select {
			case err := <-event.ProcessACK:
				return err
			case <-s.quit:
				return ErrServerShuttingDown
			}
		},
	}, chanDB)
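
	// The breach arbiter consumes the contract breach events produced by
	// the chain arbitrator above, and acts to sweep funds whenever a
	// counterparty broadcasts a revoked commitment state.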
	s.breachArbiter = newBreachArbiter(&BreachConfig{
		CloseLink: closeLink,
		DB:        chanDB,
		Estimator: s.cc.feeEstimator,
		GenSweepScript: func() ([]byte, error) {
			return newSweepPkScript(cc.wallet)
		},
		Notifier:           cc.chainNotifier,
		PublishTransaction: cc.wallet.PublishTransaction,
		ContractBreaches:   contractBreaches,
		Signer:             cc.wallet.Cfg.Signer,
		Store:              newRetributionStore(chanDB),
	})

	// Create the connection manager which will be responsible for
	// maintaining persistent outbound connections and also accepting new
	// incoming connections.
	cmgr, err := connmgr.New(&connmgr.Config{
		Listeners:      listeners,
		OnAccept:       s.InboundPeerConnected,
		RetryDuration:  time.Second * 5,
		TargetOutbound: 100,
		Dial:           noiseDial(s.identityPriv),
		OnConnection:   s.OutboundPeerConnected,
	})
	if err != nil {
		return nil, err
	}
	s.connMgr = cmgr

	return s, nil
}

// Started returns true if the server has been started, and false otherwise.
// NOTE: This function is safe for concurrent access.
func (s *server) Started() bool {
	return atomic.LoadInt32(&s.started) != 0
}

// Start starts the main daemon server, all requested listeners, and any helper
// goroutines.
// NOTE: This function is safe for concurrent access.
func (s *server) Start() error {
	// Already running?
	if !atomic.CompareAndSwapInt32(&s.started, 0, 1) {
		return nil
	}

	if s.torController != nil {
		if err := s.initTorController(); err != nil {
			return err
		}
	}

	// Start the notification server. This is used so channel management
	// goroutines can be notified when a funding transaction reaches a
	// sufficient number of confirmations, or when the input for the
	// funding transaction is spent in an attempt at an uncooperative close
	// by the counterparty.
	if err := s.cc.chainNotifier.Start(); err != nil {
		return err
	}
	if err := s.sphinx.Start(); err != nil {
		return err
	}
	if err := s.htlcSwitch.Start(); err != nil {
		return err
	}
	if err := s.utxoNursery.Start(); err != nil {
		return err
	}
	if err := s.chainArb.Start(); err != nil {
		return err
	}
	if err := s.breachArbiter.Start(); err != nil {
		return err
	}
	if err := s.authGossiper.Start(); err != nil {
		return err
	}
	if err := s.chanRouter.Start(); err != nil {
		return err
	}

	// With all the relevant sub-systems started, we'll now attempt to
	// establish persistent connections to our direct channel collaborators
	// within the network.
	if err := s.establishPersistentConnections(); err != nil {
		return err
	}

	go s.connMgr.Start()

	// If network bootstrapping hasn't been disabled, then we'll configure
	// the set of active bootstrappers, and launch a dedicated goroutine to
	// maintain a set of persistent connections.
	if !cfg.NoNetBootstrap && !(cfg.Bitcoin.SimNet || cfg.Litecoin.SimNet) &&
		!(cfg.Bitcoin.RegTest || cfg.Litecoin.RegTest) {

		networkBootStrappers, err := initNetworkBootstrappers(s)
		if err != nil {
			return err
		}

		s.wg.Add(1)
		go s.peerBootstrapper(3, networkBootStrappers)
	} else {
		srvrLog.Infof("Auto peer bootstrapping is disabled")
	}

	return nil
}

// Stop gracefully shuts down the main daemon server. This function will signal
// any active goroutines, or helper objects to exit, then blocks until they've
// all successfully exited. Additionally, any/all listeners are closed.
// NOTE: This function is safe for concurrent access.
func (s *server) Stop() error {
	// Bail if we're already shutting down.
	if !atomic.CompareAndSwapInt32(&s.shutdown, 0, 1) {
		return nil
	}

	close(s.quit)

	if s.torController != nil {
		s.torController.Stop()
	}

	// Shutdown the wallet, funding manager, and the rpc server.
	s.cc.chainNotifier.Stop()
	s.chanRouter.Stop()
	s.htlcSwitch.Stop()
	s.sphinx.Stop()
	s.utxoNursery.Stop()
	s.breachArbiter.Stop()
	s.authGossiper.Stop()
	s.chainArb.Stop()
	s.cc.wallet.Shutdown()
	s.cc.chainView.Stop()
	s.connMgr.Stop()
	s.cc.feeEstimator.Stop()

	// Disconnect from each active peer to ensure that
	// peerTerminationWatchers signal completion to each peer.
	for _, peer := range s.Peers() {
		s.DisconnectPeer(peer.addr.IdentityKey)
	}

	// Wait for all lingering goroutines to quit.
	s.wg.Wait()

	return nil
}

// Stopped returns true if the server has been instructed to shut down.
// NOTE: This function is safe for concurrent access.
func (s *server) Stopped() bool {
	return atomic.LoadInt32(&s.shutdown) != 0
}

// initNetworkBootstrappers initializes a set of network peer bootstrappers
// based on the server, and currently active bootstrap mechanisms as defined
// within the current configuration.
func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, error) {
	srvrLog.Infof("Initializing peer network bootstrappers!")

	var bootStrappers []discovery.NetworkPeerBootstrapper

	// First, we'll create an instance of the ChannelGraphBootstrapper as
	// this can be used by default if we've already partially seeded the
	// network.
	chanGraph := autopilot.ChannelGraphFromDatabase(s.chanDB.ChannelGraph())
	graphBootstrapper, err := discovery.NewGraphBootstrapper(chanGraph)
	if err != nil {
		return nil, err
	}
	bootStrappers = append(bootStrappers, graphBootstrapper)

	// If this isn't simnet mode, then one of our additional bootstrapping
	// sources will be the set of running DNS seeds.
	if !cfg.Bitcoin.SimNet || !cfg.Litecoin.SimNet {
		dnsSeeds, ok := chainDNSSeeds[*activeNetParams.GenesisHash]

		// If we have a set of DNS seeds for this chain, then we'll add
		// it as an additional bootstrapping source.
		if ok {
			srvrLog.Infof("Creating DNS peer bootstrapper with "+
				"seeds: %v", dnsSeeds)

			dnsBootStrapper := discovery.NewDNSSeedBootstrapper(
				dnsSeeds, cfg.net,
			)
			bootStrappers = append(bootStrappers, dnsBootStrapper)
		}
	}

	return bootStrappers, nil
}

// peerBootstrapper is a goroutine which is tasked with attempting to establish
// and maintain a target min number of outbound connections. With this
// invariant, we ensure that our node is connected to a diverse set of peers
// and that nodes newly joining the network receive an up to date network view
// as soon as possible.
func (s *server) peerBootstrapper(numTargetPeers uint32,
	bootStrappers []discovery.NetworkPeerBootstrapper) {

	defer s.wg.Done()

	// To kick things off, we'll attempt to first query the set of
	// bootstrappers for enough addresses to fill our quota.
	bootStrapAddrs, err := discovery.MultiSourceBootstrap(
		nil, numTargetPeers, bootStrappers...,
	)
	if err != nil {
		// TODO(roasbeef): panic?
		srvrLog.Errorf("Unable to retrieve initial bootstrap "+
			"peers: %v", err)
		return
	}

	srvrLog.Debugf("Attempting to bootstrap connectivity with %v initial "+
		"peers", len(bootStrapAddrs))

	// With our initial set of peers obtained, we'll launch a goroutine to
	// attempt to connect out to each of them. We'll be waking up shortly
	// below to sample how many of these connections succeeded.
	for _, addr := range bootStrapAddrs {
		go func(a *lnwire.NetAddress) {
			conn, err := brontide.Dial(s.identityPriv, a, cfg.net.Dial)
			if err != nil {
				srvrLog.Errorf("unable to connect to %v: %v",
					a, err)
				return
			}

			s.OutboundPeerConnected(nil, conn)
		}(addr)
	}

	// We'll start with a 15 second backoff, and double the time every time
	// an epoch fails up to a ceiling.
	const backOffCeiling = time.Minute * 5
	backOff := time.Second * 15

	// We'll create a new ticker to wake us up every 15 seconds so we can
	// see if we've reached our minimum number of peers.
	sampleTicker := time.NewTicker(backOff)
	defer sampleTicker.Stop()

	// We'll use the number of attempts and errors to determine if we need
	// to increase the time between discovery epochs.
	var epochErrors uint32 // To be used atomically.
	var epochAttempts uint32

	for {
		select {
		// The ticker has just woken us up, so we'll need to check if
		// we need to attempt to connect out to any more peers.
		case <-sampleTicker.C:
			// Obtain the current number of peers, so we can gauge
			// if we need to sample more peers or not.
			s.mu.RLock()
			numActivePeers := uint32(len(s.peersByPub))
			s.mu.RUnlock()

			// If we have enough peers, then we can loop back
			// around to the next round as we're done here.
			if numActivePeers >= numTargetPeers {
				continue
			}

			// If all of our attempts failed during this last back
			// off period, then we'll increase our backoff, up to
			// the 5 minute ceiling, to avoid an excessive number
			// of queries.
			//
			// TODO(roasbeef): add reverse policy too?
			if epochAttempts > 0 &&
				atomic.LoadUint32(&epochErrors) >= epochAttempts {

				sampleTicker.Stop()

				backOff *= 2
				if backOff > backOffCeiling {
					backOff = backOffCeiling
				}

				srvrLog.Debugf("Backing off peer bootstrapper to "+
					"%v", backOff)
				sampleTicker = time.NewTicker(backOff)
				continue
			}

			atomic.StoreUint32(&epochErrors, 0)
			epochAttempts = 0

			// Since we know we need more peers, we'll compute the
			// exact number we need to reach our threshold.
			numNeeded := numTargetPeers - numActivePeers

			srvrLog.Debugf("Attempting to obtain %v more network "+
				"peers", numNeeded)

			// With the number of peers we need calculated, we'll
			// query the network bootstrappers to sample a set of
			// random addrs for us.
			s.mu.RLock()
			ignoreList := make(map[autopilot.NodeID]struct{})
			for _, peer := range s.peersByPub {
				nID := autopilot.NewNodeID(peer.addr.IdentityKey)
				ignoreList[nID] = struct{}{}
			}
			s.mu.RUnlock()

			peerAddrs, err := discovery.MultiSourceBootstrap(
				ignoreList, numNeeded*2, bootStrappers...,
			)
			if err != nil {
				srvrLog.Errorf("Unable to retrieve bootstrap "+
					"peers: %v", err)
				continue
			}

			// Finally, we'll launch a new goroutine for each
			// prospective peer candidate.
			for _, addr := range peerAddrs {
				epochAttempts++

				go func(a *lnwire.NetAddress) {
					// TODO(roasbeef): can do AS, subnet,
					// country diversity, etc
					conn, err := brontide.Dial(s.identityPriv,
						a, cfg.net.Dial)
					if err != nil {
						srvrLog.Errorf("unable to connect "+
							"to %v: %v", a, err)
						atomic.AddUint32(&epochErrors, 1)
						return
					}

					s.OutboundPeerConnected(nil, conn)
				}(addr)
			}
		case <-s.quit:
			return
		}
	}
}

// initTorController initializes the Tor controller backed by lnd and
// automatically sets up a v2 onion service in order to listen for inbound
// connections over Tor.
func (s *server) initTorController() error {
	if err := s.torController.Start(); err != nil {
		return err
	}

	// Determine the different ports the server is listening on. The onion
	// service's virtual port will map to these ports and one will be picked
	// at random when the onion service is being accessed.
	listenPorts := make(map[int]struct{})
	for _, listenAddr := range s.listenAddrs {
		// At this point, the listen addresses should have already been
		// normalized, so it's safe to ignore the errors.
		_, portStr, _ := net.SplitHostPort(listenAddr)
		port, _ := strconv.Atoi(portStr)
		listenPorts[port] = struct{}{}
	}

	// Once the port mapping has been set, we can go ahead and automatically
	// create our onion service. The service's private key will be saved to
	// disk in order to regain access to this service when restarting `lnd`.
	virtToTargPorts := tor.VirtToTargPorts{defaultPeerPort: listenPorts}
	onionServiceAddrs, err := s.torController.AddOnionV2(
		cfg.Tor.V2PrivateKeyPath, virtToTargPorts,
	)
	if err != nil {
		return err
	}

	// Now that the onion service has been created, we'll add the different
	// onion addresses it can be reached at to our list of advertised
	// addresses.
	for _, addr := range onionServiceAddrs {
		s.currentNodeAnn.Addresses = append(
			s.currentNodeAnn.Addresses, addr,
		)
	}

	return nil
}

// genNodeAnnouncement generates and returns the current fully signed node
// announcement. If refresh is true, then the time stamp of the announcement
// will be updated in order to ensure it propagates through the network.
func (s *server) genNodeAnnouncement(
	refresh bool) (lnwire.NodeAnnouncement, error) {

	s.mu.Lock()
	defer s.mu.Unlock()

	if !refresh {
		return *s.currentNodeAnn, nil
	}

	var err error

	newStamp := uint32(time.Now().Unix())
	if newStamp <= s.currentNodeAnn.Timestamp {
		newStamp = s.currentNodeAnn.Timestamp + 1
	}

	s.currentNodeAnn.Timestamp = newStamp
	sig, err := discovery.SignAnnouncement(
		s.nodeSigner, s.identityPriv.PubKey(), s.currentNodeAnn,
	)
	if err != nil {
		return lnwire.NodeAnnouncement{}, err
	}

	s.currentNodeAnn.Signature, err = lnwire.NewSigFromSignature(sig)
	if err != nil {
		return lnwire.NodeAnnouncement{}, err
	}

	return *s.currentNodeAnn, nil
}
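
// nodeAddresses couples a node's identity public key with the set of network
// addresses we believe it can be reached at.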
type nodeAddresses struct {
	pubKey    *btcec.PublicKey
	addresses []net.Addr
}

// establishPersistentConnections attempts to establish persistent connections
// to all our direct channel collaborators. In order to promote liveness of
// our active channels, we instruct the connection manager to attempt to
// establish and maintain persistent connections to all our direct channel
// counterparties.
func (s *server) establishPersistentConnections() error {
	// nodeAddrsMap stores the combination of node public keys and
	// addresses that we'll attempt to reconnect to. PubKey strings are
	// used as keys since other PubKey forms can't be compared.
	nodeAddrsMap := map[string]*nodeAddresses{}

	// Iterate through the list of LinkNodes to find addresses we should
	// attempt to connect to based on our set of previous connections. Set
	// the reconnection port to the default peer port.
	linkNodes, err := s.chanDB.FetchAllLinkNodes()
	if err != nil && err != channeldb.ErrLinkNodesNotFound {
		return err
	}
	for _, node := range linkNodes {
		pubStr := string(node.IdentityPub.SerializeCompressed())
		nodeAddrs := &nodeAddresses{
			pubKey:    node.IdentityPub,
			addresses: node.Addresses,
		}
		nodeAddrsMap[pubStr] = nodeAddrs
	}

	// After checking our previous connections for addresses to connect to,
	// iterate through the nodes in our channel graph to find addresses
	// that have been added via NodeAnnouncement messages.
	chanGraph := s.chanDB.ChannelGraph()
	sourceNode, err := chanGraph.SourceNode()
	if err != nil {
		return err
	}
	// TODO(roasbeef): instead iterate over link nodes and query graph for
	// each of the nodes.
	err = sourceNode.ForEachChannel(nil, func(
		_ *bolt.Tx,
		_ *channeldb.ChannelEdgeInfo,
		policy, _ *channeldb.ChannelEdgePolicy) error {

		pubStr := string(policy.Node.PubKeyBytes[:])

		// Add addresses from channel graph/NodeAnnouncements to the
		// list of addresses we'll connect to. If there are duplicates
		// that have different ports specified, the port from the
		// channel graph should supersede the port from the link node.
		var addrs []net.Addr
		linkNodeAddrs, ok := nodeAddrsMap[pubStr]
		if ok {
			for _, lnAddress := range linkNodeAddrs.addresses {
				var addrHost string
				switch addr := lnAddress.(type) {
				case *net.TCPAddr:
					addrHost = addr.IP.String()
				case *tor.OnionAddr:
					addrHost = addr.OnionService
				default:
					continue
				}

				var addrMatched bool
				for _, polAddress := range policy.Node.Addresses {
					switch addr := polAddress.(type) {
					case *net.TCPAddr:
						if addr.IP.String() == addrHost {
							addrMatched = true
							addrs = append(addrs, addr)
						}
					case *tor.OnionAddr:
						if addr.OnionService == addrHost {
							addrMatched = true
							addrs = append(addrs, addr)
						}
					}
				}
				if !addrMatched {
					addrs = append(addrs, lnAddress)
				}
			}
		} else {
			for _, addr := range policy.Node.Addresses {
				switch addr.(type) {
				case *net.TCPAddr, *tor.OnionAddr:
					addrs = append(addrs, addr)
				}
			}
		}

		n := &nodeAddresses{
			addresses: addrs,
		}
		n.pubKey, err = policy.Node.PubKey()
		if err != nil {
			return err
		}

		nodeAddrsMap[pubStr] = n
		return nil
	})
	if err != nil && err != channeldb.ErrGraphNoEdgesFound {
		return err
	}

	// Acquire and hold server lock until all persistent connection requests
	// have been recorded and sent to the connection manager.
	s.mu.Lock()
	defer s.mu.Unlock()

	// Iterate through the combined list of addresses from prior links and
	// node announcements and attempt to reconnect to each node.
	for pubStr, nodeAddr := range nodeAddrsMap {
		// Add this peer to the set of peers we should maintain a
		// persistent connection with.
		s.persistentPeers[pubStr] = struct{}{}
		if _, ok := s.persistentPeersBackoff[pubStr]; !ok {
			s.persistentPeersBackoff[pubStr] = defaultBackoff
		}

		for _, address := range nodeAddr.addresses {
			// Create a wrapper address which couples the IP and
			// the pubkey so the brontide authenticated connection
			// can be established.
			lnAddr := &lnwire.NetAddress{
				IdentityKey: nodeAddr.pubKey,
				Address:     address,
			}
			srvrLog.Debugf("Attempting persistent connection to "+
				"channel peer %v", lnAddr)

			// Send the persistent connection request to the
			// connection manager, saving the request itself so we
			// can cancel/restart the process as needed.
			connReq := &connmgr.ConnReq{
				Addr:      lnAddr,
				Permanent: true,
			}

			s.persistentConnReqs[pubStr] = append(
				s.persistentConnReqs[pubStr], connReq)

			go s.connMgr.Connect(connReq)
		}
	}

	return nil
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// BroadcastMessage sends a request to the server to broadcast a set of
|
2018-06-06 13:18:44 +03:00
|
|
|
// messages to all peers other than the one specified by the `skips` parameter.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2018-06-06 13:18:44 +03:00
|
|
|
func (s *server) BroadcastMessage(skips map[routing.Vertex]struct{},
|
2017-10-17 00:53:38 +03:00
|
|
|
msgs ...lnwire.Message) error {
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
srvrLog.Debugf("Broadcasting %v messages", len(msgs))
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2018-06-06 13:18:44 +03:00
|
|
|
// Filter out peers found in the skips map. We synchronize access to
|
|
|
|
// peersByPub throughout this process to ensure we deliver messages to
|
|
|
|
// exact set of peers present at the time of invocation.
|
|
|
|
s.mu.RLock()
|
|
|
|
peers := make([]*peer, 0, len(s.peersByPub))
|
2017-12-26 18:25:35 +03:00
|
|
|
for _, sPeer := range s.peersByPub {
|
|
|
|
if skips != nil {
|
|
|
|
if _, ok := skips[sPeer.pubKeyBytes]; ok {
|
|
|
|
srvrLog.Tracef("Skipping %x in broadcast",
|
|
|
|
sPeer.pubKeyBytes[:])
|
|
|
|
continue
|
|
|
|
}
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
|
|
|
|
2018-06-06 13:18:44 +03:00
|
|
|
peers = append(peers, sPeer)
|
|
|
|
}
|
|
|
|
s.mu.RUnlock()
|
|
|
|
|
|
|
|
// Iterate over all known peers, dispatching a go routine to enqueue
|
|
|
|
// all messages to each of the peers.
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
for _, sPeer := range peers {
|
2017-10-17 00:53:38 +03:00
|
|
|
// Dispatch a go routine to enqueue all messages to this peer.
|
|
|
|
wg.Add(1)
|
|
|
|
s.wg.Add(1)
|
|
|
|
go s.sendPeerMessages(sPeer, msgs, &wg)
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
2017-08-11 07:20:51 +03:00
|
|
|
|
2017-10-17 00:53:38 +03:00
|
|
|
// Wait for all messages to have been dispatched before returning to
|
|
|
|
// caller.
|
|
|
|
wg.Wait()
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
return nil
|
2016-12-27 08:42:23 +03:00
|
|
|
}
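
// The following is a hypothetical usage sketch (not part of the original
// file) showing how a caller might broadcast a message to every peer except
// its source. It assumes routing.Vertex is a 33-byte array keyed by the
// serialized, compressed public key, matching the `skips` parameter above.
func exampleBroadcastExceptSource(s *server, source *btcec.PublicKey,
	msg lnwire.Message) error {

	// Build the skips set so the originating peer doesn't receive its own
	// message back.
	var sourceVertex routing.Vertex
	copy(sourceVertex[:], source.SerializeCompressed())

	skips := map[routing.Vertex]struct{}{
		sourceVertex: {},
	}

	// BroadcastMessage returns once the message has been enqueued to all
	// remaining peers.
	return s.BroadcastMessage(skips, msg)
}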
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// SendToPeer sends a message to the server telling it to send the specific set
|
2016-12-27 08:42:23 +03:00
|
|
|
// of messages to a particular peer. If the peer can't be found, then this
|
|
|
|
// method will return a non-nil error.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2017-10-17 00:53:38 +03:00
|
|
|
func (s *server) SendToPeer(target *btcec.PublicKey,
|
|
|
|
msgs ...lnwire.Message) error {
|
|
|
|
|
2018-06-06 12:25:14 +03:00
|
|
|
// Compute the target peer's identifier.
|
|
|
|
targetPubBytes := target.SerializeCompressed()
|
|
|
|
|
|
|
|
srvrLog.Tracef("Attempting to send msgs %v to: %x",
|
|
|
|
len(msgs), targetPubBytes)
|
|
|
|
|
|
|
|
// Lookup intended target in peersByPub, returning an error to the
|
|
|
|
// caller if the peer is unknown. Access to peersByPub is synchronized
|
|
|
|
// here to ensure we consider the exact set of peers present at the
|
|
|
|
// time of invocation.
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RLock()
|
2018-06-06 12:25:14 +03:00
|
|
|
targetPeer, err := s.findPeerByPubStr(string(targetPubBytes))
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RUnlock()
|
2018-06-06 12:25:14 +03:00
|
|
|
if err == ErrPeerNotConnected {
|
|
|
|
srvrLog.Errorf("unable to send message to %x, "+
|
|
|
|
"peer is not connected", targetPubBytes)
|
2018-01-23 03:04:40 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-06-06 12:25:14 +03:00
|
|
|
// Send messages to the peer and get the error channels that will be
|
|
|
|
// signaled by the peer's write handler.
|
|
|
|
errChans := s.sendPeerMessages(targetPeer, msgs, nil)
|
|
|
|
|
2018-01-23 03:04:40 +03:00
|
|
|
// With the server's shared lock released, we now handle all of the
|
|
|
|
// errors being returned from the target peer's write handler.
|
|
|
|
for _, errChan := range errChans {
|
|
|
|
select {
|
|
|
|
case err := <-errChan:
|
|
|
|
return err
|
|
|
|
case <-targetPeer.quit:
|
2018-05-08 04:36:08 +03:00
|
|
|
return ErrPeerExiting
|
2018-01-23 03:04:40 +03:00
|
|
|
case <-s.quit:
|
|
|
|
return ErrServerShuttingDown
|
|
|
|
}
|
|
|
|
}
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2018-01-23 03:04:40 +03:00
|
|
|
return nil
|
2017-08-09 02:51:41 +03:00
|
|
|
}
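
// A minimal, hypothetical sketch (not part of the original file) of a
// synchronous send to a single peer, assuming lnwire.NewPing constructs a
// valid Ping message. SendToPeer returns a non-nil error if the peer is not
// connected or the send fails.
func exampleSendPing(s *server, target *btcec.PublicKey) error {
	// Request zero pong bytes; the message content is irrelevant here.
	ping := lnwire.NewPing(0)
	return s.SendToPeer(target, ping)
}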
|
|
|
|
|
2017-09-13 15:38:06 +03:00
|
|
|
// NotifyWhenOnline can be called by other subsystems to get notified when a
|
|
|
|
// particular peer comes online.
|
|
|
|
//
|
|
|
|
// NOTE: This function is safe for concurrent access.
|
2017-10-17 00:53:38 +03:00
|
|
|
func (s *server) NotifyWhenOnline(peer *btcec.PublicKey,
|
|
|
|
connectedChan chan<- struct{}) {
|
2017-09-13 15:38:06 +03:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
// Compute the target peer's identifier.
|
|
|
|
pubStr := string(peer.SerializeCompressed())
|
|
|
|
|
|
|
|
// Check if peer is connected.
|
|
|
|
_, ok := s.peersByPub[pubStr]
|
|
|
|
if ok {
|
|
|
|
// Connected, can return early.
|
2018-03-30 23:14:22 +03:00
|
|
|
srvrLog.Debugf("Notifying that peer %x is online",
|
|
|
|
peer.SerializeCompressed())
|
2017-09-13 15:38:06 +03:00
|
|
|
close(connectedChan)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Not connected, store this listener such that it can be notified when
|
|
|
|
// the peer comes online.
|
|
|
|
s.peerConnectedListeners[pubStr] = append(
|
|
|
|
s.peerConnectedListeners[pubStr], connectedChan)
|
|
|
|
}
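
// A hypothetical sketch (not part of the original file) showing how a
// subsystem might combine NotifyWhenOnline with SendToPeer: block until the
// target peer is available, then deliver the message, bailing out if the
// server is shutting down.
func exampleWaitThenSend(s *server, target *btcec.PublicKey,
	msg lnwire.Message) error {

	online := make(chan struct{})
	s.NotifyWhenOnline(target, online)

	select {
	case <-online:
		// The peer is now connected, so the send should succeed.
		return s.SendToPeer(target, msg)
	case <-s.quit:
		return ErrServerShuttingDown
	}
}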
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// sendPeerMessages enqueues a list of messages into the outgoingQueue of the
|
2017-10-17 00:53:38 +03:00
|
|
|
// `targetPeer`. This method supports additional broadcast-level
|
|
|
|
// synchronization by using the additional `wg` to coordinate a particular
|
2017-11-16 05:24:59 +03:00
|
|
|
// broadcast. This method only enqueues the messages and returns a slice of
|
|
|
|
// error channels that the caller can wait on for delivery results. When used
|
|
|
|
// for broadcasting, it should be run as a goroutine (see comment below) and no
|
|
|
|
// error channels are created, as the delivery results are not of importance.
|
2017-10-17 00:53:38 +03:00
|
|
|
//
|
|
|
|
// NOTE: This method must be invoked with a non-nil `wg` if it is spawned as a
|
|
|
|
// go routine--both `wg` and the server's WaitGroup should be incremented
|
|
|
|
// beforehand. If this method is not spawned as a go routine, the provided
|
|
|
|
// `wg` should be nil, and the server's WaitGroup should not be tracking this
|
|
|
|
// invocation.
|
|
|
|
func (s *server) sendPeerMessages(
|
|
|
|
targetPeer *peer,
|
|
|
|
msgs []lnwire.Message,
|
2017-11-16 05:24:59 +03:00
|
|
|
wg *sync.WaitGroup) []chan error {
|
2017-10-17 00:53:38 +03:00
|
|
|
|
|
|
|
// If a WaitGroup is provided, we assume that this method was spawned
|
|
|
|
// as a go routine, and that it is being tracked by both the server's
|
|
|
|
// WaitGroup, as well as the broadcast-level WaitGroup `wg`. In this
|
|
|
|
// event, we defer a call to Done on both WaitGroups to 1) ensure that
|
|
|
|
// the server will be able to shut down after its goroutines exit, and 2)
|
|
|
|
// so the server can return to the caller of BroadcastMessage.
|
2018-05-08 04:36:08 +03:00
|
|
|
isBroadcast := wg != nil
|
|
|
|
if isBroadcast {
|
2017-10-17 00:53:38 +03:00
|
|
|
defer s.wg.Done()
|
|
|
|
defer wg.Done()
|
|
|
|
}
|
|
|
|
|
2017-11-16 05:24:59 +03:00
|
|
|
// We queue each message, creating a slice of error channels that
|
|
|
|
// can be inspected after every message is successfully added to
|
|
|
|
// the queue.
|
|
|
|
var errChans []chan error
|
2017-08-09 02:51:41 +03:00
|
|
|
for _, msg := range msgs {
|
2018-05-08 04:36:08 +03:00
|
|
|
// If this is not a broadcast, create error channels to provide
|
|
|
|
// synchronous feedback regarding the delivery of the message to
|
|
|
|
// a specific peer.
|
|
|
|
var errChan chan error
|
|
|
|
if !isBroadcast {
|
|
|
|
errChan = make(chan error, 1)
|
|
|
|
errChans = append(errChans, errChan)
|
|
|
|
}
|
|
|
|
|
2017-11-16 05:24:59 +03:00
|
|
|
targetPeer.queueMsg(msg, errChan)
|
2016-12-27 08:42:23 +03:00
|
|
|
}
|
2017-11-16 05:24:59 +03:00
|
|
|
|
|
|
|
return errChans
|
2016-12-27 08:42:23 +03:00
|
|
|
}
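
// A short, hypothetical sketch (not part of the original file) restating the
// wait-group protocol described above: for broadcast-style usage, both the
// broadcast-level WaitGroup and the server's WaitGroup are incremented before
// spawning the goroutine, and the returned error channels are ignored.
func exampleBroadcastStyleSend(s *server, peers []*peer, msgs []lnwire.Message) {
	var wg sync.WaitGroup
	for _, sPeer := range peers {
		wg.Add(1)
		s.wg.Add(1)
		go s.sendPeerMessages(sPeer, msgs, &wg)
	}

	// Wait until every peer's messages have been enqueued.
	wg.Wait()
}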
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// FindPeer will return the peer that corresponds to the passed in public key.
|
2017-01-13 06:40:38 +03:00
|
|
|
// This function is used by the funding manager, allowing it to update the
|
|
|
|
// daemon's local representation of the remote peer.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer, error) {
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
pubStr := string(peerKey.SerializeCompressed())
|
2017-01-13 06:40:38 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
return s.findPeerByPubStr(pubStr)
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// FindPeerByPubStr will return the peer that corresponds to the passed pubStr,
|
|
|
|
// which should be a string representation of the peer's serialized, compressed
|
|
|
|
// public key.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
2017-08-31 11:15:39 +03:00
|
|
|
func (s *server) FindPeerByPubStr(pubStr string) (*peer, error) {
|
2018-01-23 03:04:40 +03:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2017-01-13 06:40:38 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
return s.findPeerByPubStr(pubStr)
|
2017-08-09 02:51:41 +03:00
|
|
|
}
|
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// findPeerByPubStr is an internal method that retrieves the specified peer from
|
|
|
|
// the server's internal state using the peer's serialized public key string.
|
|
|
|
func (s *server) findPeerByPubStr(pubStr string) (*peer, error) {
|
|
|
|
peer, ok := s.peersByPub[pubStr]
|
|
|
|
if !ok {
|
2018-04-15 23:19:15 +03:00
|
|
|
return nil, ErrPeerNotConnected
|
2017-01-13 06:40:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return peer, nil
|
|
|
|
}
|
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// peerTerminationWatcher waits until a peer has been disconnected unexpectedly,
|
|
|
|
// and then cleans up all resources allocated to the peer, notifies relevant
|
|
|
|
// sub-systems of its demise, and finally handles re-connecting to the peer if
|
|
|
|
// it's persistent. If the server intentionally disconnects a peer, it should
|
|
|
|
// have a corresponding entry in the ignorePeerTermination map which will cause
|
|
|
|
// the cleanup routine to exit early.
|
2017-08-11 07:20:51 +03:00
|
|
|
//
|
2017-08-31 11:15:39 +03:00
|
|
|
// NOTE: This MUST be launched as a goroutine.
|
2017-04-24 05:38:34 +03:00
|
|
|
func (s *server) peerTerminationWatcher(p *peer) {
|
2017-08-31 11:15:39 +03:00
|
|
|
defer s.wg.Done()
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2017-04-24 05:38:34 +03:00
|
|
|
p.WaitForDisconnect()
|
|
|
|
|
|
|
|
srvrLog.Debugf("Peer %v has been disconnected", p)
|
|
|
|
|
2017-07-31 00:21:49 +03:00
|
|
|
// If the server is exiting then we can bail out early ourselves as all
|
|
|
|
// the other sub-systems will already be shutting down.
|
2017-08-09 02:51:41 +03:00
|
|
|
if s.Stopped() {
|
2017-07-31 00:21:49 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-11-26 22:25:26 +03:00
|
|
|
// Next, we'll cancel all pending funding reservations with this node.
|
|
|
|
// If we tried to initiate any funding flows that haven't yet finished,
|
|
|
|
// then we need to unlock those committed outputs so they're still
|
|
|
|
// available for use.
|
|
|
|
s.fundingMgr.CancelPeerReservations(p.PubKey())
|
|
|
|
|
2018-06-09 06:30:17 +03:00
|
|
|
pubKey := p.addr.IdentityKey
|
|
|
|
|
2018-04-17 05:12:48 +03:00
|
|
|
// We'll also inform the gossiper that this peer is no longer active,
|
|
|
|
// so we don't need to maintain sync state for it any longer.
|
2018-06-09 06:30:17 +03:00
|
|
|
s.authGossiper.PruneSyncState(pubKey)
|
2018-04-17 05:12:48 +03:00
|
|
|
|
2017-05-02 23:04:58 +03:00
|
|
|
// Tell the switch to remove all links associated with this peer.
|
2017-05-06 02:02:03 +03:00
|
|
|
// Passing nil as the target link indicates that all links associated
|
|
|
|
// with this interface should be closed.
|
2017-07-31 00:21:49 +03:00
|
|
|
//
|
|
|
|
// TODO(roasbeef): instead add a PurgeInterfaceLinks function?
|
2017-06-17 01:10:17 +03:00
|
|
|
links, err := p.server.htlcSwitch.GetLinksByInterface(p.pubKeyBytes)
|
2017-05-02 23:04:58 +03:00
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to get channel links: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, link := range links {
|
|
|
|
err := p.server.htlcSwitch.RemoveLink(link.ChanID())
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to remove channel link: %v",
|
|
|
|
err)
|
|
|
|
}
|
|
|
|
}
|
2017-04-24 05:38:34 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2017-04-24 05:38:34 +03:00
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// If the server has already removed this peer, we can short circuit the
|
|
|
|
// peer termination watcher and skip cleanup.
|
|
|
|
if _, ok := s.ignorePeerTermination[p]; ok {
|
|
|
|
delete(s.ignorePeerTermination, p)
|
2018-05-08 06:18:15 +03:00
|
|
|
|
|
|
|
pubKey := p.PubKey()
|
|
|
|
pubStr := string(pubKey[:])
|
|
|
|
|
|
|
|
// If a connection callback is present, we'll go ahead and
|
|
|
|
// execute it now that the previous peer has fully disconnected. If
|
|
|
|
// the callback is not present, this likely implies the peer was
|
|
|
|
// purposefully disconnected via RPC, and that no reconnect
|
|
|
|
// should be attempted.
|
|
|
|
connCallback, ok := s.scheduledPeerConnection[pubStr]
|
|
|
|
if ok {
|
|
|
|
delete(s.scheduledPeerConnection, pubStr)
|
|
|
|
connCallback()
|
|
|
|
}
|
2017-08-31 11:15:39 +03:00
|
|
|
return
|
2017-04-24 05:38:34 +03:00
|
|
|
}
|
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// First, clean up any remaining state the server has regarding the peer
|
|
|
|
// in question.
|
|
|
|
s.removePeer(p)
|
|
|
|
|
2017-04-24 05:38:34 +03:00
|
|
|
// Next, check to see if this is a persistent peer or not.
|
2018-06-09 06:30:17 +03:00
|
|
|
pubStr := string(pubKey.SerializeCompressed())
|
2017-05-06 02:02:03 +03:00
|
|
|
_, ok := s.persistentPeers[pubStr]
|
|
|
|
if ok {
|
2017-11-18 02:31:44 +03:00
|
|
|
// We'll only need to re-launch a connection request if one
|
|
|
|
// isn't already currently pending.
|
|
|
|
if _, ok := s.persistentConnReqs[pubStr]; ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-06-09 06:30:17 +03:00
|
|
|
// We'll ensure that we locate an advertised address to use
|
|
|
|
// within the peer's address for reconnection purposes.
|
|
|
|
//
|
|
|
|
// TODO(roasbeef): use them all?
|
|
|
|
if p.inbound {
|
|
|
|
advertisedAddr, err := s.fetchNodeAdvertisedAddr(
|
|
|
|
pubKey,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("Unable to retrieve advertised "+
|
|
|
|
"address for node %x: %v",
|
|
|
|
pubKey.SerializeCompressed(), err)
|
|
|
|
} else {
|
|
|
|
p.addr.Address = advertisedAddr
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-02-01 11:48:38 +03:00
|
|
|
// Otherwise, we'll launch a new connection request in order to
|
|
|
|
// attempt to maintain a persistent connection with this peer.
|
2017-04-24 05:38:34 +03:00
|
|
|
connReq := &connmgr.ConnReq{
|
|
|
|
Addr: p.addr,
|
|
|
|
Permanent: true,
|
|
|
|
}
|
2017-08-09 02:51:41 +03:00
|
|
|
s.persistentConnReqs[pubStr] = append(
|
|
|
|
s.persistentConnReqs[pubStr], connReq)
|
2017-04-24 05:38:34 +03:00
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
// Record the computed backoff in the backoff map.
|
|
|
|
backoff := s.nextPeerBackoff(pubStr)
|
|
|
|
s.persistentPeersBackoff[pubStr] = backoff
|
|
|
|
|
|
|
|
// Initialize a retry canceller for this peer if one does not
|
|
|
|
// exist.
|
|
|
|
cancelChan, ok := s.persistentRetryCancels[pubStr]
|
2018-03-31 05:11:04 +03:00
|
|
|
if !ok {
|
2018-03-31 02:19:52 +03:00
|
|
|
cancelChan = make(chan struct{})
|
|
|
|
s.persistentRetryCancels[pubStr] = cancelChan
|
2018-03-31 05:11:04 +03:00
|
|
|
}
|
|
|
|
|
2018-02-01 11:48:38 +03:00
|
|
|
// We choose not to add this goroutine to the server's wait group since the
|
|
|
|
// Connect call can stall for arbitrarily long if we shut down while an
|
|
|
|
// outbound connection attempt is being made.
|
|
|
|
go func() {
|
|
|
|
srvrLog.Debugf("Scheduling connection re-establishment to "+
|
2018-03-31 05:11:04 +03:00
|
|
|
"persistent peer %v in %s", p, backoff)
|
2018-02-01 11:48:38 +03:00
|
|
|
|
|
|
|
select {
|
2018-03-31 05:11:04 +03:00
|
|
|
case <-time.After(backoff):
|
2018-03-31 02:19:52 +03:00
|
|
|
case <-cancelChan:
|
|
|
|
return
|
2018-02-01 11:48:38 +03:00
|
|
|
case <-s.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
srvrLog.Debugf("Attempting to re-establish persistent "+
|
|
|
|
"connection to peer %v", p)
|
|
|
|
|
|
|
|
s.connMgr.Connect(connReq)
|
|
|
|
}()
|
2017-04-24 05:38:34 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
// nextPeerBackoff computes the next backoff duration for a peer's pubkey using
|
|
|
|
// exponential backoff. If no previous backoff was known, the default is
|
|
|
|
// returned.
|
|
|
|
func (s *server) nextPeerBackoff(pubStr string) time.Duration {
|
|
|
|
// Now, determine the appropriate backoff to use for the retry.
|
|
|
|
backoff, ok := s.persistentPeersBackoff[pubStr]
|
|
|
|
if !ok {
|
|
|
|
// If no previous backoff was found, use the default.
|
|
|
|
return defaultBackoff
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, use the previous backoff to compute the
|
|
|
|
// subsequent randomized exponential backoff duration.
|
|
|
|
return computeNextBackoff(backoff)
|
|
|
|
}
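
// computeNextBackoff is defined elsewhere in this file. The following is a
// hypothetical sketch (not the actual implementation) of how a capped,
// randomized exponential backoff could be computed. The cap and jitter scheme
// below are illustrative assumptions only.
const maximumBackoffSketch = time.Hour

func computeNextBackoffSketch(currBackoff time.Duration) time.Duration {
	// Double the current backoff, capping it at the assumed maximum.
	nextBackoff := 2 * currBackoff
	if nextBackoff > maximumBackoffSketch {
		nextBackoff = maximumBackoffSketch
	}

	// Add up to +/-10% jitter so that peers don't retry in lockstep.
	margin := int64(nextBackoff) / 10
	if margin == 0 {
		return nextBackoff
	}

	jitter, err := rand.Int(rand.Reader, big.NewInt(2*margin))
	if err != nil {
		// If randomness is unavailable, fall back to the un-jittered
		// value.
		return nextBackoff
	}

	return nextBackoff + time.Duration(jitter.Int64()-margin)
}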
|
|
|
|
|
2017-10-19 01:18:02 +03:00
|
|
|
// shouldRequestGraphSync returns true if the server deems it necessary that
|
|
|
|
// we sync channel graph state with the remote peer. This method is used to
|
|
|
|
// avoid _always_ syncing channel graph state with each peer that connects.
|
|
|
|
//
|
|
|
|
// NOTE: This MUST be called with the server's mutex held.
|
|
|
|
func (s *server) shouldRequestGraphSync() bool {
|
|
|
|
// Initially, we'll only request a graph sync if we have at most two
|
|
|
|
// peers.
|
|
|
|
return len(s.peersByPub) <= 2
|
|
|
|
}
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
// peerConnected is a function that handles initializing a newly connected
|
|
|
|
// peer by adding it to the server's global list of all active peers, and
|
2018-04-03 08:16:04 +03:00
|
|
|
// starting all the goroutines the peer needs to function properly. The inbound
|
|
|
|
// boolean should be true if the peer initiated the connection to us.
|
2017-08-11 07:20:51 +03:00
|
|
|
func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
|
2017-08-09 02:51:41 +03:00
|
|
|
inbound bool) {
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
brontideConn := conn.(*brontide.Conn)
|
2018-03-31 01:59:05 +03:00
|
|
|
addr := conn.RemoteAddr()
|
|
|
|
pubKey := brontideConn.RemotePub()
|
|
|
|
|
2018-06-09 07:16:21 +03:00
|
|
|
srvrLog.Infof("finalizing connection to %x, inbound=%v",
|
|
|
|
pubKey.SerializeCompressed(), inbound)
|
|
|
|
|
2017-10-25 22:06:26 +03:00
|
|
|
peerAddr := &lnwire.NetAddress{
|
2018-03-31 01:59:05 +03:00
|
|
|
IdentityKey: pubKey,
|
|
|
|
Address: addr,
|
2017-10-25 22:06:26 +03:00
|
|
|
ChainNet: activeNetParams.Net,
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
2017-10-19 01:16:03 +03:00
|
|
|
// With the brontide connection established, we'll now craft the local
|
|
|
|
// feature vector to advertise to the remote node.
|
|
|
|
localFeatures := lnwire.NewRawFeatureVector()
|
2017-10-19 01:18:02 +03:00
|
|
|
|
2018-04-17 05:11:40 +03:00
|
|
|
// We'll signal that we understand the data loss protection feature,
|
|
|
|
// and also that we support the new gossip query features.
|
|
|
|
localFeatures.Set(lnwire.DataLossProtectOptional)
|
|
|
|
localFeatures.Set(lnwire.GossipQueriesOptional)
|
|
|
|
|
|
|
|
// We'll only request a full channel graph sync if we detect that
|
2017-10-19 01:18:02 +03:00
|
|
|
// we aren't fully synced yet.
|
|
|
|
if s.shouldRequestGraphSync() {
|
2018-04-17 05:11:40 +03:00
|
|
|
// TODO(roasbeef): only do so if gossiper doesn't have active
|
|
|
|
// peers?
|
2017-10-19 01:18:02 +03:00
|
|
|
localFeatures.Set(lnwire.InitialRoutingSync)
|
|
|
|
}
|
|
|
|
|
2017-10-19 01:16:03 +03:00
|
|
|
// Now that we've established a connection, create a peer, and add it to
|
|
|
|
// the set of currently active peers.
|
|
|
|
p, err := newPeer(conn, connReq, s, peerAddr, inbound, localFeatures)
|
2016-12-15 05:11:31 +03:00
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to create peer %v", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(roasbeef): update IP address for link-node
|
|
|
|
// * also mark last-seen, do it one single transaction?
|
|
|
|
|
2017-04-24 05:38:34 +03:00
|
|
|
// Attempt to start the peer; if we're unable to do so, then disconnect
|
|
|
|
// this peer.
|
2017-02-22 12:10:07 +03:00
|
|
|
if err := p.Start(); err != nil {
|
2017-07-12 16:44:17 +03:00
|
|
|
p.Disconnect(errors.Errorf("unable to start peer: %v", err))
|
2017-02-16 15:39:38 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
s.addPeer(p)
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
2017-04-24 05:38:34 +03:00
|
|
|
// shouldDropLocalConnection determines if our local connection to a remote peer
|
|
|
|
// should be dropped in the case of concurrent connection establishment. In
|
|
|
|
// order to deterministically decide which connection should be dropped, we'll
|
|
|
|
// utilize the ordering of the local and remote public key. If we didn't use
|
|
|
|
// such a tie breaker, then we risk _both_ connections erroneously being
|
|
|
|
// dropped.
|
|
|
|
func shouldDropLocalConnection(local, remote *btcec.PublicKey) bool {
|
|
|
|
localPubBytes := local.SerializeCompressed()
|
|
|
|
remotePubBytes := remote.SerializeCompressed()
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// The connection that comes from the node with a "smaller" pubkey
|
|
|
|
// should be kept. Therefore, if our pubkey is "greater" than theirs, we
|
|
|
|
// should drop our established connection.
|
2017-04-24 05:38:34 +03:00
|
|
|
return bytes.Compare(localPubBytes, remotePubBytes) > 0
|
|
|
|
}
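
// A hypothetical sketch (not part of the original file) illustrating the
// tie-breaking property above: for any two distinct keys, exactly one side
// decides to drop its locally initiated connection, so concurrent dials
// collapse to a single surviving connection.
func exampleTieBreak(ourKey, theirKey *btcec.PublicKey) {
	weDrop := shouldDropLocalConnection(ourKey, theirKey)
	theyDrop := shouldDropLocalConnection(theirKey, ourKey)

	// Unless the keys are identical, exactly one of the two flags is true.
	srvrLog.Debugf("we drop our conn: %v, they drop theirs: %v",
		weDrop, theyDrop)
}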
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// InboundPeerConnected initializes a new peer in response to a new inbound
|
2016-12-15 05:11:31 +03:00
|
|
|
// connection.
|
2017-08-09 02:51:41 +03:00
|
|
|
//
|
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) InboundPeerConnected(conn net.Conn) {
|
|
|
|
// Exit early if we have already been instructed to shut down, as this
|
|
|
|
// prevents any delayed callbacks from accidentally registering peers.
|
|
|
|
if s.Stopped() {
|
|
|
|
return
|
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2017-05-11 03:42:53 +03:00
|
|
|
nodePub := conn.(*brontide.Conn).RemotePub()
|
|
|
|
pubStr := string(nodePub.SerializeCompressed())
|
2017-08-09 02:51:41 +03:00
|
|
|
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2018-06-09 06:34:31 +03:00
|
|
|
// If we already have an outbound connection to this peer, then ignore
|
2017-08-09 02:51:41 +03:00
|
|
|
// this new connection.
|
2018-06-09 06:34:31 +03:00
|
|
|
if _, ok := s.outboundPeers[pubStr]; ok {
|
|
|
|
srvrLog.Debugf("Already have outbound connection for %v, "+
|
|
|
|
"ignoring inbound connection", nodePub.SerializeCompressed())
|
|
|
|
|
2017-05-11 03:42:53 +03:00
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-05-08 06:18:15 +03:00
|
|
|
// If we already have a valid connection that is scheduled to take
|
|
|
|
// precedence once the prior peer has finished disconnecting, we'll
|
|
|
|
// ignore this connection.
|
|
|
|
if _, ok := s.scheduledPeerConnection[pubStr]; ok {
|
|
|
|
srvrLog.Debugf("Ignoring connection, peer already scheduled")
|
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-14 00:41:54 +03:00
|
|
|
srvrLog.Infof("New inbound connection from %v", conn.RemoteAddr())
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2017-10-06 02:14:07 +03:00
|
|
|
// Check to see if we already have a connection with this peer. If so,
|
|
|
|
// we may need to drop our existing connection. This prevents us from
|
|
|
|
// having duplicate connections to the same peer. We forgo adding a
|
|
|
|
// default case as we expect these to be the only error values returned
|
|
|
|
// from findPeerByPubStr.
|
2017-08-31 11:15:39 +03:00
|
|
|
connectedPeer, err := s.findPeerByPubStr(pubStr)
|
|
|
|
switch err {
|
2018-04-15 23:19:15 +03:00
|
|
|
case ErrPeerNotConnected:
|
2017-08-31 11:15:39 +03:00
|
|
|
// We were unable to locate an existing connection with the
|
|
|
|
// target peer, proceed to connect.
|
2018-06-09 07:16:21 +03:00
|
|
|
s.cancelConnReqs(pubStr, nil)
|
2018-04-03 08:16:04 +03:00
|
|
|
s.peerConnected(conn, nil, true)
|
2017-08-31 11:15:39 +03:00
|
|
|
|
|
|
|
case nil:
|
2017-10-06 02:14:07 +03:00
|
|
|
// We already have a connection with the incoming peer. If the
|
|
|
|
// connection we've already established should be kept, then
|
|
|
|
// we'll close out this connection s.t there's only a single
|
|
|
|
// connection between us.
|
2018-03-31 02:19:52 +03:00
|
|
|
localPub := s.identityPriv.PubKey()
|
2017-04-24 05:38:34 +03:00
|
|
|
if !shouldDropLocalConnection(localPub, nodePub) {
|
|
|
|
srvrLog.Warnf("Received inbound connection from "+
|
|
|
|
"peer %x, but already connected, dropping conn",
|
|
|
|
nodePub.SerializeCompressed())
|
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, if we should drop the connection, then we'll
|
2017-10-06 02:14:07 +03:00
|
|
|
// disconnect our already connected peer.
|
2017-04-24 05:38:34 +03:00
|
|
|
srvrLog.Debugf("Disconnecting stale connection to %v",
|
|
|
|
connectedPeer)
|
2017-08-09 02:51:41 +03:00
|
|
|
|
2018-06-09 07:16:21 +03:00
|
|
|
s.cancelConnReqs(pubStr, nil)
|
|
|
|
|
2017-08-31 11:15:39 +03:00
|
|
|
// Remove the current peer from the server's internal state and
|
|
|
|
// signal that the peer termination watcher does not need to
|
|
|
|
// execute for this peer.
|
2017-08-09 02:51:41 +03:00
|
|
|
s.removePeer(connectedPeer)
|
2017-08-31 11:15:39 +03:00
|
|
|
s.ignorePeerTermination[connectedPeer] = struct{}{}
|
2018-05-08 06:18:15 +03:00
|
|
|
s.scheduledPeerConnection[pubStr] = func() {
|
2018-04-03 08:16:04 +03:00
|
|
|
s.peerConnected(conn, nil, true)
|
2018-05-08 06:18:15 +03:00
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
// OutboundPeerConnected initializes a new peer in response to a new outbound
|
2016-12-15 05:11:31 +03:00
|
|
|
// connection.
//
|
2017-08-09 02:51:41 +03:00
|
|
|
// NOTE: This function is safe for concurrent access.
|
|
|
|
func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) {
|
|
|
|
// Exit early if we have already been instructed to shut down, as this
|
|
|
|
// prevents any delayed callbacks from accidentally registering peers.
|
|
|
|
if s.Stopped() {
|
|
|
|
return
|
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
|
|
|
|
nodePub := conn.(*brontide.Conn).RemotePub()
|
|
|
|
pubStr := string(nodePub.SerializeCompressed())
|
2017-04-24 05:38:34 +03:00
|
|
|
|
2017-08-09 02:51:41 +03:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2018-06-09 06:34:31 +03:00
|
|
|
// If we already have an inbound connection to this peer, then ignore
|
2017-04-24 05:38:34 +03:00
|
|
|
// this new connection.
|
2018-06-09 06:34:31 +03:00
|
|
|
if _, ok := s.inboundPeers[pubStr]; ok {
|
|
|
|
srvrLog.Debugf("Already have inbound connection for %v, "+
|
|
|
|
"ignoring outbound connection",
|
|
|
|
nodePub.SerializeCompressed())
|
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
if connReq != nil {
|
|
|
|
s.connMgr.Remove(connReq.ID())
|
|
|
|
}
|
2017-04-24 05:38:34 +03:00
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if _, ok := s.persistentConnReqs[pubStr]; !ok && connReq != nil {
|
|
|
|
srvrLog.Debugf("Ignoring cancelled outbound connection")
|
2018-03-31 02:19:52 +03:00
|
|
|
s.connMgr.Remove(connReq.ID())
|
2017-01-24 07:32:49 +03:00
|
|
|
conn.Close()
|
2016-12-15 05:11:31 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-05-08 06:18:15 +03:00
|
|
|
// If we already have a valid connection that is scheduled to take
|
|
|
|
// precedence once the prior peer has finished disconnecting, we'll
|
|
|
|
// ignore this connection.
|
|
|
|
if _, ok := s.scheduledPeerConnection[pubStr]; ok {
|
|
|
|
srvrLog.Debugf("Ignoring connection, peer already scheduled")
|
2018-06-09 06:36:41 +03:00
|
|
|
|
|
|
|
if connReq != nil {
|
|
|
|
s.connMgr.Remove(connReq.ID())
|
|
|
|
}
|
|
|
|
|
2018-05-08 06:18:15 +03:00
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-24 05:38:34 +03:00
|
|
|
srvrLog.Infof("Established connection to: %v", conn.RemoteAddr())
|
|
|
|
|
2018-03-31 02:19:52 +03:00
|
|
|
if connReq != nil {
|
|
|
|
// A successful connection was returned by the connmgr.
|
|
|
|
// Immediately cancel all pending requests, excluding the
|
|
|
|
// outbound connection we just established.
|
|
|
|
ignore := connReq.ID()
|
|
|
|
s.cancelConnReqs(pubStr, &ignore)
|
|
|
|
} else {
|
|
|
|
// This was a successful connection made by some other
|
|
|
|
// subsystem. Remove all requests being managed by the connmgr.
|
|
|
|
s.cancelConnReqs(pubStr, nil)
|
2017-04-24 05:38:34 +03:00
|
|
|
}
|
|
|
|
|
2017-10-06 02:14:07 +03:00
|
|
|
// If we already have a connection with this peer, decide whether or not
|
|
|
|
// we need to drop the stale connection. We forgo adding a default case
|
|
|
|
// as we expect these to be the only error values returned from
|
|
|
|
// findPeerByPubStr.
|
2017-08-31 11:15:39 +03:00
|
|
|
connectedPeer, err := s.findPeerByPubStr(pubStr)
|
|
|
|
switch err {
|
2018-04-15 23:19:15 +03:00
|
|
|
case ErrPeerNotConnected:
|
2017-08-31 11:15:39 +03:00
|
|
|
// We were unable to locate an existing connection with the
|
|
|
|
// target peer, proceed to connect.
|
2018-04-03 08:16:04 +03:00
|
|
|
s.peerConnected(conn, connReq, false)
|
2017-08-31 11:15:39 +03:00
|
|
|
|
|
|
|
case nil:
|
|
|
|
// We already have a connection open with the target peer.
|
2017-04-24 05:38:34 +03:00
|
|
|
// If our (this) connection should be dropped, then we'll do
|
|
|
|
// so, in order to ensure we don't have any duplicate
|
|
|
|
// connections.
|
2018-03-31 02:19:52 +03:00
|
|
|
localPub := s.identityPriv.PubKey()
|
2017-04-24 05:38:34 +03:00
|
|
|
if shouldDropLocalConnection(localPub, nodePub) {
|
|
|
|
srvrLog.Warnf("Established outbound connection to "+
|
|
|
|
"peer %x, but already connected, dropping conn",
|
|
|
|
nodePub.SerializeCompressed())
|
2017-08-16 03:49:10 +03:00
|
|
|
if connReq != nil {
|
|
|
|
s.connMgr.Remove(connReq.ID())
|
|
|
|
}
|
			conn.Close()
			return
		}

		// Otherwise, _their_ connection should be dropped. So we'll
		// disconnect the peer and send the now obsolete peer to the
		// server for garbage collection.
		srvrLog.Debugf("Disconnecting stale connection to %v",
			connectedPeer)

		// Remove the current peer from the server's internal state and
		// signal that the peer termination watcher does not need to
		// execute for this peer.
		s.removePeer(connectedPeer)
		s.ignorePeerTermination[connectedPeer] = struct{}{}
		s.scheduledPeerConnection[pubStr] = func() {
			s.peerConnected(conn, connReq, false)
		}
	}
}
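
// dropLocalConnSketch is a hypothetical illustration (a minimal sketch, not
// the helper the call above actually uses) of the deterministic tie-break
// that shouldDropLocalConnection provides: when two peers dial each other
// concurrently, both sides must agree on which duplicate connection dies, so
// the decision is derived purely from the two public keys. Here the rule is
// assumed to be a lexicographic comparison of the compressed serializations;
// the real helper may order the keys differently.
func dropLocalConnSketch(local, remote *btcec.PublicKey) bool {
	// Drop our own connection if and only if our serialized key compares
	// lower than the remote's. The remote peer evaluates the same rule
	// with the arguments swapped, so exactly one side keeps its
	// connection.
	return bytes.Compare(
		local.SerializeCompressed(),
		remote.SerializeCompressed(),
	) < 0
}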

// UnassignedConnID is the default connection ID that a request can have before
// it actually is submitted to the connmgr.
// TODO(conner): move into connmgr package, or better, add connmgr method for
// generating atomic IDs
const UnassignedConnID uint64 = 0

// cancelConnReqs stops all persistent connection requests for a given pubkey.
// Any attempts initiated by the peerTerminationWatcher are canceled first.
// Afterwards, each connection request is removed from the connmgr. The caller
// can optionally specify a connection ID to ignore, which prevents us from
// canceling a successful request. All persistent connreqs for the provided
// pubkey are discarded after the operation.
func (s *server) cancelConnReqs(pubStr string, skip *uint64) {
	// First, cancel any lingering persistent retry attempts, which will
	// prevent retries for any with backoffs that are still maturing.
	if cancelChan, ok := s.persistentRetryCancels[pubStr]; ok {
		close(cancelChan)
		delete(s.persistentRetryCancels, pubStr)
	}

	// Next, check to see if we have any outstanding persistent connection
	// requests to this peer. If so, then we'll remove all of these
	// connection requests, and also delete the entry from the map.
	connReqs, ok := s.persistentConnReqs[pubStr]
	if !ok {
		return
	}

	for _, connReq := range connReqs {
		// Atomically capture the current request identifier.
		connID := connReq.ID()

		// Skip any zero IDs, as this indicates the request has not
		// yet been scheduled.
		if connID == UnassignedConnID {
			continue
		}

		// Skip a particular connection ID if instructed.
		if skip != nil && connID == *skip {
			continue
		}

		s.connMgr.Remove(connID)
	}

	delete(s.persistentConnReqs, pubStr)
}
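
// cancelConnReqsExceptSketch is a hypothetical usage sketch of cancelConnReqs
// for the case where one connection request just succeeded: the caller
// captures the winning request's ID and passes its address as the skip
// argument so that only the losing requests are torn down.
func cancelConnReqsExceptSketch(s *server, pubStr string,
	winning *connmgr.ConnReq) {

	// Capture the ID of the request we want to keep, then cancel every
	// other pending persistent request for this pubkey.
	connID := winning.ID()
	s.cancelConnReqs(pubStr, &connID)
}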

// addPeer adds the passed peer to the server's global state of all active
// peers.
func (s *server) addPeer(p *peer) {
	if p == nil {
		return
	}

	// Ignore new peers if we're shutting down.
	if s.Stopped() {
		p.Disconnect(ErrServerShuttingDown)
		return
	}

	// Track the new peer in our indexes so we can quickly look it up either
	// according to its public key, or its peer ID.
	// TODO(roasbeef): pipe all requests through to the
	// queryHandler/peerManager
	pubStr := string(p.addr.IdentityKey.SerializeCompressed())

	s.peersByPub[pubStr] = p

	if p.inbound {
		s.inboundPeers[pubStr] = p
	} else {
		s.outboundPeers[pubStr] = p
	}

	// Launch a goroutine to watch for the unexpected termination of this
	// peer, which will ensure all resources are properly cleaned up, and
	// re-establish persistent connections when necessary. The peer
	// termination watcher will be short circuited if the peer is ever
	// added to the ignorePeerTermination map, indicating that the server
	// has already handled the removal of this peer.
	s.wg.Add(1)
	go s.peerTerminationWatcher(p)

	switch {
	// If the remote peer knows of the new gossip queries feature, then
	// we'll create a new gossipSyncer in the AuthenticatedGossiper for it.
	case p.remoteLocalFeatures.HasFeature(lnwire.GossipQueriesOptional):
		srvrLog.Infof("Negotiated chan series queries with %x",
			p.pubKeyBytes[:])

		// We'll only request channel updates from the remote peer if
		// it's enabled in the config, or we're already getting updates
		// from enough peers.
		//
		// TODO(roasbeef): craft s.t. we only get updates from a few
		// peers
		recvUpdates := !cfg.NoChanUpdates
		go s.authGossiper.InitSyncState(p, recvUpdates)

	// If the remote peer has the initial sync feature bit set, then we'll
	// begin the synchronization protocol to exchange authenticated channel
	// graph edges/vertexes, but only if they don't know of the new gossip
	// queries.
	case p.remoteLocalFeatures.HasFeature(lnwire.InitialRoutingSync):
		srvrLog.Infof("Requesting full table sync with %x",
			p.pubKeyBytes[:])

		go s.authGossiper.SynchronizeNode(p)
	}

	// Check if there are listeners waiting for this peer to come online.
	for _, con := range s.peerConnectedListeners[pubStr] {
		close(con)
	}
	delete(s.peerConnectedListeners, pubStr)
}

// removePeer removes the passed peer from the server's state of all active
// peers.
func (s *server) removePeer(p *peer) {
	if p == nil {
		return
	}

	srvrLog.Debugf("removing peer %v", p)

	// As the peer is now finished, ensure that the TCP connection is
	// closed and all of its related goroutines have exited.
	p.Disconnect(fmt.Errorf("server: disconnecting peer %v", p))

	// If this peer had an active persistent connection request, remove it.
	if p.connReq != nil {
		s.connMgr.Remove(p.connReq.ID())
	}

	// Ignore deleting peers if we're shutting down.
	if s.Stopped() {
		return
	}

	pubStr := string(p.addr.IdentityKey.SerializeCompressed())

	delete(s.peersByPub, pubStr)

	if p.inbound {
		delete(s.inboundPeers, pubStr)
	} else {
		delete(s.outboundPeers, pubStr)
	}
}

// openChanReq is a message sent to the server in order to request the
// initiation of a channel funding workflow to the peer with either the
// specified relative peer ID, or a global lightning ID.
type openChanReq struct {
	targetPubkey *btcec.PublicKey

	chainHash chainhash.Hash

	localFundingAmt  btcutil.Amount
	remoteFundingAmt btcutil.Amount

	pushAmt lnwire.MilliSatoshi

	fundingFeePerVSize lnwallet.SatPerVByte

	private bool

	minHtlc lnwire.MilliSatoshi

	remoteCsvDelay uint16

	// TODO(roasbeef): add ability to specify channel constraints as well

	updates chan *lnrpc.OpenStatusUpdate
	err     chan error
}
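
// newOpenChanReqSketch is a hypothetical sketch of filling in an openChanReq
// by hand, using only the fields defined above; in practice OpenChannel below
// constructs this value on behalf of callers.
func newOpenChanReqSketch(nodeKey *btcec.PublicKey,
	localAmt btcutil.Amount) *openChanReq {

	return &openChanReq{
		targetPubkey:    nodeKey,
		chainHash:       *activeNetParams.GenesisHash,
		localFundingAmt: localAmt,

		// Buffered so the funding workflow is never blocked on a slow
		// reader.
		updates: make(chan *lnrpc.OpenStatusUpdate, 2),
		err:     make(chan error, 1),
	}
}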

// ConnectToPeer requests that the server connect to a Lightning Network peer
// at the specified address. This function will *block* until either a
// connection is established, or the initial handshake process fails.
//
// NOTE: This function is safe for concurrent access.
func (s *server) ConnectToPeer(addr *lnwire.NetAddress, perm bool) error {
	targetPub := string(addr.IdentityKey.SerializeCompressed())

	// Acquire mutex, but use explicit unlocking instead of defer for
	// better granularity. In certain conditions, this method requires
	// making an outbound connection to a remote peer, which requires the
	// lock to be released, and subsequently reacquired.
	s.mu.Lock()

	// Ensure we're not already connected to this peer.
	peer, err := s.findPeerByPubStr(targetPub)
	if err == nil {
		s.mu.Unlock()
		return fmt.Errorf("already connected to peer: %v", peer)
	}

	// Peer was not found, continue to pursue connection with peer.

	// If there's already a pending connection request for this pubkey,
	// then we ignore this request to ensure we don't create a redundant
	// connection.
	if reqs, ok := s.persistentConnReqs[targetPub]; ok {
		srvrLog.Warnf("Already have %d persistent connection "+
			"requests for %v, connecting anyway.", len(reqs), addr)
	}

	// If there's not already a pending or active connection to this node,
	// then instruct the connection manager to attempt to establish a
	// persistent connection to the peer.
	srvrLog.Debugf("Connecting to %v", addr)
	if perm {
		connReq := &connmgr.ConnReq{
			Addr:      addr,
			Permanent: true,
		}

		s.persistentPeers[targetPub] = struct{}{}
		if _, ok := s.persistentPeersBackoff[targetPub]; !ok {
			s.persistentPeersBackoff[targetPub] = defaultBackoff
		}
		s.persistentConnReqs[targetPub] = append(
			s.persistentConnReqs[targetPub], connReq)
		s.mu.Unlock()

		go s.connMgr.Connect(connReq)

		return nil
	}
	s.mu.Unlock()

	// If we're not making a persistent connection, then we'll attempt to
	// connect to the target peer. If we can't make the connection, or the
	// crypto negotiation breaks down, then return an error to the caller.
	conn, err := brontide.Dial(s.identityPriv, addr, cfg.net.Dial)
	if err != nil {
		return err
	}

	// Once the connection has been made, we can notify the server of the
	// new connection via our public endpoint, which will require the lock
	// and add the peer to the server's internal state.
	s.OutboundPeerConnected(nil, conn)

	return nil
}
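
// connectWithFallbackSketch is a hypothetical usage sketch of the two
// ConnectToPeer modes: with perm=false the call performs a single blocking
// dial and handshake, while with perm=true the connection manager keeps
// retrying with the server's backoff schedule.
func connectWithFallbackSketch(s *server, addr *lnwire.NetAddress) error {
	// Try a one-shot, non-persistent connection first.
	if err := s.ConnectToPeer(addr, false); err == nil {
		return nil
	}

	// Otherwise, hand the address to the connmgr as a persistent
	// connection so it is retried in the background.
	return s.ConnectToPeer(addr, true)
}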

// DisconnectPeer sends the request to the server to close the connection with
// the peer identified by public key.
//
// NOTE: This function is safe for concurrent access.
func (s *server) DisconnectPeer(pubKey *btcec.PublicKey) error {
	pubBytes := pubKey.SerializeCompressed()
	pubStr := string(pubBytes)

	s.mu.Lock()
	defer s.mu.Unlock()

	// Check that we're actually connected to this peer. If not, then we'll
	// exit with an error as we can't disconnect from a peer that we're not
	// currently connected to.
	peer, err := s.findPeerByPubStr(pubStr)
	if err == ErrPeerNotConnected {
		return fmt.Errorf("peer %x is not connected", pubBytes)
	}

	srvrLog.Infof("Disconnecting from %v", peer)

	s.cancelConnReqs(pubStr, nil)

	// If this peer was formerly a persistent connection, then we'll remove
	// them from this map so we don't attempt to re-connect after we
	// disconnect.
	delete(s.persistentPeers, pubStr)
	delete(s.persistentPeersBackoff, pubStr)

	// Remove the current peer from the server's internal state and signal
	// that the peer termination watcher does not need to execute for this
	// peer.
	s.removePeer(peer)
	s.ignorePeerTermination[peer] = struct{}{}

	return nil
}

// OpenChannel sends a request to the server to open a channel to the specified
// peer identified by nodeKey with the passed channel funding parameters.
//
// NOTE: This function is safe for concurrent access.
func (s *server) OpenChannel(nodeKey *btcec.PublicKey,
	localAmt btcutil.Amount, pushAmt, minHtlc lnwire.MilliSatoshi,
	fundingFeePerVSize lnwallet.SatPerVByte, private bool,
	remoteCsvDelay uint16) (chan *lnrpc.OpenStatusUpdate, chan error) {

	// The updateChan will have a buffer of 2, since we expect a
	// ChanPending + a ChanOpen update, and we want to make sure the
	// funding process is not blocked if the caller is not reading the
	// updates.
	updateChan := make(chan *lnrpc.OpenStatusUpdate, 2)
	errChan := make(chan error, 1)

	var (
		targetPeer  *peer
		pubKeyBytes []byte
		err         error
	)

	// If the user is targeting the peer by public key, then we'll need to
	// convert that into a string for our map. Otherwise, we expect them to
	// target by peer ID instead.
	if nodeKey != nil {
		pubKeyBytes = nodeKey.SerializeCompressed()
	}

	// First attempt to locate the target peer to open a channel with; if
	// we're unable to locate the peer, then this request will fail.
	s.mu.RLock()
	if peer, ok := s.peersByPub[string(pubKeyBytes)]; ok {
		targetPeer = peer
	}
	s.mu.RUnlock()

	if targetPeer == nil {
		errChan <- fmt.Errorf("peer is not connected NodeKey(%x)", pubKeyBytes)
		return updateChan, errChan
	}

	// If the fee rate wasn't specified, then we'll use a default
	// confirmation target.
	if fundingFeePerVSize == 0 {
		estimator := s.cc.feeEstimator
		fundingFeePerVSize, err = estimator.EstimateFeePerVSize(6)
		if err != nil {
			errChan <- err
			return updateChan, errChan
		}
	}

	// Spawn a goroutine to send the funding workflow request to the
	// funding manager. This allows the server to continue handling queries
	// instead of blocking on this request, which is exported as a
	// synchronous request to the outside world.
	req := &openChanReq{
		targetPubkey:       nodeKey,
		chainHash:          *activeNetParams.GenesisHash,
		localFundingAmt:    localAmt,
		fundingFeePerVSize: fundingFeePerVSize,
		pushAmt:            pushAmt,
		private:            private,
		minHtlc:            minHtlc,
		remoteCsvDelay:     remoteCsvDelay,
		updates:            updateChan,
		err:                errChan,
	}

	// TODO(roasbeef): pass in chan that's closed if/when funding succeeds
	// so can track as persistent peer?
	go s.fundingMgr.initFundingWorkflow(targetPeer.addr, req)

	return updateChan, errChan
}
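
// awaitFirstFundingUpdateSketch is a hypothetical sketch of how a caller
// might consume the two channels returned by OpenChannel: funding progress
// (a ChanPending update followed by a ChanOpen update) arrives on the update
// channel, while any failure is delivered on the error channel.
func awaitFirstFundingUpdateSketch(updates chan *lnrpc.OpenStatusUpdate,
	errChan chan error) (*lnrpc.OpenStatusUpdate, error) {

	select {
	case update := <-updates:
		return update, nil
	case err := <-errChan:
		return nil, err
	}
}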

// Peers returns a slice of all active peers.
//
// NOTE: This function is safe for concurrent access.
func (s *server) Peers() []*peer {
	s.mu.RLock()
	defer s.mu.RUnlock()

	peers := make([]*peer, 0, len(s.peersByPub))
	for _, peer := range s.peersByPub {
		peers = append(peers, peer)
	}

	return peers
}

// parseHexColor takes a hex string representation of a color in the
// form "#RRGGBB", parses the hex color values, and returns a color.RGBA
// struct of the same color.
func parseHexColor(colorStr string) (color.RGBA, error) {
	if len(colorStr) != 7 || colorStr[0] != '#' {
		return color.RGBA{}, errors.New("Color must be in format #RRGGBB")
	}

	// Decode the hex color string to bytes.
	// The resulting byte array is in the form [R, G, B].
	colorBytes, err := hex.DecodeString(colorStr[1:])
	if err != nil {
		return color.RGBA{}, err
	}

	return color.RGBA{R: colorBytes[0], G: colorBytes[1], B: colorBytes[2]}, nil
}
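
// parseHexColorSketch is a hypothetical usage sketch of parseHexColor: the
// string "#3399ff" decodes to the bytes [0x33, 0x99, 0xff], which become the
// R, G, and B fields of the returned color.RGBA (the alpha channel is left at
// its zero value).
func parseHexColorSketch() (color.RGBA, error) {
	return parseHexColor("#3399ff")
}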

// computeNextBackoff uses a truncated exponential backoff to compute the next
// backoff using the value of the existing backoff. The returned duration is
// randomized in either direction by 1/20 to prevent tight loops from
// stabilizing.
func computeNextBackoff(currBackoff time.Duration) time.Duration {
	// Double the current backoff, truncating if it exceeds our maximum.
	nextBackoff := 2 * currBackoff
	if nextBackoff > maximumBackoff {
		nextBackoff = maximumBackoff
	}

	// Using 1/10 of our duration as a margin, compute a random offset to
	// avoid the nodes entering connection cycles.
	margin := nextBackoff / 10

	var wiggle big.Int
	wiggle.SetUint64(uint64(margin))
	if _, err := rand.Int(rand.Reader, &wiggle); err != nil {
		// Randomizing is not mission critical, so we'll just return
		// the current backoff.
		return nextBackoff
	}

	// Otherwise add in our wiggle, but subtract out half of the margin so
	// that the backoff can be tweaked by 1/20 in either direction.
	return nextBackoff + (time.Duration(wiggle.Uint64()) - margin/2)
}
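
// backoffScheduleSketch is a hypothetical sketch of the schedule produced by
// computeNextBackoff above. Ignoring the random 1/20 wiggle, each retry
// roughly doubles the previous delay until maximumBackoff is reached, e.g.
// 1s, 2s, 4s, 8s, ... capped at maximumBackoff.
func backoffScheduleSketch(start time.Duration, attempts int) []time.Duration {
	schedule := make([]time.Duration, 0, attempts)

	backoff := start
	for i := 0; i < attempts; i++ {
		schedule = append(schedule, backoff)
		backoff = computeNextBackoff(backoff)
	}

	return schedule
}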

// fetchNodeAdvertisedAddr attempts to fetch an advertised address of a node.
func (s *server) fetchNodeAdvertisedAddr(pub *btcec.PublicKey) (net.Addr, error) {
	node, err := s.chanDB.ChannelGraph().FetchLightningNode(pub)
	if err != nil {
		return nil, err
	}

	if len(node.Addresses) == 0 {
		return nil, errors.New("no advertised addresses found")
	}

	return node.Addresses[0], nil
}