2015-12-26 09:09:17 +03:00
|
|
|
package main
|
2016-01-14 08:41:46 +03:00
|
|
|
|
|
|
|
import (
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
"bytes"
|
2017-03-16 04:56:25 +03:00
|
|
|
"crypto/sha256"
|
2016-12-27 08:42:23 +03:00
|
|
|
"encoding/hex"
|
2016-01-14 08:41:46 +03:00
|
|
|
"fmt"
|
|
|
|
"net"
|
2017-04-14 00:41:54 +03:00
|
|
|
"strconv"
|
2016-01-14 08:41:46 +03:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
2016-12-15 05:11:31 +03:00
|
|
|
"time"
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2017-04-14 23:17:51 +03:00
|
|
|
"github.com/boltdb/bolt"
|
2016-09-21 03:15:26 +03:00
|
|
|
"github.com/lightningnetwork/lightning-onion"
|
2016-10-28 05:49:10 +03:00
|
|
|
"github.com/lightningnetwork/lnd/brontide"
|
2016-03-23 04:49:22 +03:00
|
|
|
"github.com/lightningnetwork/lnd/channeldb"
|
2017-03-20 00:06:10 +03:00
|
|
|
"github.com/lightningnetwork/lnd/discovery"
|
2016-08-31 02:52:53 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnrpc"
|
2016-10-28 05:49:10 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
2017-02-02 05:29:46 +03:00
|
|
|
"github.com/lightningnetwork/lnd/routing"
|
2016-05-15 17:17:44 +03:00
|
|
|
"github.com/roasbeef/btcd/btcec"
|
2017-01-06 00:58:06 +03:00
|
|
|
"github.com/roasbeef/btcd/connmgr"
|
2016-06-21 21:52:09 +03:00
|
|
|
"github.com/roasbeef/btcutil"
|
2017-05-02 23:04:58 +03:00
|
|
|
|
|
|
|
"github.com/go-errors/errors"
|
|
|
|
"github.com/lightningnetwork/lnd/htlcswitch"
|
2016-01-14 08:41:46 +03:00
|
|
|
)
|
|
|
|
|
2016-12-25 03:51:25 +03:00
|
|
|
// server is the main server of the Lightning Network Daemon. The server houses
|
|
|
|
// global state pertaining to the wallet, database, and the rpcserver.
|
2016-06-21 21:52:09 +03:00
|
|
|
// Additionally, the server is also used as a central messaging bus to interact
|
|
|
|
// with any of its companion objects.
|
2016-01-14 08:41:46 +03:00
|
|
|
type server struct {
|
|
|
|
started int32 // atomic
|
|
|
|
shutdown int32 // atomic
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// identityPriv is the private key used to authenticate any incoming
|
|
|
|
// connections.
|
|
|
|
identityPriv *btcec.PrivateKey
|
2016-01-17 06:07:44 +03:00
|
|
|
|
2017-04-14 21:17:41 +03:00
|
|
|
// nodeSigner is an implementation of the MessageSigner implementation
|
2017-05-18 21:55:25 +03:00
|
|
|
// that's backed by the identity private key of the running lnd node.
|
2017-04-14 21:17:41 +03:00
|
|
|
nodeSigner *nodeSigner
|
|
|
|
|
2016-07-06 04:48:35 +03:00
|
|
|
// lightningID is the sha256 of the public key corresponding to our
|
|
|
|
// long-term identity private key.
|
|
|
|
lightningID [32]byte
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
peersMtx sync.RWMutex
|
|
|
|
peersByID map[int32]*peer
|
|
|
|
peersByPub map[string]*peer
|
2016-01-14 08:41:46 +03:00
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
persistentPeers map[string]struct{}
|
|
|
|
inboundPeers map[string]*peer
|
|
|
|
outboundPeers map[string]*peer
|
|
|
|
|
2017-05-18 21:55:25 +03:00
|
|
|
cc *chainControl
|
2016-06-21 21:52:09 +03:00
|
|
|
|
|
|
|
fundingMgr *fundingManager
|
2017-05-18 21:55:25 +03:00
|
|
|
|
|
|
|
chanDB *channeldb.DB
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2017-05-02 23:04:58 +03:00
|
|
|
htlcSwitch *htlcswitch.Switch
|
2016-11-29 06:43:57 +03:00
|
|
|
invoices *invoiceRegistry
|
|
|
|
breachArbiter *breachArbiter
|
2016-07-10 02:36:25 +03:00
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
chanRouter *routing.ChannelRouter
|
2016-07-15 14:02:59 +03:00
|
|
|
|
2017-04-01 15:33:17 +03:00
|
|
|
discoverSrv *discovery.AuthenticatedGossiper
|
2017-03-20 00:06:10 +03:00
|
|
|
|
2016-09-12 22:37:51 +03:00
|
|
|
utxoNursery *utxoNursery
|
|
|
|
|
2016-09-21 03:15:26 +03:00
|
|
|
sphinx *sphinx.Router
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
connMgr *connmgr.ConnManager
|
|
|
|
|
2017-01-10 06:08:52 +03:00
|
|
|
pendingConnMtx sync.RWMutex
|
2017-02-25 02:46:02 +03:00
|
|
|
persistentConnReqs map[string][]*connmgr.ConnReq
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
broadcastRequests chan *broadcastReq
|
|
|
|
sendRequests chan *sendReq
|
|
|
|
|
2016-01-14 08:41:46 +03:00
|
|
|
newPeers chan *peer
|
|
|
|
donePeers chan *peer
|
2016-01-17 06:07:44 +03:00
|
|
|
queries chan interface{}
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2017-02-16 15:39:38 +03:00
|
|
|
// globalFeatures feature vector which affects HTLCs and thus are also
|
|
|
|
// advertised to other nodes.
|
|
|
|
globalFeatures *lnwire.FeatureVector
|
|
|
|
|
|
|
|
// localFeatures is an feature vector which represent the features which
|
|
|
|
// only affect the protocol between these two nodes.
|
|
|
|
localFeatures *lnwire.FeatureVector
|
|
|
|
|
2016-01-14 08:41:46 +03:00
|
|
|
wg sync.WaitGroup
|
|
|
|
quit chan struct{}
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// newServer creates a new instance of the server which is to listen using the
|
|
|
|
// passed listener address.
|
2017-06-06 01:18:06 +03:00
|
|
|
func newServer(listenAddrs []string, chanDB *channeldb.DB, cc *chainControl,
|
|
|
|
privKey *btcec.PrivateKey) (*server, error) {
|
|
|
|
|
|
|
|
var err error
|
2016-01-17 06:07:44 +03:00
|
|
|
|
|
|
|
listeners := make([]net.Listener, len(listenAddrs))
|
|
|
|
for i, addr := range listenAddrs {
|
2016-10-28 05:49:10 +03:00
|
|
|
listeners[i], err = brontide.NewListener(privKey, addr)
|
2016-01-17 06:07:44 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-07-06 04:48:35 +03:00
|
|
|
serializedPubKey := privKey.PubKey().SerializeCompressed()
|
2016-01-17 06:07:44 +03:00
|
|
|
s := &server{
|
2017-05-18 21:55:25 +03:00
|
|
|
chanDB: chanDB,
|
|
|
|
cc: cc,
|
|
|
|
|
|
|
|
invoices: newInvoiceRegistry(chanDB),
|
2016-11-29 06:43:57 +03:00
|
|
|
|
2017-05-18 21:55:25 +03:00
|
|
|
utxoNursery: newUtxoNursery(chanDB, cc.chainNotifier, cc.wallet),
|
2016-11-29 06:43:57 +03:00
|
|
|
|
|
|
|
identityPriv: privKey,
|
2017-04-14 21:17:41 +03:00
|
|
|
nodeSigner: newNodeSigner(privKey),
|
2016-11-29 06:43:57 +03:00
|
|
|
|
2016-09-21 03:15:26 +03:00
|
|
|
// TODO(roasbeef): derive proper onion key based on rotation
|
|
|
|
// schedule
|
|
|
|
sphinx: sphinx.NewRouter(privKey, activeNetParams.Params),
|
2017-03-16 04:56:25 +03:00
|
|
|
lightningID: sha256.Sum256(serializedPubKey),
|
2016-11-29 06:43:57 +03:00
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
persistentPeers: make(map[string]struct{}),
|
2017-02-25 02:46:02 +03:00
|
|
|
persistentConnReqs: make(map[string][]*connmgr.ConnReq),
|
2016-12-15 05:11:31 +03:00
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
peersByID: make(map[int32]*peer),
|
|
|
|
peersByPub: make(map[string]*peer),
|
|
|
|
inboundPeers: make(map[string]*peer),
|
|
|
|
outboundPeers: make(map[string]*peer),
|
2016-12-15 05:11:31 +03:00
|
|
|
|
|
|
|
newPeers: make(chan *peer, 10),
|
|
|
|
donePeers: make(chan *peer, 10),
|
2016-11-29 06:43:57 +03:00
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
broadcastRequests: make(chan *broadcastReq),
|
|
|
|
sendRequests: make(chan *sendReq),
|
|
|
|
|
2017-02-17 17:28:11 +03:00
|
|
|
globalFeatures: globalFeatures,
|
2017-02-21 12:35:21 +03:00
|
|
|
localFeatures: localFeatures,
|
2017-02-16 15:39:38 +03:00
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
queries: make(chan interface{}),
|
|
|
|
quit: make(chan struct{}),
|
2016-01-17 06:07:44 +03:00
|
|
|
}
|
|
|
|
|
2016-09-19 22:03:38 +03:00
|
|
|
// If the debug HTLC flag is on, then we invoice a "master debug"
|
|
|
|
// invoice which all outgoing payments will be sent and all incoming
|
2017-01-13 08:01:50 +03:00
|
|
|
// HTLCs with the debug R-Hash immediately settled.
|
2016-09-19 22:03:38 +03:00
|
|
|
if cfg.DebugHTLC {
|
|
|
|
kiloCoin := btcutil.Amount(btcutil.SatoshiPerBitcoin * 1000)
|
|
|
|
s.invoices.AddDebugInvoice(kiloCoin, *debugPre)
|
|
|
|
srvrLog.Debugf("Debug HTLC invoice inserted, preimage=%x, hash=%x",
|
|
|
|
debugPre[:], debugHash[:])
|
|
|
|
}
|
2016-07-13 03:14:07 +03:00
|
|
|
|
2017-05-02 23:04:58 +03:00
|
|
|
s.htlcSwitch = htlcswitch.New(htlcswitch.Config{
|
|
|
|
LocalChannelClose: func(pubKey []byte,
|
|
|
|
request *htlcswitch.ChanClose) {
|
|
|
|
s.peersMtx.RLock()
|
|
|
|
peer, ok := s.peersByPub[string(pubKey)]
|
|
|
|
s.peersMtx.RUnlock()
|
|
|
|
|
|
|
|
if !ok {
|
|
|
|
srvrLog.Error("unable to close channel, peer"+
|
|
|
|
" with %v id can't be found", pubKey)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
peer.localCloseChanReqs <- request
|
|
|
|
},
|
|
|
|
})
|
|
|
|
|
2017-02-23 03:24:22 +03:00
|
|
|
// If external IP addresses have been specified, add those to the list
|
|
|
|
// of this server's addresses.
|
2017-03-30 02:55:28 +03:00
|
|
|
selfAddrs := make([]net.Addr, 0, len(cfg.ExternalIPs))
|
2017-02-23 03:24:22 +03:00
|
|
|
for _, ip := range cfg.ExternalIPs {
|
2017-04-14 00:41:54 +03:00
|
|
|
var addr string
|
|
|
|
_, _, err = net.SplitHostPort(ip)
|
|
|
|
if err != nil {
|
|
|
|
addr = net.JoinHostPort(ip, strconv.Itoa(defaultPeerPort))
|
|
|
|
} else {
|
|
|
|
addr = ip
|
|
|
|
}
|
|
|
|
|
|
|
|
lnAddr, err := net.ResolveTCPAddr("tcp", addr)
|
2017-02-23 03:24:22 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-04-14 00:41:54 +03:00
|
|
|
|
|
|
|
selfAddrs = append(selfAddrs, lnAddr)
|
2016-12-27 08:42:23 +03:00
|
|
|
}
|
2016-10-05 23:47:02 +03:00
|
|
|
|
2017-03-27 20:25:44 +03:00
|
|
|
chanGraph := chanDB.ChannelGraph()
|
2017-03-28 22:08:14 +03:00
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
// TODO(roasbeef): make alias configurable
|
2017-04-20 00:59:16 +03:00
|
|
|
alias := lnwire.NewAlias(hex.EncodeToString(serializedPubKey[:10]))
|
2016-12-27 08:42:23 +03:00
|
|
|
self := &channeldb.LightningNode{
|
|
|
|
LastUpdate: time.Now(),
|
2017-02-17 12:29:23 +03:00
|
|
|
Addresses: selfAddrs,
|
2016-12-27 08:42:23 +03:00
|
|
|
PubKey: privKey.PubKey(),
|
2017-04-24 05:21:32 +03:00
|
|
|
Alias: alias.String(),
|
|
|
|
Features: globalFeatures,
|
2016-12-27 08:42:23 +03:00
|
|
|
}
|
2017-03-27 20:25:44 +03:00
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
// If our information has changed since our last boot, then we'll
|
|
|
|
// re-sign our node announcement so a fresh authenticated version of it
|
|
|
|
// can be propagated throughout the network upon startup.
|
|
|
|
// TODO(roasbeef): don't always set timestamp above to _now.
|
2017-04-14 21:17:41 +03:00
|
|
|
self.AuthSig, err = discovery.SignAnnouncement(s.nodeSigner,
|
|
|
|
s.identityPriv.PubKey(),
|
2017-03-27 20:25:44 +03:00
|
|
|
&lnwire.NodeAnnouncement{
|
|
|
|
Timestamp: uint32(self.LastUpdate.Unix()),
|
|
|
|
Addresses: self.Addresses,
|
|
|
|
NodeID: self.PubKey,
|
|
|
|
Alias: alias,
|
|
|
|
Features: self.Features,
|
2017-04-01 15:33:17 +03:00
|
|
|
},
|
|
|
|
)
|
|
|
|
if err != nil {
|
2017-03-27 20:25:44 +03:00
|
|
|
return nil, fmt.Errorf("unable to generate signature for "+
|
|
|
|
"self node announcement: %v", err)
|
|
|
|
}
|
2016-12-27 08:42:23 +03:00
|
|
|
if err := chanGraph.SetSourceNode(self); err != nil {
|
2017-03-27 20:25:44 +03:00
|
|
|
return nil, fmt.Errorf("can't set self node: %v", err)
|
2016-10-05 23:47:02 +03:00
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
s.chanRouter, err = routing.New(routing.Config{
|
2017-05-11 03:34:15 +03:00
|
|
|
Graph: chanGraph,
|
2017-05-18 21:55:25 +03:00
|
|
|
Chain: cc.chainIO,
|
|
|
|
ChainView: cc.chainView,
|
2017-02-02 05:29:46 +03:00
|
|
|
SendToSwitch: func(firstHop *btcec.PublicKey,
|
2017-02-21 10:57:43 +03:00
|
|
|
htlcAdd *lnwire.UpdateAddHTLC) ([32]byte, error) {
|
2017-02-02 05:29:46 +03:00
|
|
|
firstHopPub := firstHop.SerializeCompressed()
|
2017-05-02 23:04:58 +03:00
|
|
|
return s.htlcSwitch.SendHTLC(firstHopPub, htlcAdd)
|
2017-02-02 05:29:46 +03:00
|
|
|
},
|
2016-12-27 08:42:23 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
2017-03-27 20:25:44 +03:00
|
|
|
return nil, fmt.Errorf("can't create router: %v", err)
|
2016-12-27 08:42:23 +03:00
|
|
|
}
|
2016-07-15 14:02:59 +03:00
|
|
|
|
2017-03-20 00:06:10 +03:00
|
|
|
s.discoverSrv, err = discovery.New(discovery.Config{
|
2017-03-28 22:08:14 +03:00
|
|
|
Broadcast: s.broadcastMessage,
|
2017-05-18 21:55:25 +03:00
|
|
|
Notifier: s.cc.chainNotifier,
|
2017-03-28 22:08:14 +03:00
|
|
|
Router: s.chanRouter,
|
|
|
|
SendToPeer: s.sendToPeer,
|
|
|
|
TrickleDelay: time.Millisecond * 300,
|
|
|
|
ProofMatureDelta: 0,
|
2017-05-05 20:17:31 +03:00
|
|
|
DB: chanDB,
|
2017-03-20 00:06:10 +03:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2017-05-18 21:55:25 +03:00
|
|
|
s.breachArbiter = newBreachArbiter(cc.wallet, chanDB, cc.chainNotifier,
|
|
|
|
s.htlcSwitch, s.cc.chainIO, s.cc.feeEstimator)
|
2017-01-13 06:40:38 +03:00
|
|
|
|
2016-11-29 06:43:57 +03:00
|
|
|
// TODO(roasbeef): introduce closure and config system to decouple the
|
|
|
|
// initialization above ^
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
// Create the connection manager which will be responsible for
|
|
|
|
// maintaining persistent outbound connections and also accepting new
|
|
|
|
// incoming connections
|
|
|
|
cmgr, err := connmgr.New(&connmgr.Config{
|
|
|
|
Listeners: listeners,
|
|
|
|
OnAccept: s.inboundPeerConnected,
|
|
|
|
RetryDuration: time.Second * 5,
|
|
|
|
TargetOutbound: 100,
|
|
|
|
GetNewAddress: nil,
|
|
|
|
Dial: noiseDial(s.identityPriv),
|
|
|
|
OnConnection: s.outboundPeerConnected,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
s.connMgr = cmgr
|
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
return s, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start starts the main daemon server, all requested listeners, and any helper
|
|
|
|
// goroutines.
|
|
|
|
func (s *server) Start() error {
|
|
|
|
// Already running?
|
|
|
|
if atomic.AddInt32(&s.started, 1) != 1 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start the notification server. This is used so channel management
|
|
|
|
// goroutines can be notified when a funding transaction reaches a
|
|
|
|
// sufficient number of confirmations, or when the input for the
|
|
|
|
// funding transaction is spent in an attempt at an uncooperative close
|
|
|
|
// by the counterparty.
|
2017-05-18 21:55:25 +03:00
|
|
|
if err := s.cc.chainNotifier.Start(); err != nil {
|
2017-04-24 05:21:32 +03:00
|
|
|
return err
|
|
|
|
}
|
2017-03-25 11:40:33 +03:00
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
if err := s.htlcSwitch.Start(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := s.utxoNursery.Start(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := s.breachArbiter.Start(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := s.discoverSrv.Start(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := s.chanRouter.Start(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
s.wg.Add(1)
|
|
|
|
go s.queryHandler()
|
|
|
|
|
|
|
|
// With all the relevant sub-systems started, we'll now atetmpt to
|
|
|
|
// stasblish persistent connections to our direct channel collaborators
|
|
|
|
// within the network.
|
|
|
|
if err := s.establishPersistentConnections(); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Stop gracefully shutsdown the main daemon server. This function will signal
|
|
|
|
// any active goroutines, or helper objects to exit, then blocks until they've
|
|
|
|
// all successfully exited. Additionally, any/all listeners are closed.
|
|
|
|
func (s *server) Stop() error {
|
|
|
|
// Bail if we're already shutting down.
|
|
|
|
if atomic.AddInt32(&s.shutdown, 1) != 1 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Shutdown the wallet, funding manager, and the rpc server.
|
2017-05-18 21:55:25 +03:00
|
|
|
s.cc.chainNotifier.Stop()
|
2017-04-24 05:21:32 +03:00
|
|
|
s.chanRouter.Stop()
|
|
|
|
s.htlcSwitch.Stop()
|
|
|
|
s.utxoNursery.Stop()
|
|
|
|
s.breachArbiter.Stop()
|
|
|
|
s.discoverSrv.Stop()
|
2017-05-18 21:55:25 +03:00
|
|
|
s.cc.wallet.Shutdown()
|
|
|
|
s.cc.chainView.Stop()
|
2017-04-24 05:21:32 +03:00
|
|
|
|
|
|
|
// Signal all the lingering goroutines to quit.
|
|
|
|
close(s.quit)
|
|
|
|
s.wg.Wait()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// establishPersistentConnections attempts to establish persistent connections
|
|
|
|
// to all our direct channel collaborators. In order to promote liveness of
|
|
|
|
// our active channels, we instruct the connection manager to attempt to
|
|
|
|
// establish and maintain persistent connections to all our direct channel
|
|
|
|
// counterparties.
|
|
|
|
func (s *server) establishPersistentConnections() error {
|
2017-03-25 11:40:33 +03:00
|
|
|
// nodeAddrsMap stores the combination of node public keys and
|
|
|
|
// addresses that we'll attempt to reconnect to. PubKey strings are
|
|
|
|
// used as keys since other PubKey forms can't be compared.
|
|
|
|
nodeAddrsMap := map[string]*nodeAddresses{}
|
|
|
|
|
|
|
|
// Iterate through the list of LinkNodes to find addresses we should
|
|
|
|
// attempt to connect to based on our set of previous connections. Set
|
|
|
|
// the reconnection port to the default peer port.
|
2016-12-15 05:11:31 +03:00
|
|
|
linkNodes, err := s.chanDB.FetchAllLinkNodes()
|
|
|
|
if err != nil && err != channeldb.ErrLinkNodesNotFound {
|
2017-04-24 05:21:32 +03:00
|
|
|
return err
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
for _, node := range linkNodes {
|
2017-03-25 11:40:33 +03:00
|
|
|
for _, address := range node.Addresses {
|
|
|
|
if address.Port == 0 {
|
|
|
|
address.Port = defaultPeerPort
|
|
|
|
}
|
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
pubStr := string(node.IdentityPub.SerializeCompressed())
|
2017-05-02 22:31:35 +03:00
|
|
|
|
2017-03-25 11:40:33 +03:00
|
|
|
nodeAddrs := &nodeAddresses{
|
|
|
|
pubKey: node.IdentityPub,
|
|
|
|
addresses: node.Addresses,
|
|
|
|
}
|
|
|
|
nodeAddrsMap[pubStr] = nodeAddrs
|
|
|
|
}
|
2017-02-25 02:46:02 +03:00
|
|
|
|
2017-03-25 11:40:33 +03:00
|
|
|
// After checking our previous connections for addresses to connect to,
|
|
|
|
// iterate through the nodes in our channel graph to find addresses
|
|
|
|
// that have been added via NodeAnnouncement messages.
|
2017-04-24 05:21:32 +03:00
|
|
|
chanGraph := s.chanDB.ChannelGraph()
|
2017-03-25 11:40:33 +03:00
|
|
|
sourceNode, err := chanGraph.SourceNode()
|
|
|
|
if err != nil {
|
2017-04-24 05:21:32 +03:00
|
|
|
return err
|
2017-03-25 11:40:33 +03:00
|
|
|
}
|
2017-05-05 01:24:45 +03:00
|
|
|
// TODO(roasbeef): instead iterate over link nodes and query graph for
|
|
|
|
// each of the nodes.
|
2017-04-14 23:17:51 +03:00
|
|
|
err = sourceNode.ForEachChannel(nil, func(_ *bolt.Tx,
|
|
|
|
_ *channeldb.ChannelEdgeInfo, policy *channeldb.ChannelEdgePolicy) error {
|
|
|
|
|
2017-03-25 11:40:33 +03:00
|
|
|
pubStr := string(policy.Node.PubKey.SerializeCompressed())
|
|
|
|
|
|
|
|
// Add addresses from channel graph/NodeAnnouncements to the
|
|
|
|
// list of addresses we'll connect to. If there are duplicates
|
|
|
|
// that have different ports specified, the port from the
|
|
|
|
// channel graph should supersede the port from the link node.
|
|
|
|
var addrs []*net.TCPAddr
|
|
|
|
linkNodeAddrs, ok := nodeAddrsMap[pubStr]
|
|
|
|
if ok {
|
|
|
|
for _, lnAddress := range linkNodeAddrs.addresses {
|
|
|
|
var addrMatched bool
|
|
|
|
for _, polAddress := range policy.Node.Addresses {
|
2017-04-17 01:39:57 +03:00
|
|
|
polTCPAddr, ok := polAddress.(*net.TCPAddr)
|
2017-03-25 11:40:33 +03:00
|
|
|
if ok && polTCPAddr.IP.Equal(lnAddress.IP) {
|
|
|
|
addrMatched = true
|
|
|
|
addrs = append(addrs, polTCPAddr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if !addrMatched {
|
|
|
|
addrs = append(addrs, lnAddress)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
for _, addr := range policy.Node.Addresses {
|
|
|
|
polTCPAddr, ok := addr.(*net.TCPAddr)
|
|
|
|
if ok {
|
|
|
|
addrs = append(addrs, polTCPAddr)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
nodeAddrsMap[pubStr] = &nodeAddresses{
|
|
|
|
pubKey: policy.Node.PubKey,
|
|
|
|
addresses: addrs,
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil && err != channeldb.ErrGraphNoEdgesFound {
|
2017-04-24 05:21:32 +03:00
|
|
|
return err
|
2017-03-25 11:40:33 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Iterate through the combined list of addresses from prior links and
|
|
|
|
// node announcements and attempt to reconnect to each node.
|
|
|
|
for pubStr, nodeAddr := range nodeAddrsMap {
|
2017-04-24 05:21:32 +03:00
|
|
|
// Add this peer to the set of peers we should maintain a
|
|
|
|
// persistent connection with.
|
|
|
|
s.persistentPeers[pubStr] = struct{}{}
|
|
|
|
|
2017-03-25 11:40:33 +03:00
|
|
|
for _, address := range nodeAddr.addresses {
|
|
|
|
// Create a wrapper address which couples the IP and
|
|
|
|
// the pubkey so the brontide authenticated connection
|
|
|
|
// can be established.
|
2017-02-25 02:46:02 +03:00
|
|
|
lnAddr := &lnwire.NetAddress{
|
2017-03-25 11:40:33 +03:00
|
|
|
IdentityKey: nodeAddr.pubKey,
|
2017-02-25 02:46:02 +03:00
|
|
|
Address: address,
|
|
|
|
}
|
2017-03-25 11:40:33 +03:00
|
|
|
srvrLog.Debugf("Attempting persistent connection to "+
|
|
|
|
"channel peer %v", lnAddr)
|
2017-04-01 15:33:17 +03:00
|
|
|
|
|
|
|
// Send the persistent connection request to the
|
|
|
|
// connection manager, saving the request itself so we
|
|
|
|
// can cancel/restart the process as needed.
|
2017-02-25 02:46:02 +03:00
|
|
|
connReq := &connmgr.ConnReq{
|
|
|
|
Addr: lnAddr,
|
|
|
|
Permanent: true,
|
|
|
|
}
|
2017-03-25 11:40:33 +03:00
|
|
|
|
2017-04-24 05:21:32 +03:00
|
|
|
s.pendingConnMtx.Lock()
|
2017-04-01 15:33:17 +03:00
|
|
|
s.persistentConnReqs[pubStr] = append(s.persistentConnReqs[pubStr],
|
|
|
|
connReq)
|
2017-04-24 05:21:32 +03:00
|
|
|
s.pendingConnMtx.Unlock()
|
|
|
|
|
2017-02-25 02:46:02 +03:00
|
|
|
go s.connMgr.Connect(connReq)
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// WaitForShutdown blocks all goroutines have been stopped.
|
|
|
|
func (s *server) WaitForShutdown() {
|
|
|
|
s.wg.Wait()
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2017-01-13 08:01:50 +03:00
|
|
|
// broadcastReq is a message sent to the server by a related subsystem when it
|
2016-12-27 08:42:23 +03:00
|
|
|
// wishes to broadcast one or more messages to all connected peers. Thi
|
|
|
|
type broadcastReq struct {
|
|
|
|
ignore *btcec.PublicKey
|
|
|
|
msgs []lnwire.Message
|
|
|
|
|
|
|
|
errChan chan error // MUST be buffered.
|
|
|
|
}
|
|
|
|
|
|
|
|
// broadcastMessage sends a request to the server to broadcast a set of
|
|
|
|
// messages to all peers other than the one specified by the `skip` parameter.
|
|
|
|
func (s *server) broadcastMessage(skip *btcec.PublicKey, msgs ...lnwire.Message) error {
|
|
|
|
errChan := make(chan error, 1)
|
|
|
|
|
|
|
|
msgsToSend := make([]lnwire.Message, 0, len(msgs))
|
|
|
|
msgsToSend = append(msgsToSend, msgs...)
|
|
|
|
broadcastReq := &broadcastReq{
|
|
|
|
ignore: skip,
|
|
|
|
msgs: msgsToSend,
|
|
|
|
errChan: errChan,
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case s.broadcastRequests <- broadcastReq:
|
|
|
|
case <-s.quit:
|
|
|
|
return errors.New("server shutting down")
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case err := <-errChan:
|
|
|
|
return err
|
|
|
|
case <-s.quit:
|
|
|
|
return errors.New("server shutting down")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-13 08:01:50 +03:00
|
|
|
// sendReq is message sent to the server by a related subsystem which it
|
2016-12-27 08:42:23 +03:00
|
|
|
// wishes to send a set of messages to a specified peer.
|
|
|
|
type sendReq struct {
|
|
|
|
target *btcec.PublicKey
|
|
|
|
msgs []lnwire.Message
|
|
|
|
|
|
|
|
errChan chan error
|
|
|
|
}
|
|
|
|
|
2017-03-25 11:40:33 +03:00
|
|
|
type nodeAddresses struct {
|
|
|
|
pubKey *btcec.PublicKey
|
|
|
|
addresses []*net.TCPAddr
|
|
|
|
}
|
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
// sendToPeer send a message to the server telling it to send the specific set
|
|
|
|
// of message to a particular peer. If the peer connect be found, then this
|
|
|
|
// method will return a non-nil error.
|
|
|
|
func (s *server) sendToPeer(target *btcec.PublicKey, msgs ...lnwire.Message) error {
|
|
|
|
errChan := make(chan error, 1)
|
|
|
|
|
|
|
|
msgsToSend := make([]lnwire.Message, 0, len(msgs))
|
|
|
|
msgsToSend = append(msgsToSend, msgs...)
|
|
|
|
sMsg := &sendReq{
|
|
|
|
target: target,
|
|
|
|
msgs: msgsToSend,
|
|
|
|
errChan: errChan,
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case s.sendRequests <- sMsg:
|
|
|
|
case <-s.quit:
|
|
|
|
return errors.New("server shutting down")
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case err := <-errChan:
|
|
|
|
return err
|
|
|
|
case <-s.quit:
|
|
|
|
return errors.New("server shutting down")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-13 06:40:38 +03:00
|
|
|
// findPeer will return the peer that corresponds to the passed in public key.
|
|
|
|
// This function is used by the funding manager, allowing it to update the
|
|
|
|
// daemon's local representation of the remote peer.
|
|
|
|
func (s *server) findPeer(peerKey *btcec.PublicKey) (*peer, error) {
|
|
|
|
serializedIDKey := string(peerKey.SerializeCompressed())
|
|
|
|
|
|
|
|
s.peersMtx.RLock()
|
|
|
|
peer := s.peersByPub[serializedIDKey]
|
|
|
|
s.peersMtx.RUnlock()
|
|
|
|
|
|
|
|
if peer == nil {
|
|
|
|
return nil, errors.New("Peer not found. Pubkey: " +
|
|
|
|
string(peerKey.SerializeCompressed()))
|
|
|
|
}
|
|
|
|
|
|
|
|
return peer, nil
|
|
|
|
}
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
// peerTerminationWatcher waits until a peer has been disconnected, and then
|
|
|
|
// cleans up all resources allocated to the peer, notifies relevant sub-systems
|
|
|
|
// of its demise, and finally handles re-connecting to the peer if it's
|
|
|
|
// persistent.
|
|
|
|
//
|
|
|
|
// NOTE: This MUST be launched as a goroutine.
|
|
|
|
func (s *server) peerTerminationWatcher(p *peer) {
|
|
|
|
p.WaitForDisconnect()
|
|
|
|
|
|
|
|
srvrLog.Debugf("Peer %v has been disconnected", p)
|
|
|
|
|
2017-05-02 23:04:58 +03:00
|
|
|
// Tell the switch to remove all links associated with this peer.
|
2017-05-06 02:02:03 +03:00
|
|
|
// Passing nil as the target link indicates that all links associated
|
|
|
|
// with this interface should be closed.
|
2017-05-02 23:04:58 +03:00
|
|
|
hop := htlcswitch.NewHopID(p.addr.IdentityKey.SerializeCompressed())
|
|
|
|
links, err := p.server.htlcSwitch.GetLinks(hop)
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to get channel links: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, link := range links {
|
|
|
|
err := p.server.htlcSwitch.RemoveLink(link.ChanID())
|
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to remove channel link: %v",
|
|
|
|
err)
|
|
|
|
}
|
|
|
|
}
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
|
|
|
// Send the peer to be garbage collected by the server.
|
|
|
|
p.server.donePeers <- p
|
|
|
|
|
|
|
|
// If this peer had an active persistent connection request, then we
|
|
|
|
// can remove this as we manually decide below if we should attempt to
|
|
|
|
// re-connect.
|
|
|
|
if p.connReq != nil {
|
|
|
|
s.connMgr.Remove(p.connReq.ID())
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, check to see if this is a persistent peer or not.
|
|
|
|
pubStr := string(p.addr.IdentityKey.SerializeCompressed())
|
2017-05-06 02:02:03 +03:00
|
|
|
s.pendingConnMtx.RLock()
|
|
|
|
_, ok := s.persistentPeers[pubStr]
|
|
|
|
s.pendingConnMtx.RUnlock()
|
|
|
|
if ok {
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
srvrLog.Debugf("Attempting to re-establish persistent "+
|
|
|
|
"connection to peer %v", p)
|
|
|
|
|
|
|
|
// If so, then we'll attempt to re-establish a persistent
|
|
|
|
// connection to the peer.
|
2017-05-05 01:24:45 +03:00
|
|
|
// TODO(roasbeef): look up latest info for peer in database
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
connReq := &connmgr.ConnReq{
|
|
|
|
Addr: p.addr,
|
|
|
|
Permanent: true,
|
|
|
|
}
|
|
|
|
|
|
|
|
s.pendingConnMtx.Lock()
|
2017-05-05 01:24:45 +03:00
|
|
|
// We'll only need to re-launch a connection requests if one
|
|
|
|
// isn't already currently pending.
|
|
|
|
if _, ok := s.persistentConnReqs[pubStr]; ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, we'll launch a new connection requests in order
|
|
|
|
// to attempt to maintain a persistent connection with this
|
|
|
|
// peer.
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
s.persistentConnReqs[pubStr] = append(s.persistentConnReqs[pubStr],
|
|
|
|
connReq)
|
|
|
|
s.pendingConnMtx.Unlock()
|
|
|
|
|
|
|
|
go s.connMgr.Connect(connReq)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
// peerConnected is a function that handles initialization a newly connected
|
|
|
|
// peer by adding it to the server's global list of all active peers, and
|
|
|
|
// starting all the goroutines the peer needs to function properly.
|
|
|
|
func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, inbound bool) {
|
|
|
|
brontideConn := conn.(*brontide.Conn)
|
|
|
|
peerAddr := &lnwire.NetAddress{
|
|
|
|
IdentityKey: brontideConn.RemotePub(),
|
|
|
|
Address: conn.RemoteAddr().(*net.TCPAddr),
|
|
|
|
ChainNet: activeNetParams.Net,
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that we've established a connection, create a peer, and
|
|
|
|
// it to the set of currently active peers.
|
2017-02-22 12:10:07 +03:00
|
|
|
p, err := newPeer(conn, connReq, s, peerAddr, inbound)
|
2016-12-15 05:11:31 +03:00
|
|
|
if err != nil {
|
|
|
|
srvrLog.Errorf("unable to create peer %v", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(roasbeef): update IP address for link-node
|
|
|
|
// * also mark last-seen, do it one single transaction?
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
// Attempt to start the peer, if we're unable to do so, then disconnect
|
|
|
|
// this peer.
|
2017-02-22 12:10:07 +03:00
|
|
|
if err := p.Start(); err != nil {
|
2017-02-21 12:35:21 +03:00
|
|
|
srvrLog.Errorf("unable to start peer: %v", err)
|
2017-02-22 12:10:07 +03:00
|
|
|
p.Disconnect()
|
2017-02-16 15:39:38 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-02-22 12:10:07 +03:00
|
|
|
s.newPeers <- p
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
// shouldDropConnection determines if our local connection to a remote peer
|
|
|
|
// should be dropped in the case of concurrent connection establishment. In
|
|
|
|
// order to deterministically decide which connection should be dropped, we'll
|
|
|
|
// utilize the ordering of the local and remote public key. If we didn't use
|
|
|
|
// such a tie breaker, then we risk _both_ connections erroneously being
|
|
|
|
// dropped.
|
|
|
|
func shouldDropLocalConnection(local, remote *btcec.PublicKey) bool {
|
|
|
|
localPubBytes := local.SerializeCompressed()
|
|
|
|
remotePubPbytes := remote.SerializeCompressed()
|
|
|
|
|
|
|
|
// The connection that comes from the node with a "smaller" pubkey should
|
|
|
|
// be kept. Therefore, if our pubkey is "greater" than theirs, we should
|
|
|
|
// drop our established connection.
|
|
|
|
return bytes.Compare(localPubBytes, remotePubPbytes) > 0
|
|
|
|
}
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
// inboundPeerConnected initializes a new peer in response to a new inbound
|
|
|
|
// connection.
|
|
|
|
func (s *server) inboundPeerConnected(conn net.Conn) {
|
|
|
|
s.peersMtx.Lock()
|
|
|
|
defer s.peersMtx.Unlock()
|
|
|
|
|
2017-05-11 03:42:53 +03:00
|
|
|
// If we already have an inbound connection to this peer, then ignore
|
|
|
|
// this new connection.
|
|
|
|
nodePub := conn.(*brontide.Conn).RemotePub()
|
|
|
|
pubStr := string(nodePub.SerializeCompressed())
|
|
|
|
if _, ok := s.inboundPeers[pubStr]; ok {
|
|
|
|
srvrLog.Debugf("Ignoring duplicate inbound connection")
|
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-14 00:41:54 +03:00
|
|
|
srvrLog.Infof("New inbound connection from %v", conn.RemoteAddr())
|
2016-12-15 05:11:31 +03:00
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
localPub := s.identityPriv.PubKey()
|
2016-12-15 05:11:31 +03:00
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
// Check to see if we should drop our connection, if not, then we'll
|
|
|
|
// close out this connection with the remote peer. This
|
|
|
|
// prevents us from having duplicate connections, or none.
|
|
|
|
if connectedPeer, ok := s.peersByPub[pubStr]; ok {
|
|
|
|
// If the connection we've already established should be kept,
|
|
|
|
// then we'll close out this connection s.t there's only a
|
|
|
|
// single connection between us.
|
|
|
|
if !shouldDropLocalConnection(localPub, nodePub) {
|
|
|
|
srvrLog.Warnf("Received inbound connection from "+
|
|
|
|
"peer %x, but already connected, dropping conn",
|
|
|
|
nodePub.SerializeCompressed())
|
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, if we should drop the connection, then we'll
|
|
|
|
// disconnect our already connected peer, and also send the
|
|
|
|
// peer to the peer garbage collection goroutine.
|
|
|
|
srvrLog.Debugf("Disconnecting stale connection to %v",
|
|
|
|
connectedPeer)
|
|
|
|
connectedPeer.Disconnect()
|
|
|
|
s.donePeers <- connectedPeer
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
// Next, check to see if we have any outstanding persistent connection
|
|
|
|
// requests to this peer. If so, then we'll remove all of these
|
|
|
|
// connection requests, and also delete the entry from the map.
|
|
|
|
s.pendingConnMtx.Lock()
|
2017-02-25 02:46:02 +03:00
|
|
|
if connReqs, ok := s.persistentConnReqs[pubStr]; ok {
|
|
|
|
for _, connReq := range connReqs {
|
|
|
|
s.connMgr.Remove(connReq.ID())
|
|
|
|
}
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
delete(s.persistentConnReqs, pubStr)
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
s.pendingConnMtx.Unlock()
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2017-04-12 07:14:38 +03:00
|
|
|
go s.peerConnected(conn, nil, false)
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// outboundPeerConnected initializes a new peer in response to a new outbound
|
|
|
|
// connection.
|
|
|
|
func (s *server) outboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) {
|
|
|
|
s.peersMtx.Lock()
|
|
|
|
defer s.peersMtx.Unlock()
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
localPub := s.identityPriv.PubKey()
|
2016-12-15 05:11:31 +03:00
|
|
|
nodePub := conn.(*brontide.Conn).RemotePub()
|
|
|
|
pubStr := string(nodePub.SerializeCompressed())
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
|
|
|
// If we already have an outbound connection to this peer, then ignore
|
|
|
|
// this new connection.
|
|
|
|
if _, ok := s.outboundPeers[pubStr]; ok {
|
|
|
|
srvrLog.Debugf("Ignoring duplicate outbound connection")
|
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, ok := s.persistentConnReqs[pubStr]; !ok && connReq != nil {
|
|
|
|
srvrLog.Debugf("Ignoring cancelled outbound connection")
|
2017-01-24 07:32:49 +03:00
|
|
|
conn.Close()
|
2016-12-15 05:11:31 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
srvrLog.Infof("Established connection to: %v", conn.RemoteAddr())
|
|
|
|
|
|
|
|
// As we've just established an outbound connection to this peer, we'll
|
|
|
|
// cancel all other persistent connection requests and eliminate the
|
|
|
|
// entry for this peer from the map.
|
|
|
|
s.pendingConnMtx.Lock()
|
|
|
|
if connReqs, ok := s.persistentConnReqs[pubStr]; ok {
|
|
|
|
for _, pConnReq := range connReqs {
|
|
|
|
if pConnReq.ID() != connReq.ID() {
|
|
|
|
s.connMgr.Remove(pConnReq.ID())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
delete(s.persistentConnReqs, pubStr)
|
|
|
|
}
|
|
|
|
s.pendingConnMtx.Unlock()
|
|
|
|
|
|
|
|
// If we already have an inbound connection from this peer, then we'll
|
|
|
|
// check to see _which_ of our connections should be dropped.
|
|
|
|
if connectedPeer, ok := s.peersByPub[pubStr]; ok {
|
|
|
|
// If our (this) connection should be dropped, then we'll do
|
|
|
|
// so, in order to ensure we don't have any duplicate
|
|
|
|
// connections.
|
|
|
|
if shouldDropLocalConnection(localPub, nodePub) {
|
|
|
|
srvrLog.Warnf("Established outbound connection to "+
|
|
|
|
"peer %x, but already connected, dropping conn",
|
|
|
|
nodePub.SerializeCompressed())
|
|
|
|
s.connMgr.Remove(connReq.ID())
|
|
|
|
conn.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise, _their_ connection should be dropped. So we'll
|
|
|
|
// disconnect the peer and send the now obsolete peer to the
|
|
|
|
// server for garbage collection.
|
|
|
|
srvrLog.Debugf("Disconnecting stale connection to %v",
|
|
|
|
connectedPeer)
|
|
|
|
connectedPeer.Disconnect()
|
|
|
|
s.donePeers <- connectedPeer
|
|
|
|
}
|
|
|
|
|
2017-04-12 07:14:38 +03:00
|
|
|
go s.peerConnected(conn, connReq, true)
|
2016-12-15 05:11:31 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// addPeer adds the passed peer to the server's global state of all active
|
|
|
|
// peers.
|
|
|
|
func (s *server) addPeer(p *peer) {
|
|
|
|
if p == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ignore new peers if we're shutting down.
|
|
|
|
if atomic.LoadInt32(&s.shutdown) != 0 {
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
p.Disconnect()
|
2016-06-21 21:52:09 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
// Track the new peer in our indexes so we can quickly look it up either
|
|
|
|
// according to its public key, or it's peer ID.
|
|
|
|
// TODO(roasbeef): pipe all requests through to the
|
|
|
|
// queryHandler/peerManager
|
|
|
|
s.peersMtx.Lock()
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
|
|
|
|
pubStr := string(p.addr.IdentityKey.SerializeCompressed())
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
s.peersByID[p.id] = p
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
s.peersByPub[pubStr] = p
|
|
|
|
|
|
|
|
if p.inbound {
|
|
|
|
s.inboundPeers[pubStr] = p
|
|
|
|
} else {
|
|
|
|
s.outboundPeers[pubStr] = p
|
|
|
|
}
|
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
s.peersMtx.Unlock()
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
// Launch a goroutine to watch for the termination of this peer so we
|
|
|
|
// can ensure all resources are properly cleaned up and if need be
|
|
|
|
// connections are re-established.
|
|
|
|
go s.peerTerminationWatcher(p)
|
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
// Once the peer has been added to our indexes, send a message to the
|
|
|
|
// channel router so we can synchronize our view of the channel graph
|
|
|
|
// with this new peer.
|
2017-03-20 00:06:10 +03:00
|
|
|
go s.discoverSrv.SynchronizeNode(p.addr.IdentityKey)
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// removePeer removes the passed peer from the server's state of all active
|
|
|
|
// peers.
|
|
|
|
func (s *server) removePeer(p *peer) {
|
2016-12-15 05:11:31 +03:00
|
|
|
s.peersMtx.Lock()
|
|
|
|
defer s.peersMtx.Unlock()
|
|
|
|
|
2016-07-14 02:34:23 +03:00
|
|
|
srvrLog.Debugf("removing peer %v", p)
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
if p == nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-02-07 02:04:52 +03:00
|
|
|
// As the peer is now finished, ensure that the TCP connection is
|
|
|
|
// closed and all of its related goroutines have exited.
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
p.Disconnect()
|
2017-02-07 02:04:52 +03:00
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// Ignore deleting peers if we're shutting down.
|
|
|
|
if atomic.LoadInt32(&s.shutdown) != 0 {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
pubStr := string(p.addr.IdentityKey.SerializeCompressed())
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
delete(s.peersByID, p.id)
|
server+peer: re-write persistent connection handling
The prior methods we employed to handle persistent connections could
result in the following situation: both peers come up, and
_concurrently_ establish connection to each other. With the prior
logic, at this point, both connections would be terminated as each peer
would go to kill the connection of the other peer. In order to resolve
this issue in this commit, we’ve re-written the way we handle
persistent connections.
The eliminate the issue described above, in the case of concurrent peer
connection, we now use a deterministic method to decide _which_
connection should be closed. The following rule governs which
connection should be closed: the connection of the peer with the
“smaller” public key should be closed. With this rule we now avoid the
issue described above.
Additionally, each peer now gains a peerTerminationWatcher which waits
until a peer has been disconnected, and then cleans up all resources
allocated to the peer, notifies relevant sub-systems of its demise, and
finally handles re-connecting to the peer if it's persistent. This
replaces the goroutine that was spawned in the old version of
peer.Disconnect().
2017-04-24 05:38:34 +03:00
|
|
|
delete(s.peersByPub, pubStr)
|
|
|
|
|
|
|
|
if p.inbound {
|
|
|
|
delete(s.inboundPeers, pubStr)
|
|
|
|
} else {
|
|
|
|
delete(s.outboundPeers, pubStr)
|
|
|
|
}
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// connectPeerMsg is a message requesting the server to open a connection to a
|
|
|
|
// particular peer. This message also houses an error channel which will be
|
|
|
|
// used to report success/failure.
|
2016-01-17 06:09:02 +03:00
|
|
|
type connectPeerMsg struct {
|
2017-01-10 06:08:52 +03:00
|
|
|
addr *lnwire.NetAddress
|
|
|
|
persistent bool
|
|
|
|
|
|
|
|
err chan error
|
2016-01-17 06:09:02 +03:00
|
|
|
}
|
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// disconnectPeerMsg is a message requesting the server to disconnect from an
|
|
|
|
// active peer.
|
2017-05-02 22:31:35 +03:00
|
|
|
type disconnectPeerMsg struct {
|
2017-05-06 02:02:03 +03:00
|
|
|
pubKey *btcec.PublicKey
|
2017-05-02 22:31:35 +03:00
|
|
|
|
|
|
|
err chan error
|
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// openChanReq is a message sent to the server in order to request the
|
2016-09-14 01:35:41 +03:00
|
|
|
// initiation of a channel funding workflow to the peer with either the specified
|
|
|
|
// relative peer ID, or a global lightning ID.
|
2016-06-21 22:32:32 +03:00
|
|
|
type openChanReq struct {
|
2016-09-14 01:35:41 +03:00
|
|
|
targetPeerID int32
|
2016-10-28 05:49:10 +03:00
|
|
|
targetPubkey *btcec.PublicKey
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
// TODO(roasbeef): make enums in lnwire
|
|
|
|
channelType uint8
|
|
|
|
coinType uint64
|
|
|
|
|
|
|
|
localFundingAmt btcutil.Amount
|
|
|
|
remoteFundingAmt btcutil.Amount
|
|
|
|
|
2017-01-10 06:05:11 +03:00
|
|
|
pushAmt btcutil.Amount
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
numConfs uint32
|
|
|
|
|
2016-08-31 02:52:53 +03:00
|
|
|
updates chan *lnrpc.OpenStatusUpdate
|
|
|
|
err chan error
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
|
|
|
|
2016-07-27 21:32:27 +03:00
|
|
|
// queryHandler handles any requests to modify the server's internal state of
|
2016-07-14 02:34:23 +03:00
|
|
|
// all active peers, or query/mutate the server's global state. Additionally,
|
|
|
|
// any queries directed at peers will be handled by this goroutine.
|
2016-06-21 21:52:09 +03:00
|
|
|
//
|
|
|
|
// NOTE: This MUST be run as a goroutine.
|
2016-01-14 08:41:46 +03:00
|
|
|
func (s *server) queryHandler() {
|
2016-12-15 05:11:31 +03:00
|
|
|
go s.connMgr.Start()
|
|
|
|
|
2016-01-14 08:41:46 +03:00
|
|
|
out:
|
|
|
|
for {
|
|
|
|
select {
|
2016-07-14 02:34:23 +03:00
|
|
|
// New peers.
|
|
|
|
case p := <-s.newPeers:
|
|
|
|
s.addPeer(p)
|
|
|
|
|
|
|
|
// Finished peers.
|
|
|
|
case p := <-s.donePeers:
|
|
|
|
s.removePeer(p)
|
|
|
|
|
2016-12-27 08:42:23 +03:00
|
|
|
case bMsg := <-s.broadcastRequests:
|
|
|
|
ignore := bMsg.ignore
|
|
|
|
|
|
|
|
srvrLog.Debugf("Broadcasting %v messages", len(bMsg.msgs))
|
|
|
|
|
2017-01-25 04:06:23 +03:00
|
|
|
// Launch a new goroutine to handle the broadcast
|
|
|
|
// request, this allows us process this request
|
|
|
|
// asynchronously without blocking subsequent broadcast
|
|
|
|
// requests.
|
|
|
|
go func() {
|
|
|
|
s.peersMtx.RLock()
|
2017-01-30 04:15:11 +03:00
|
|
|
for _, sPeer := range s.peersByPub {
|
2017-01-25 04:06:23 +03:00
|
|
|
if ignore != nil &&
|
2017-01-30 04:15:11 +03:00
|
|
|
sPeer.addr.IdentityKey.IsEqual(ignore) {
|
2017-01-25 04:06:23 +03:00
|
|
|
|
|
|
|
srvrLog.Debugf("Skipping %v in broadcast",
|
|
|
|
ignore.SerializeCompressed())
|
|
|
|
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-01-30 04:15:11 +03:00
|
|
|
go func(p *peer) {
|
2017-01-30 02:02:57 +03:00
|
|
|
for _, msg := range bMsg.msgs {
|
2017-01-30 04:15:11 +03:00
|
|
|
p.queueMsg(msg, nil)
|
2017-01-30 02:02:57 +03:00
|
|
|
}
|
2017-01-30 04:15:11 +03:00
|
|
|
}(sPeer)
|
2016-12-27 08:42:23 +03:00
|
|
|
}
|
2017-01-25 04:06:23 +03:00
|
|
|
s.peersMtx.RUnlock()
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2017-01-25 04:06:23 +03:00
|
|
|
bMsg.errChan <- nil
|
|
|
|
}()
|
2016-12-27 08:42:23 +03:00
|
|
|
case sMsg := <-s.sendRequests:
|
|
|
|
// TODO(roasbeef): use [33]byte everywhere instead
|
|
|
|
// * eliminate usage of mutexes, funnel all peer
|
2017-01-25 04:06:23 +03:00
|
|
|
// mutation to this goroutine
|
2016-12-27 08:42:23 +03:00
|
|
|
target := sMsg.target.SerializeCompressed()
|
|
|
|
|
|
|
|
srvrLog.Debugf("Attempting to send msgs %v to: %x",
|
|
|
|
len(sMsg.msgs), target)
|
|
|
|
|
2017-01-25 04:06:23 +03:00
|
|
|
// Launch a new goroutine to handle this send request,
|
|
|
|
// this allows us process this request asynchronously
|
|
|
|
// without blocking future send requests.
|
|
|
|
go func() {
|
|
|
|
s.peersMtx.RLock()
|
|
|
|
targetPeer, ok := s.peersByPub[string(target)]
|
|
|
|
if !ok {
|
|
|
|
s.peersMtx.RUnlock()
|
|
|
|
srvrLog.Errorf("unable to send message to %x, "+
|
|
|
|
"peer not found", target)
|
|
|
|
sMsg.errChan <- errors.New("peer not found")
|
|
|
|
return
|
|
|
|
}
|
2017-01-30 02:02:57 +03:00
|
|
|
s.peersMtx.RUnlock()
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2017-02-03 03:57:20 +03:00
|
|
|
sMsg.errChan <- nil
|
|
|
|
|
2017-01-25 04:06:23 +03:00
|
|
|
for _, msg := range sMsg.msgs {
|
|
|
|
targetPeer.queueMsg(msg, nil)
|
|
|
|
}
|
|
|
|
}()
|
2016-01-17 06:09:02 +03:00
|
|
|
case query := <-s.queries:
|
|
|
|
switch msg := query.(type) {
|
2017-05-02 22:31:35 +03:00
|
|
|
case *disconnectPeerMsg:
|
|
|
|
s.handleDisconnectPeer(msg)
|
2016-01-17 06:09:02 +03:00
|
|
|
case *connectPeerMsg:
|
2016-06-21 21:52:09 +03:00
|
|
|
s.handleConnectPeer(msg)
|
2016-06-21 22:32:32 +03:00
|
|
|
case *openChanReq:
|
|
|
|
s.handleOpenChanReq(msg)
|
2016-01-17 06:09:02 +03:00
|
|
|
}
|
2016-01-14 08:41:46 +03:00
|
|
|
case <-s.quit:
|
|
|
|
break out
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
s.connMgr.Stop()
|
|
|
|
|
2016-01-14 08:41:46 +03:00
|
|
|
s.wg.Done()
|
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// handleConnectPeer attempts to establish a connection to the address enclosed
|
|
|
|
// within the passed connectPeerMsg. This function is *async*, a goroutine will
|
|
|
|
// be spawned in order to finish the request, and respond to the caller.
|
|
|
|
func (s *server) handleConnectPeer(msg *connectPeerMsg) {
|
|
|
|
addr := msg.addr
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
targetPub := string(msg.addr.IdentityKey.SerializeCompressed())
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// Ensure we're not already connected to this
|
|
|
|
// peer.
|
2016-12-15 05:11:31 +03:00
|
|
|
s.peersMtx.RLock()
|
|
|
|
peer, ok := s.peersByPub[targetPub]
|
|
|
|
if ok {
|
|
|
|
s.peersMtx.RUnlock()
|
|
|
|
msg.err <- fmt.Errorf("already connected to peer: %v", peer)
|
|
|
|
return
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
s.peersMtx.RUnlock()
|
|
|
|
|
|
|
|
// If there's already a pending connection request for this pubkey,
|
|
|
|
// then we ignore this request to ensure we don't create a redundant
|
|
|
|
// connection.
|
|
|
|
s.pendingConnMtx.RLock()
|
|
|
|
if _, ok := s.persistentConnReqs[targetPub]; ok {
|
|
|
|
s.pendingConnMtx.RUnlock()
|
|
|
|
msg.err <- fmt.Errorf("connection attempt to %v is pending",
|
|
|
|
addr)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
s.pendingConnMtx.RUnlock()
|
|
|
|
|
|
|
|
// If there's not already a pending or active connection to this node,
|
|
|
|
// then instruct the connection manager to attempt to establish a
|
|
|
|
// persistent connection to the peer.
|
2016-12-25 03:51:25 +03:00
|
|
|
srvrLog.Debugf("Connecting to %v", addr)
|
2017-01-10 06:08:52 +03:00
|
|
|
if msg.persistent {
|
2017-05-05 02:47:48 +03:00
|
|
|
connReq := &connmgr.ConnReq{
|
2017-01-10 06:08:52 +03:00
|
|
|
Addr: addr,
|
|
|
|
Permanent: true,
|
2017-05-05 02:47:48 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
s.pendingConnMtx.Lock()
|
2017-05-06 01:57:09 +03:00
|
|
|
s.persistentPeers[targetPub] = struct{}{}
|
2017-05-05 02:47:48 +03:00
|
|
|
s.persistentConnReqs[targetPub] = append(s.persistentConnReqs[targetPub],
|
|
|
|
connReq)
|
|
|
|
s.pendingConnMtx.Unlock()
|
|
|
|
|
|
|
|
go s.connMgr.Connect(connReq)
|
2017-01-25 04:06:23 +03:00
|
|
|
msg.err <- nil
|
2017-01-10 06:08:52 +03:00
|
|
|
} else {
|
|
|
|
// If we're not making a persistent connection, then we'll
|
|
|
|
// attempt to connect o the target peer, returning an error
|
|
|
|
// which indicates success of failure.
|
|
|
|
go func() {
|
|
|
|
// Attempt to connect to the remote node. If the we
|
|
|
|
// can't make the connection, or the crypto negotiation
|
|
|
|
// breaks down, then return an error to the caller.
|
|
|
|
conn, err := brontide.Dial(s.identityPriv, addr)
|
|
|
|
if err != nil {
|
|
|
|
msg.err <- err
|
|
|
|
return
|
|
|
|
}
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2017-01-10 06:08:52 +03:00
|
|
|
s.outboundPeerConnected(nil, conn)
|
|
|
|
msg.err <- nil
|
|
|
|
}()
|
|
|
|
}
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-05-02 22:31:35 +03:00
|
|
|
// handleDisconnectPeer attempts to disconnect one peer from another
|
|
|
|
func (s *server) handleDisconnectPeer(msg *disconnectPeerMsg) {
|
2017-05-06 02:02:03 +03:00
|
|
|
pubBytes := msg.pubKey.SerializeCompressed()
|
|
|
|
pubStr := string(pubBytes)
|
2017-05-02 22:31:35 +03:00
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// Check that were actually connected to this peer. If not, then we'll
|
|
|
|
// exit in an error as we can't disconnect from a peer that we're not
|
|
|
|
// currently connected to.
|
2017-05-02 22:31:35 +03:00
|
|
|
s.peersMtx.RLock()
|
2017-05-06 02:02:03 +03:00
|
|
|
peer, ok := s.peersByPub[pubStr]
|
2017-05-02 22:31:35 +03:00
|
|
|
s.peersMtx.RUnlock()
|
|
|
|
if !ok {
|
2017-05-06 02:02:03 +03:00
|
|
|
msg.err <- fmt.Errorf("unable to find peer %x", pubBytes)
|
2017-05-02 22:31:35 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// If this peer was formerly a persistent connection, then we'll remove
|
|
|
|
// them from this map so we don't attempt to re-connect after we
|
|
|
|
// disconnect.
|
|
|
|
s.pendingConnMtx.Lock()
|
|
|
|
if _, ok := s.persistentPeers[pubStr]; ok {
|
|
|
|
delete(s.persistentPeers, pubStr)
|
2017-05-02 22:31:35 +03:00
|
|
|
}
|
2017-05-06 02:02:03 +03:00
|
|
|
s.pendingConnMtx.Unlock()
|
2017-05-02 22:31:35 +03:00
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// Now that we know the peer is actually connected, we'll disconnect
|
|
|
|
// from the peer.
|
2017-05-02 22:31:35 +03:00
|
|
|
srvrLog.Infof("Disconnecting from %v", peer)
|
|
|
|
peer.Disconnect()
|
2017-05-06 02:02:03 +03:00
|
|
|
|
2017-05-02 22:31:35 +03:00
|
|
|
msg.err <- nil
|
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// handleOpenChanReq first locates the target peer, and if found hands off the
|
|
|
|
// request to the funding manager allowing it to initiate the channel funding
|
|
|
|
// workflow.
|
|
|
|
func (s *server) handleOpenChanReq(req *openChanReq) {
|
2017-01-15 05:12:20 +03:00
|
|
|
var (
|
2017-02-21 03:33:14 +03:00
|
|
|
targetPeer *peer
|
|
|
|
pubKeyBytes []byte
|
2017-01-15 05:12:20 +03:00
|
|
|
)
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2017-01-15 05:12:20 +03:00
|
|
|
// If the user is targeting the peer by public key, then we'll need to
|
|
|
|
// convert that into a string for our map. Otherwise, we expect them to
|
|
|
|
// target by peer ID instead.
|
|
|
|
if req.targetPubkey != nil {
|
2017-02-21 03:33:14 +03:00
|
|
|
pubKeyBytes = req.targetPubkey.SerializeCompressed()
|
2017-01-15 05:12:20 +03:00
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// First attempt to locate the target peer to open a channel with, if
|
|
|
|
// we're unable to locate the peer then this request will fail.
|
2016-12-15 05:11:31 +03:00
|
|
|
s.peersMtx.RLock()
|
|
|
|
if peer, ok := s.peersByID[req.targetPeerID]; ok {
|
|
|
|
targetPeer = peer
|
2017-02-21 03:33:14 +03:00
|
|
|
} else if peer, ok := s.peersByPub[string(pubKeyBytes)]; ok {
|
2016-12-15 05:11:31 +03:00
|
|
|
targetPeer = peer
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
2016-12-15 05:11:31 +03:00
|
|
|
s.peersMtx.RUnlock()
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
if targetPeer == nil {
|
2016-10-28 05:49:10 +03:00
|
|
|
req.err <- fmt.Errorf("unable to find peer nodeID(%x), "+
|
2017-02-21 03:33:14 +03:00
|
|
|
"peerID(%v)", pubKeyBytes, req.targetPeerID)
|
2016-06-21 22:32:32 +03:00
|
|
|
return
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// Spawn a goroutine to send the funding workflow request to the
|
|
|
|
// funding manager. This allows the server to continue handling queries
|
|
|
|
// instead of blocking on this request which is exported as a
|
|
|
|
// synchronous request to the outside world.
|
|
|
|
// TODO(roasbeef): pass in chan that's closed if/when funding succeeds
|
|
|
|
// so can track as persistent peer?
|
2017-01-13 06:40:38 +03:00
|
|
|
go s.fundingMgr.initFundingWorkflow(targetPeer.addr, req)
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// ConnectToPeer requests that the server connect to a Lightning Network peer
|
|
|
|
// at the specified address. This function will *block* until either a
|
|
|
|
// connection is established, or the initial handshake process fails.
|
2017-01-10 06:08:52 +03:00
|
|
|
func (s *server) ConnectToPeer(addr *lnwire.NetAddress,
|
|
|
|
perm bool) error {
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
errChan := make(chan error, 1)
|
|
|
|
|
2017-01-10 06:08:52 +03:00
|
|
|
s.queries <- &connectPeerMsg{
|
|
|
|
addr: addr,
|
|
|
|
persistent: perm,
|
|
|
|
err: errChan,
|
|
|
|
}
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-01-10 06:08:52 +03:00
|
|
|
return <-errChan
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
|
|
|
|
2017-05-06 02:02:03 +03:00
|
|
|
// DisconnectPeer sends the request to server to close the connection with peer
|
|
|
|
// identified by public key.
|
|
|
|
func (s *server) DisconnectPeer(pubKey *btcec.PublicKey) error {
|
2017-05-02 22:31:35 +03:00
|
|
|
|
|
|
|
errChan := make(chan error, 1)
|
|
|
|
|
|
|
|
s.queries <- &disconnectPeerMsg{
|
2017-05-06 02:02:03 +03:00
|
|
|
pubKey: pubKey,
|
2017-05-02 22:31:35 +03:00
|
|
|
err: errChan,
|
|
|
|
}
|
|
|
|
|
|
|
|
return <-errChan
|
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// OpenChannel sends a request to the server to open a channel to the specified
|
|
|
|
// peer identified by ID with the passed channel funding paramters.
|
2016-10-28 05:49:10 +03:00
|
|
|
func (s *server) OpenChannel(peerID int32, nodeKey *btcec.PublicKey,
|
2017-01-10 06:05:11 +03:00
|
|
|
localAmt, pushAmt btcutil.Amount,
|
2016-08-31 02:52:53 +03:00
|
|
|
numConfs uint32) (chan *lnrpc.OpenStatusUpdate, chan error) {
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
errChan := make(chan error, 1)
|
2016-08-31 02:52:53 +03:00
|
|
|
updateChan := make(chan *lnrpc.OpenStatusUpdate, 1)
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2016-09-14 01:35:41 +03:00
|
|
|
req := &openChanReq{
|
2017-01-10 06:05:11 +03:00
|
|
|
targetPeerID: peerID,
|
|
|
|
targetPubkey: nodeKey,
|
|
|
|
localFundingAmt: localAmt,
|
|
|
|
pushAmt: pushAmt,
|
|
|
|
numConfs: numConfs,
|
|
|
|
updates: updateChan,
|
|
|
|
err: errChan,
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
2016-09-14 01:35:41 +03:00
|
|
|
|
|
|
|
s.queries <- req
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2016-08-31 02:52:53 +03:00
|
|
|
return updateChan, errChan
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 21:52:09 +03:00
|
|
|
// Peers returns a slice of all active peers.
|
|
|
|
func (s *server) Peers() []*peer {
|
2017-06-05 08:18:12 +03:00
|
|
|
s.peersMtx.RLock()
|
|
|
|
|
|
|
|
peers := make([]*peer, 0, len(s.peersByID))
|
|
|
|
for _, peer := range s.peersByID {
|
|
|
|
peers = append(peers, peer)
|
|
|
|
}
|
2016-06-21 21:52:09 +03:00
|
|
|
|
2017-06-05 08:18:12 +03:00
|
|
|
s.peersMtx.RUnlock()
|
2016-06-21 21:52:09 +03:00
|
|
|
|
2017-06-05 08:18:12 +03:00
|
|
|
return peers
|
2016-06-21 21:52:09 +03:00
|
|
|
}
|