package main

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/lightningnetwork/lightning-onion"
	"github.com/lightningnetwork/lnd/brontide"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/routing"
	"github.com/roasbeef/btcd/btcec"
	"github.com/roasbeef/btcd/chaincfg/chainhash"
	"github.com/roasbeef/btcd/connmgr"
	"github.com/roasbeef/btcutil"
)

// server is the main server of the Lightning Network Daemon. The server houses
// global state pertaining to the wallet, database, and the rpcserver.
// Additionally, the server is also used as a central messaging bus to interact
// with any of its companion objects.
type server struct {
	started  int32 // atomic
	shutdown int32 // atomic

	// identityPriv is the private key used to authenticate any incoming
	// connections.
	identityPriv *btcec.PrivateKey

	// lightningID is the sha256 of the public key corresponding to our
	// long-term identity private key.
	lightningID [32]byte

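	// peersMtx guards the peersByID and peersByPub indexes below, which
	// track all active peers keyed by their assigned peer ID and by the
	// compressed serialization of their identity public key respectively.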
	peersMtx   sync.RWMutex
	peersByID  map[int32]*peer
	peersByPub map[string]*peer

	rpcServer *rpcServer

	chainNotifier chainntnfs.ChainNotifier

	bio      lnwallet.BlockChainIO
	lnwallet *lnwallet.LightningWallet

	fundingMgr *fundingManager
	chanDB     *channeldb.DB

	htlcSwitch    *htlcSwitch
	invoices      *invoiceRegistry
	breachArbiter *breachArbiter

	chanRouter *routing.ChannelRouter

	utxoNursery *utxoNursery

	sphinx *sphinx.Router

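	// connMgr is the connection manager used to establish and maintain
	// both inbound and outbound connections to other nodes.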
	connMgr *connmgr.ConnManager

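	// pendingConnMtx guards persistentConnReqs, which tracks the
	// outstanding persistent connection requests submitted to the
	// connection manager, keyed by the target peer's serialized public
	// key.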
	pendingConnMtx     sync.RWMutex
	persistentConnReqs map[string][]*connmgr.ConnReq

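	// broadcastRequests and sendRequests carry requests from other
	// subsystems to broadcast a set of messages to all peers, or to send
	// a set of messages to a single target peer.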
	broadcastRequests chan *broadcastReq
	sendRequests      chan *sendReq

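	// newPeers and donePeers notify the server's main handler of peers
	// that have connected or finished, while queries carries the
	// remaining server queries.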
	newPeers  chan *peer
	donePeers chan *peer
	queries   chan interface{}

	// globalFeatures is the feature vector which affects HTLCs and is
	// also advertised to other nodes.
	globalFeatures *lnwire.FeatureVector

	// localFeatures is a feature vector which represents the features
	// that only affect the protocol between these two nodes.
	localFeatures *lnwire.FeatureVector

	wg   sync.WaitGroup
	quit chan struct{}
}

// newServer creates a new instance of the server which is to listen using the
// passed listener addresses.
func newServer(listenAddrs []string, notifier chainntnfs.ChainNotifier,
	bio lnwallet.BlockChainIO, wallet *lnwallet.LightningWallet,
	chanDB *channeldb.DB) (*server, error) {

	privKey, err := wallet.GetIdentitykey()
	if err != nil {
		return nil, err
	}
	privKey.Curve = btcec.S256()

	listeners := make([]net.Listener, len(listenAddrs))
	for i, addr := range listenAddrs {
		listeners[i], err = brontide.NewListener(privKey, addr)
		if err != nil {
			return nil, err
		}
	}

	serializedPubKey := privKey.PubKey().SerializeCompressed()
	s := &server{
		lnwallet:      wallet,
		bio:           bio,
		chainNotifier: notifier,
		chanDB:        chanDB,

		invoices:    newInvoiceRegistry(chanDB),
		utxoNursery: newUtxoNursery(chanDB, notifier, wallet),
		htlcSwitch:  newHtlcSwitch(),

		identityPriv: privKey,

		// TODO(roasbeef): derive proper onion key based on rotation
		// schedule
		sphinx:      sphinx.NewRouter(privKey, activeNetParams.Params),
		lightningID: sha256.Sum256(serializedPubKey),

		persistentConnReqs: make(map[string][]*connmgr.ConnReq),

		peersByID:  make(map[int32]*peer),
		peersByPub: make(map[string]*peer),

		newPeers:  make(chan *peer, 10),
		donePeers: make(chan *peer, 10),

		broadcastRequests: make(chan *broadcastReq),
		sendRequests:      make(chan *sendReq),

		globalFeatures: globalFeatures,
		localFeatures:  localFeatures,

		queries: make(chan interface{}),
		quit:    make(chan struct{}),
	}

	// If the debug HTLC flag is on, then we invoice a "master debug"
	// invoice to which all outgoing payments will be sent, and all
	// incoming HTLCs with the debug R-Hash will be immediately settled.
	if cfg.DebugHTLC {
		kiloCoin := btcutil.Amount(btcutil.SatoshiPerBitcoin * 1000)
		s.invoices.AddDebugInvoice(kiloCoin, *debugPre)

		srvrLog.Debugf("Debug HTLC invoice inserted, preimage=%x, hash=%x",
			debugPre[:], debugHash[:])
	}

	// If external IP addresses have been specified, add those to the list
	// of this server's addresses.
	selfAddrs := make([]net.Addr, 0)
	for _, ip := range cfg.ExternalIPs {
		addr, err := net.ResolveTCPAddr("tcp", ip)
		if err != nil {
			return nil, err
		}

		selfAddrs = append(selfAddrs, addr)
	}

	chanGraph := chanDB.ChannelGraph()
	self := &channeldb.LightningNode{
		LastUpdate: time.Now(),
		Addresses:  selfAddrs,
		PubKey:     privKey.PubKey(),
		// TODO(roasbeef): make alias configurable
		Alias:    hex.EncodeToString(serializedPubKey[:10]),
		Features: globalFeatures,
	}
	if err := chanGraph.SetSourceNode(self); err != nil {
		return nil, err
	}

	s.chanRouter, err = routing.New(routing.Config{
		Graph:        chanGraph,
		Chain:        bio,
		Notifier:     notifier,
		Broadcast:    s.broadcastMessage,
		SendMessages: s.sendToPeer,
		SendToSwitch: func(firstHop *btcec.PublicKey,
			htlcAdd *lnwire.UpdateAddHTLC) ([32]byte, error) {

			firstHopPub := firstHop.SerializeCompressed()
			destInterface := chainhash.Hash(sha256.Sum256(firstHopPub))

			return s.htlcSwitch.SendHTLC(&htlcPacket{
				dest: destInterface,
				msg:  htlcAdd,
			})
		},
	})
	if err != nil {
		return nil, err
	}

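	// With the routing layer in place, create the RPC server and the
	// breach arbiter, wiring the arbiter up to the wallet, database,
	// notifier, and HTLC switch it depends on.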
	s.rpcServer = newRPCServer(s)
	s.breachArbiter = newBreachArbiter(wallet, chanDB, notifier, s.htlcSwitch)

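	// Next, create the funding manager, handing it the callbacks it needs
	// to interact with the router, breach arbiter, and peer set when
	// driving a channel funding workflow.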
	s.fundingMgr, err = newFundingManager(fundingConfig{
		IDKey:    s.identityPriv.PubKey(),
		Wallet:   wallet,
		Notifier: s.chainNotifier,
		SendToRouter: func(msg lnwire.Message) {
			s.chanRouter.ProcessRoutingMessage(msg,
				s.identityPriv.PubKey())
		},
		ArbiterChan: s.breachArbiter.newContracts,
		SendToPeer:  s.sendToPeer,
		FindPeer:    s.findPeer,
		FindChannel: s.rpcServer.fetchActiveChannel,
	})
	if err != nil {
		return nil, err
	}

	// TODO(roasbeef): introduce closure and config system to decouple the
	// initialization above ^

	// Create the connection manager which will be responsible for
	// maintaining persistent outbound connections and also accepting new
	// incoming connections.
	cmgr, err := connmgr.New(&connmgr.Config{
		Listeners:      listeners,
		OnAccept:       s.inboundPeerConnected,
		RetryDuration:  time.Second * 5,
		TargetOutbound: 100,
		GetNewAddress:  nil,
		Dial:           noiseDial(s.identityPriv),
		OnConnection:   s.outboundPeerConnected,
	})
	if err != nil {
		return nil, err
	}
	s.connMgr = cmgr

	// In order to promote liveness of our active channels, instruct the
	// connection manager to attempt to establish and maintain persistent
	// connections to all our direct channel counterparties.

	// nodeAddrsMap stores the combination of node public keys and
	// addresses that we'll attempt to reconnect to. PubKey strings are
	// used as keys since other PubKey forms can't be compared.
	nodeAddrsMap := map[string]*nodeAddresses{}

	// Iterate through the list of LinkNodes to find addresses we should
	// attempt to connect to based on our set of previous connections. Set
	// the reconnection port to the default peer port.
	linkNodes, err := s.chanDB.FetchAllLinkNodes()
	if err != nil && err != channeldb.ErrLinkNodesNotFound {
		return nil, err
	}
	for _, node := range linkNodes {
		for _, address := range node.Addresses {
			if address.Port == 0 {
				address.Port = defaultPeerPort
			}
		}
		pubStr := string(node.IdentityPub.SerializeCompressed())
		nodeAddrs := &nodeAddresses{
			pubKey:    node.IdentityPub,
			addresses: node.Addresses,
		}
		nodeAddrsMap[pubStr] = nodeAddrs
	}

	// After checking our previous connections for addresses to connect to,
	// iterate through the nodes in our channel graph to find addresses
	// that have been added via NodeAnnouncement messages.
	sourceNode, err := chanGraph.SourceNode()
	if err != nil {
		return nil, err
	}
	err = sourceNode.ForEachChannel(nil, func(_ *channeldb.ChannelEdgeInfo,
		policy *channeldb.ChannelEdgePolicy) error {

		pubStr := string(policy.Node.PubKey.SerializeCompressed())

		// Add addresses from channel graph/NodeAnnouncements to the
		// list of addresses we'll connect to. If there are duplicates
		// that have different ports specified, the port from the
		// channel graph should supersede the port from the link node.
		var addrs []*net.TCPAddr
		linkNodeAddrs, ok := nodeAddrsMap[pubStr]
		if ok {
			for _, lnAddress := range linkNodeAddrs.addresses {
				var addrMatched bool
				for _, polAddress := range policy.Node.Addresses {
					polTCPAddr, ok :=
						polAddress.(*net.TCPAddr)
					if ok && polTCPAddr.IP.Equal(lnAddress.IP) {
						addrMatched = true
						addrs = append(addrs, polTCPAddr)
					}
				}
				if !addrMatched {
					addrs = append(addrs, lnAddress)
				}
			}
		} else {
			for _, addr := range policy.Node.Addresses {
				polTCPAddr, ok := addr.(*net.TCPAddr)
				if ok {
					addrs = append(addrs, polTCPAddr)
				}
			}
		}

		nodeAddrsMap[pubStr] = &nodeAddresses{
			pubKey:    policy.Node.PubKey,
			addresses: addrs,
		}

		return nil
	})
	if err != nil && err != channeldb.ErrGraphNoEdgesFound {
		return nil, err
	}

	// Iterate through the combined list of addresses from prior links and
	// node announcements and attempt to reconnect to each node.
	for pubStr, nodeAddr := range nodeAddrsMap {
		for _, address := range nodeAddr.addresses {
			// Create a wrapper address which couples the IP and
			// the pubkey so the brontide authenticated connection
			// can be established.
			lnAddr := &lnwire.NetAddress{
				IdentityKey: nodeAddr.pubKey,
				Address:     address,
			}
			srvrLog.Debugf("Attempting persistent connection to "+
				"channel peer %v", lnAddr)

			// Send the persistent connection request to the
			// connection manager, saving the request itself so we
			// can cancel/restart the process as needed.
			connReq := &connmgr.ConnReq{
				Addr:      lnAddr,
				Permanent: true,
			}

			s.persistentConnReqs[pubStr] =
				append(s.persistentConnReqs[pubStr], connReq)
			go s.connMgr.Connect(connReq)
		}
	}

	return s, nil
}

// Start starts the main daemon server, all requested listeners, and any helper
// goroutines.
func (s *server) Start() error {
	// Already running?
	if atomic.AddInt32(&s.started, 1) != 1 {
		return nil
	}

	// Start the notification server. This is used so channel management
	// goroutines can be notified when a funding transaction reaches a
	// sufficient number of confirmations, or when the input for the
	// funding transaction is spent in an attempt at an uncooperative close
	// by the counterparty.
	if err := s.chainNotifier.Start(); err != nil {
		return err
	}

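	// With the chain notifier running, bring up the remaining subsystems
	// in order: the RPC server, funding manager, HTLC switch, UTXO
	// nursery, breach arbiter, and channel router.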
	if err := s.rpcServer.Start(); err != nil {
		return err
	}
	if err := s.fundingMgr.Start(); err != nil {
		return err
	}
	if err := s.htlcSwitch.Start(); err != nil {
		return err
	}
	if err := s.utxoNursery.Start(); err != nil {
		return err
	}
	if err := s.breachArbiter.Start(); err != nil {
		return err
	}
	if err := s.chanRouter.Start(); err != nil {
		return err
	}

	s.wg.Add(1)
	go s.queryHandler()

	return nil
}

// Stop gracefully shuts down the main daemon server. This function will signal
// any active goroutines, or helper objects to exit, then blocks until they've
// all successfully exited. Additionally, any/all listeners are closed.
func (s *server) Stop() error {
	// Bail if we're already shutting down.
	if atomic.AddInt32(&s.shutdown, 1) != 1 {
		return nil
	}

	// Shutdown the wallet, funding manager, and the rpc server.
	s.chainNotifier.Stop()
	s.rpcServer.Stop()
	s.fundingMgr.Stop()
	s.chanRouter.Stop()
	s.htlcSwitch.Stop()
	s.utxoNursery.Stop()
	s.breachArbiter.Stop()

	s.lnwallet.Shutdown()

	// Signal all the lingering goroutines to quit.
	close(s.quit)
	s.wg.Wait()

	return nil
}

// WaitForShutdown blocks until all goroutines have been stopped.
func (s *server) WaitForShutdown() {
	s.wg.Wait()
}

// broadcastReq is a message sent to the server by a related subsystem when it
// wishes to broadcast one or more messages to all connected peers.
type broadcastReq struct {
	ignore *btcec.PublicKey
	msgs   []lnwire.Message

	errChan chan error // MUST be buffered.
}

// broadcastMessage sends a request to the server to broadcast a set of
// messages to all peers other than the one specified by the `skip` parameter.
func (s *server) broadcastMessage(skip *btcec.PublicKey, msgs ...lnwire.Message) error {
	errChan := make(chan error, 1)

	msgsToSend := make([]lnwire.Message, 0, len(msgs))
	msgsToSend = append(msgsToSend, msgs...)
	broadcastReq := &broadcastReq{
		ignore:  skip,
		msgs:    msgsToSend,
		errChan: errChan,
	}

	select {
	case s.broadcastRequests <- broadcastReq:
	case <-s.quit:
		return errors.New("server shutting down")
	}

	select {
	case err := <-errChan:
		return err
	case <-s.quit:
		return errors.New("server shutting down")
	}
}

// sendReq is a message sent to the server by a related subsystem when it
// wishes to send a set of messages to a specified peer.
type sendReq struct {
	target *btcec.PublicKey
	msgs   []lnwire.Message

	errChan chan error
}

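// nodeAddresses couples a node's identity public key with the set of TCP
// addresses it's known to be reachable at. It's used when establishing the
// persistent connections to channel counterparties at startup.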
type nodeAddresses struct {
	pubKey    *btcec.PublicKey
	addresses []*net.TCPAddr
}

// sendToPeer sends a message to the server telling it to send the specific set
// of messages to a particular peer. If the peer can't be found, then this
// method will return a non-nil error.
func (s *server) sendToPeer(target *btcec.PublicKey, msgs ...lnwire.Message) error {
	errChan := make(chan error, 1)

	msgsToSend := make([]lnwire.Message, 0, len(msgs))
	msgsToSend = append(msgsToSend, msgs...)
	sMsg := &sendReq{
		target:  target,
		msgs:    msgsToSend,
		errChan: errChan,
	}

	select {
	case s.sendRequests <- sMsg:
	case <-s.quit:
		return errors.New("server shutting down")
	}

	select {
	case err := <-errChan:
		return err
	case <-s.quit:
		return errors.New("server shutting down")
	}
}

// findPeer will return the peer that corresponds to the passed in public key.
// This function is used by the funding manager, allowing it to update the
// daemon's local representation of the remote peer.
func (s *server) findPeer(peerKey *btcec.PublicKey) (*peer, error) {
	serializedIDKey := string(peerKey.SerializeCompressed())

	s.peersMtx.RLock()
	peer := s.peersByPub[serializedIDKey]
	s.peersMtx.RUnlock()

	if peer == nil {
		return nil, errors.New("Peer not found. Pubkey: " +
			string(peerKey.SerializeCompressed()))
	}

	return peer, nil
}

// peerConnected is a function that handles the initialization of a newly
// connected peer by adding it to the server's global list of all active peers,
// and starting all the goroutines the peer needs to function properly.
func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq, inbound bool) {
	brontideConn := conn.(*brontide.Conn)
	peerAddr := &lnwire.NetAddress{
		IdentityKey: brontideConn.RemotePub(),
		Address:     conn.RemoteAddr().(*net.TCPAddr),
		ChainNet:    activeNetParams.Net,
	}

	// Now that we've established a connection, create a peer, and add it
	// to the set of currently active peers.
	p, err := newPeer(conn, connReq, s, peerAddr, inbound)
	if err != nil {
		srvrLog.Errorf("unable to create peer %v", err)
		if connReq != nil {
			s.connMgr.Remove(connReq.ID())
		}
		return
	}

	// TODO(roasbeef): update IP address for link-node
	//  * also mark last-seen, do it one single transaction?

	if err := p.Start(); err != nil {
		srvrLog.Errorf("unable to start peer: %v", err)
		if p.connReq != nil {
			s.connMgr.Remove(p.connReq.ID())
		}
		p.Disconnect()
		return
	}

	s.newPeers <- p
}

// inboundPeerConnected initializes a new peer in response to a new inbound
// connection.
func (s *server) inboundPeerConnected(conn net.Conn) {
	s.peersMtx.Lock()
	defer s.peersMtx.Unlock()

	srvrLog.Tracef("New inbound connection from %v", conn.RemoteAddr())

	nodePub := conn.(*brontide.Conn).RemotePub()

	// If we already have an outbound connection to this peer, simply drop
	// the connection.
	pubStr := string(nodePub.SerializeCompressed())
	if _, ok := s.peersByPub[pubStr]; ok {
		srvrLog.Errorf("Received inbound connection from peer %x, but "+
			"already connected, dropping conn",
			nodePub.SerializeCompressed())
		conn.Close()
		return
	}

	// However, if we receive an incoming connection from a peer we're
	// attempting to maintain a persistent connection with then we need to
	// cancel the ongoing connection attempts to ensure that we don't end
	// up with a duplicate connecting to the same peer.
	s.pendingConnMtx.RLock()
	if connReqs, ok := s.persistentConnReqs[pubStr]; ok {
		for _, connReq := range connReqs {
			s.connMgr.Remove(connReq.ID())
		}
	}
	s.pendingConnMtx.RUnlock()

	s.peerConnected(conn, nil, false)
}

// outboundPeerConnected initializes a new peer in response to a new outbound
// connection.
func (s *server) outboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn) {
	s.peersMtx.Lock()
	defer s.peersMtx.Unlock()

	srvrLog.Tracef("Established connection to: %v", conn.RemoteAddr())

	nodePub := conn.(*brontide.Conn).RemotePub()

	// If we already have an inbound connection from this peer, simply drop
	// the connection.
	pubStr := string(nodePub.SerializeCompressed())
	if _, ok := s.peersByPub[pubStr]; ok {
		srvrLog.Errorf("Established outbound connection to peer %x, but "+
			"already connected, dropping conn",
			nodePub.SerializeCompressed())
		s.connMgr.Remove(connReq.ID())
		conn.Close()
		return
	}

	s.peerConnected(conn, connReq, true)
}

// addPeer adds the passed peer to the server's global state of all active
// peers.
func (s *server) addPeer(p *peer) {
	if p == nil {
		return
	}

	// Ignore new peers if we're shutting down.
	if atomic.LoadInt32(&s.shutdown) != 0 {
		p.Stop()
		return
	}

	// Track the new peer in our indexes so we can quickly look it up
	// either according to its public key, or its peer ID.
	// TODO(roasbeef): pipe all requests through to the
	// queryHandler/peerManager
	s.peersMtx.Lock()
	s.peersByID[p.id] = p
	s.peersByPub[string(p.addr.IdentityKey.SerializeCompressed())] = p
	s.peersMtx.Unlock()

	// Once the peer has been added to our indexes, send a message to the
	// channel router so we can synchronize our view of the channel graph
	// with this new peer.
	go s.chanRouter.SynchronizeNode(p.addr.IdentityKey)
}

// removePeer removes the passed peer from the server's state of all active
// peers.
func (s *server) removePeer(p *peer) {
	s.peersMtx.Lock()
	defer s.peersMtx.Unlock()

	srvrLog.Debugf("removing peer %v", p)

	if p == nil {
		return
	}

	// As the peer is now finished, ensure that the TCP connection is
	// closed and all of its related goroutines have exited.
	if err := p.Stop(); err != nil {
		peerLog.Errorf("unable to stop peer: %v", err)
	}

	// Ignore deleting peers if we're shutting down.
	if atomic.LoadInt32(&s.shutdown) != 0 {
		return
	}

	delete(s.peersByID, p.id)
	delete(s.peersByPub, string(p.addr.IdentityKey.SerializeCompressed()))
}

// connectPeerMsg is a message requesting the server to open a connection to a
// particular peer. This message also houses an error channel which will be
// used to report success/failure.
type connectPeerMsg struct {
	addr *lnwire.NetAddress

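	// persistent denotes whether the connection manager should also
	// maintain the connection, automatically retrying it if it's ever
	// lost.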
	persistent bool

	err chan error
}

// listPeersMsg is a message sent to the server in order to obtain a listing
// of all currently active peers.
type listPeersMsg struct {
	resp chan []*peer
}

// openChanReq is a message sent to the server in order to request the
// initiation of a channel funding workflow to the peer with either the
// specified relative peer ID, or a global lightning ID.
type openChanReq struct {
	targetPeerID int32
	targetPubkey *btcec.PublicKey

	// TODO(roasbeef): make enums in lnwire
	channelType uint8
	coinType    uint64

	localFundingAmt  btcutil.Amount
	remoteFundingAmt btcutil.Amount

	pushAmt btcutil.Amount

	numConfs uint32

	updates chan *lnrpc.OpenStatusUpdate
	err     chan error
}

// queryHandler handles any requests to modify the server's internal state of
// all active peers, or query/mutate the server's global state. Additionally,
// any queries directed at peers will be handled by this goroutine.
//
// NOTE: This MUST be run as a goroutine.
func (s *server) queryHandler() {
	go s.connMgr.Start()

out:
	for {
		select {
		// New peers.
		case p := <-s.newPeers:
			s.addPeer(p)

		// Finished peers.
		case p := <-s.donePeers:
			s.removePeer(p)

		case bMsg := <-s.broadcastRequests:
			ignore := bMsg.ignore

			srvrLog.Debugf("Broadcasting %v messages", len(bMsg.msgs))

			// Launch a new goroutine to handle the broadcast
			// request. This allows us to process the request
			// asynchronously without blocking subsequent broadcast
			// requests.
			go func() {
				s.peersMtx.RLock()
				for _, sPeer := range s.peersByPub {
					if ignore != nil &&
						sPeer.addr.IdentityKey.IsEqual(ignore) {

						srvrLog.Debugf("Skipping %v in broadcast",
							ignore.SerializeCompressed())

						continue
					}

					go func(p *peer) {
						for _, msg := range bMsg.msgs {
							p.queueMsg(msg, nil)
						}
					}(sPeer)
				}
				s.peersMtx.RUnlock()

				bMsg.errChan <- nil
			}()
		case sMsg := <-s.sendRequests:
			// TODO(roasbeef): use [33]byte everywhere instead
			//  * eliminate usage of mutexes, funnel all peer
			//    mutation to this goroutine
			target := sMsg.target.SerializeCompressed()

			srvrLog.Debugf("Attempting to send msgs %v to: %x",
				len(sMsg.msgs), target)

			// Launch a new goroutine to handle this send request.
			// This allows us to process the request asynchronously
			// without blocking future send requests.
			go func() {
				s.peersMtx.RLock()
				targetPeer, ok := s.peersByPub[string(target)]
				if !ok {
					s.peersMtx.RUnlock()
					srvrLog.Errorf("unable to send message to %x, "+
						"peer not found", target)
					sMsg.errChan <- errors.New("peer not found")
					return
				}
				s.peersMtx.RUnlock()

				sMsg.errChan <- nil

				for _, msg := range sMsg.msgs {
					targetPeer.queueMsg(msg, nil)
				}
			}()
		case query := <-s.queries:
			switch msg := query.(type) {
			case *connectPeerMsg:
				s.handleConnectPeer(msg)
			case *listPeersMsg:
				s.handleListPeers(msg)
			case *openChanReq:
				s.handleOpenChanReq(msg)
			}
		case <-s.quit:
			break out
		}
	}

	s.connMgr.Stop()

	s.wg.Done()
}

// handleListPeers sends a slice of all currently active peers to the original
// caller.
func (s *server) handleListPeers(msg *listPeersMsg) {
	s.peersMtx.RLock()

	peers := make([]*peer, 0, len(s.peersByID))
	for _, peer := range s.peersByID {
		peers = append(peers, peer)
	}

	s.peersMtx.RUnlock()

	msg.resp <- peers
}

// handleConnectPeer attempts to establish a connection to the address enclosed
// within the passed connectPeerMsg. This function is *async*, a goroutine will
// be spawned in order to finish the request, and respond to the caller.
func (s *server) handleConnectPeer(msg *connectPeerMsg) {
	addr := msg.addr

	targetPub := string(msg.addr.IdentityKey.SerializeCompressed())

	// Ensure we're not already connected to this peer.
	s.peersMtx.RLock()
	peer, ok := s.peersByPub[targetPub]
	if ok {
		s.peersMtx.RUnlock()
		msg.err <- fmt.Errorf("already connected to peer: %v", peer)
		return
	}
	s.peersMtx.RUnlock()

	// If there's already a pending connection request for this pubkey,
	// then we ignore this request to ensure we don't create a redundant
	// connection.
	s.pendingConnMtx.RLock()
	if _, ok := s.persistentConnReqs[targetPub]; ok {
		s.pendingConnMtx.RUnlock()
		msg.err <- fmt.Errorf("connection attempt to %v is pending",
			addr)
		return
	}
	s.pendingConnMtx.RUnlock()

	// If there's not already a pending or active connection to this node,
	// then instruct the connection manager to attempt to establish a
	// persistent connection to the peer.
	srvrLog.Debugf("Connecting to %v", addr)
	if msg.persistent {
		go s.connMgr.Connect(&connmgr.ConnReq{
			Addr:      addr,
			Permanent: true,
		})
		msg.err <- nil
	} else {
		// If we're not making a persistent connection, then we'll
		// attempt to connect to the target peer, returning an error
		// which indicates success or failure.
		go func() {
			// Attempt to connect to the remote node. If we can't
			// make the connection, or the crypto negotiation
			// breaks down, then return an error to the caller.
			conn, err := brontide.Dial(s.identityPriv, addr)
			if err != nil {
				msg.err <- err
				return
			}

			s.outboundPeerConnected(nil, conn)
			msg.err <- nil
		}()
	}
}

// handleOpenChanReq first locates the target peer, and if found hands off the
// request to the funding manager allowing it to initiate the channel funding
// workflow.
func (s *server) handleOpenChanReq(req *openChanReq) {
	var (
		targetPeer  *peer
		pubKeyBytes []byte
	)

	// If the user is targeting the peer by public key, then we'll need to
	// convert that into a string for our map. Otherwise, we expect them to
	// target by peer ID instead.
	if req.targetPubkey != nil {
		pubKeyBytes = req.targetPubkey.SerializeCompressed()
	}

	// First attempt to locate the target peer to open a channel with, if
	// we're unable to locate the peer then this request will fail.
	s.peersMtx.RLock()
	if peer, ok := s.peersByID[req.targetPeerID]; ok {
		targetPeer = peer
	} else if peer, ok := s.peersByPub[string(pubKeyBytes)]; ok {
		targetPeer = peer
	}
	s.peersMtx.RUnlock()

	if targetPeer == nil {
		req.err <- fmt.Errorf("unable to find peer nodeID(%x), "+
			"peerID(%v)", pubKeyBytes, req.targetPeerID)
		return
	}

	// Spawn a goroutine to send the funding workflow request to the
	// funding manager. This allows the server to continue handling queries
	// instead of blocking on this request which is exported as a
	// synchronous request to the outside world.
	// TODO(roasbeef): server semaphore to restrict num goroutines
	go s.fundingMgr.initFundingWorkflow(targetPeer.addr, req)
}

// ConnectToPeer requests that the server connect to a Lightning Network peer
// at the specified address. This function will *block* until either a
// connection is established, or the initial handshake process fails.
func (s *server) ConnectToPeer(addr *lnwire.NetAddress,
	perm bool) error {

	errChan := make(chan error, 1)

	s.queries <- &connectPeerMsg{
		addr:       addr,
		persistent: perm,
		err:        errChan,
	}

	return <-errChan
}

// OpenChannel sends a request to the server to open a channel to the specified
// peer identified by ID with the passed channel funding parameters.
func (s *server) OpenChannel(peerID int32, nodeKey *btcec.PublicKey,
	localAmt, pushAmt btcutil.Amount,
	numConfs uint32) (chan *lnrpc.OpenStatusUpdate, chan error) {

	errChan := make(chan error, 1)
	updateChan := make(chan *lnrpc.OpenStatusUpdate, 1)

	req := &openChanReq{
		targetPeerID:    peerID,
		targetPubkey:    nodeKey,
		localFundingAmt: localAmt,
		pushAmt:         pushAmt,
		numConfs:        numConfs,
		updates:         updateChan,
		err:             errChan,
	}

	s.queries <- req

	return updateChan, errChan
}

// Peers returns a slice of all active peers.
func (s *server) Peers() []*peer {
	resp := make(chan []*peer, 1)

	s.queries <- &listPeersMsg{resp}

	return <-resp
}