package main

import (
	"bytes"
	"container/list"
	"fmt"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/connmgr"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/davecgh/go-spew/spew"
	"github.com/go-errors/errors"
	"github.com/lightningnetwork/lnd/brontide"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/contractcourt"
	"github.com/lightningnetwork/lnd/htlcswitch"
	"github.com/lightningnetwork/lnd/lnpeer"
	"github.com/lightningnetwork/lnd/lnrpc"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
)

var (
	numNodes int32

	// ErrPeerExiting signals that the peer received a disconnect request.
	ErrPeerExiting = errors.Errorf("peer exiting")
)

const (
	// pingInterval is the interval at which ping messages are sent.
	pingInterval = 1 * time.Minute

	// idleTimeout is the duration of inactivity before we time out a peer.
	idleTimeout = 5 * time.Minute

	// writeMessageTimeout is the timeout used when writing a message to a
	// peer.
	writeMessageTimeout = 10 * time.Second

	// outgoingQueueLen is the buffer size of the channel which houses
	// messages to be sent across the wire, requested by objects outside
	// this struct.
	outgoingQueueLen = 50
)

// outgoingMsg packages an lnwire.Message to be sent out on the wire, along with
// a buffered channel which will be sent upon once the write is complete. This
// buffered channel acts as a semaphore to be used for synchronization purposes.
type outgoingMsg struct {
	msg     lnwire.Message
	errChan chan error // MUST be buffered.
}

// newChannelMsg packages an lnwallet.LightningChannel with a channel that
// allows the receiver of the request to report when the funding transaction
// has been confirmed and the channel creation process completed.
type newChannelMsg struct {
	channel *lnwallet.LightningChannel
	done    chan struct{}
}

// closeMsg is a wrapper struct around any wire messages that deal with the
// cooperative channel closure negotiation process. This struct includes the
// raw channel ID targeted along with the original message.
type closeMsg struct {
	cid lnwire.ChannelID
	msg lnwire.Message
}

// chanSnapshotReq is a message sent by outside subsystems to a peer in order
// to gain a snapshot of the peer's currently active channels.
type chanSnapshotReq struct {
	resp chan []*channeldb.ChannelSnapshot
}

// peer is an active peer on the Lightning Network. This struct is responsible
// for managing any channel state related to this peer. To do so, it has
// several helper goroutines to handle events such as HTLC timeouts, new
// funding workflow, and detecting an uncooperative closure of any active
// channels.
// TODO(roasbeef): proper reconnection logic
type peer struct {
	// MUST be used atomically.
	started    int32
	disconnect int32

	// The following fields are only meant to be used *atomically*.
	bytesReceived uint64
	bytesSent     uint64

	// pingTime is a rough estimate of the RTT (round-trip-time) between us
	// and the connected peer. This time is expressed in microseconds.
	// To be used atomically.
	// TODO(roasbeef): also use a WMA or EMA?
	pingTime int64

	// pingLastSend is the Unix time expressed in nanoseconds when we sent
	// our last ping message. To be used atomically.
	pingLastSend int64
|
|
|
|
|
2016-12-15 05:11:31 +03:00
|
|
|
connReq *connmgr.ConnReq
|
|
|
|
conn net.Conn
|
2015-12-21 00:16:38 +03:00
|
|
|
|
2016-10-28 05:49:10 +03:00
|
|
|
addr *lnwire.NetAddress
|
2017-06-17 01:11:02 +03:00
|
|
|
pubKeyBytes [33]byte
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2016-06-23 08:22:06 +03:00
|
|
|
inbound bool
|
2015-12-21 00:16:38 +03:00
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// This mutex protects all the stats below it.
|
2016-01-17 06:03:03 +03:00
|
|
|
sync.RWMutex
|
2016-06-21 22:32:32 +03:00
|
|
|
timeConnected time.Time
|
|
|
|
lastSend time.Time
|
|
|
|
lastRecv time.Time
|
|
|
|
|
|
|
|
// sendQueue is the channel which is used to queue outgoing to be
|
|
|
|
// written onto the wire. Note that this channel is unbuffered.
|
2018-02-07 06:11:11 +03:00
|
|
|
sendQueue chan outgoingMsg
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
// outgoingQueue is a buffered channel which allows second/third party
|
|
|
|
// objects to queue messages to be sent out on the wire.
|
2018-02-07 06:11:11 +03:00
|
|
|
outgoingQueue chan outgoingMsg
|
2015-12-21 00:16:38 +03:00
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// activeChannels is a map which stores the state machines of all
|
|
|
|
// active channels. Channels are indexed into the map by the txid of
|
|
|
|
// the funding transaction which opened the channel.
|
2017-08-09 02:51:19 +03:00
|
|
|
activeChanMtx sync.RWMutex
|
|
|
|
activeChannels map[lnwire.ChannelID]*lnwallet.LightningChannel
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
// newChannels is used by the fundingManager to send fully opened
|
|
|
|
// channels to the source peer which handled the funding workflow.
|
2017-01-24 05:19:54 +03:00
|
|
|
newChannels chan *newChannelMsg
|
2016-06-21 22:32:32 +03:00
|
|
|
|

	// activeChanCloses is a map that keeps track of all the active
	// cooperative channel closures. Any channel closing messages are
	// directed to one of these active state machines. Once the channel
	// has been closed, the state machine will be deleted from the map.
	activeChanCloses map[lnwire.ChannelID]*channelCloser

	// localCloseChanReqs is a channel in which any local requests to close
	// a particular channel are sent over.
	localCloseChanReqs chan *htlcswitch.ChanClose

	// chanCloseMsgs is a channel over which any messages related to
	// channel closures are sent. This includes lnwire.Shutdown messages
	// as well as lnwire.ClosingSigned messages.
	chanCloseMsgs chan *closeMsg

	server *server

	// localFeatures is the set of local features that we advertised to the
	// remote node.
	localFeatures *lnwire.RawFeatureVector

	// remoteLocalFeatures is the local feature vector received from the
	// peer during the connection handshake.
	remoteLocalFeatures *lnwire.FeatureVector

	// remoteGlobalFeatures is the global feature vector received from the
	// peer during the connection handshake.
	remoteGlobalFeatures *lnwire.FeatureVector

	// failedChannels is a set that tracks channels we consider `failed`.
	// This is a temporary measure until we have implemented real failure
	// handling at the link level, to handle the case where we reconnect to
	// a peer and try to re-sync a failed channel, triggering a disconnect
	// loop.
	// TODO(halseth): remove when link failure is properly handled.
	failedChannels map[lnwire.ChannelID]struct{}

	// writeBuf is a buffer that we'll re-use in order to encode wire
	// messages to write out directly on the socket. By re-using this
	// buffer, we avoid needing to allocate more memory each time a new
	// message is to be sent to a peer.
	writeBuf [lnwire.MaxMessagePayload]byte

	queueQuit chan struct{}
	quit      chan struct{}
	wg        sync.WaitGroup
}

// A compile-time check to ensure that peer satisfies the lnpeer.Peer interface.
var _ lnpeer.Peer = (*peer)(nil)

// newPeer creates a new peer from an established connection object, and a
// pointer to the main server.
func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
	addr *lnwire.NetAddress, inbound bool,
	localFeatures *lnwire.RawFeatureVector) (*peer, error) {

	nodePub := addr.IdentityKey

	p := &peer{
		conn:    conn,
		addr:    addr,
		inbound: inbound,
		connReq: connReq,

		server: server,

		localFeatures: localFeatures,

		sendQueue:     make(chan outgoingMsg),
		outgoingQueue: make(chan outgoingMsg),

		activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
		newChannels:    make(chan *newChannelMsg, 1),

		activeChanCloses:   make(map[lnwire.ChannelID]*channelCloser),
		localCloseChanReqs: make(chan *htlcswitch.ChanClose),
		chanCloseMsgs:      make(chan *closeMsg),
		failedChannels:     make(map[lnwire.ChannelID]struct{}),

		queueQuit: make(chan struct{}),
		quit:      make(chan struct{}),
	}
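
	// Cache the remote node's serialized, compressed public key so it can
	// be read later without going through the peer's net address.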
	copy(p.pubKeyBytes[:], nodePub.SerializeCompressed())

	return p, nil
}

// Start starts all helper goroutines the peer needs for normal operations. In
// the case this peer has already been started, then this function is a noop.
func (p *peer) Start() error {
	if atomic.AddInt32(&p.started, 1) != 1 {
		return nil
	}

	peerLog.Tracef("peer %v starting", p)

	// Exchange local and global features; the init message must be the
	// very first message exchanged between the two nodes.
	if err := p.sendInitMsg(); err != nil {
		return fmt.Errorf("unable to send init msg: %v", err)
	}

	// Before we launch any of the helper goroutines off the peer struct,
	// we'll first ensure proper adherence to the p2p protocol. The init
	// message MUST be sent before any other message.
	readErr := make(chan error, 1)
	msgChan := make(chan lnwire.Message, 1)
	p.wg.Add(1)
	go func() {
		defer p.wg.Done()

		msg, err := p.readNextMessage()
		if err != nil {
			readErr <- err
			msgChan <- nil
			return
		}
		readErr <- nil
		msgChan <- msg
	}()

	select {
	// In order to avoid blocking indefinitely, we'll give the other peer
	// an upper timeout of 15 seconds to respond before we bail out early.
	case <-time.After(time.Second * 15):
		return fmt.Errorf("peer did not complete handshake within 15 " +
			"seconds")
	case err := <-readErr:
		if err != nil {
			return fmt.Errorf("unable to read init msg: %v", err)
		}
	}

	// Once the init message arrives, we can parse it so we can figure out
	// the negotiation of features for this session.
	msg := <-msgChan
	if msg, ok := msg.(*lnwire.Init); ok {
		if err := p.handleInitMsg(msg); err != nil {
			return err
		}
	} else {
		return errors.New("very first message between nodes " +
			"must be init message")
	}

	// Fetch and then load all the active channels we have with this remote
	// peer from the database.
	activeChans, err := p.server.chanDB.FetchOpenChannels(p.addr.IdentityKey)
	if err != nil {
		peerLog.Errorf("unable to fetch active chans "+
			"for peer %v: %v", p, err)
		return err
	}

	// Next, load all the active channels we have with this peer,
	// registering them with the switch and launching the necessary
	// goroutines required to operate them.
	peerLog.Debugf("Loaded %v active channels from database with "+
		"NodeKey(%x)", len(activeChans), p.PubKey())
	if err := p.loadActiveChannels(activeChans); err != nil {
		return fmt.Errorf("unable to load channels: %v", err)
	}
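
	// With the channels loaded, launch the five helper goroutines that
	// drive this peer: message queueing, writing, reading, channel
	// management, and pings.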
	p.wg.Add(5)
	go p.queueHandler()
	go p.writeHandler()
	go p.readHandler()
	go p.channelManager()
	go p.pingHandler()

	return nil
}

// loadActiveChannels creates indexes within the peer for tracking all active
// channels returned by the database.
func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
	for _, dbChan := range chans {
		lnChan, err := lnwallet.NewLightningChannel(
			p.server.cc.signer, p.server.witnessBeacon, dbChan,
		)
		if err != nil {
			return err
		}

		chanPoint := &dbChan.FundingOutpoint

		chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

		peerLog.Infof("NodeKey(%x) loading ChannelPoint(%v)",
			p.PubKey(), chanPoint)

		// Skip adding any permanently irreconcilable channels to the
		// htlcswitch.
		if dbChan.ChanStatus() != channeldb.Default {
			peerLog.Warnf("ChannelPoint(%v) has status %v, won't "+
				"start.", chanPoint, dbChan.ChanStatus())
			lnChan.Stop()
			continue
		}

		// Also skip adding any channel marked as `failed` for this
		// session.
		if _, ok := p.failedChannels[chanID]; ok {
			peerLog.Warnf("ChannelPoint(%v) is failed, won't "+
				"start.", chanPoint)
			lnChan.Stop()
			continue
		}

		_, currentHeight, err := p.server.cc.chainIO.GetBestBlock()
		if err != nil {
			lnChan.Stop()
			return err
		}

		// Before we register this new link with the HTLC Switch, we'll
		// need to fetch its current link-layer forwarding policy from
		// the database.
		graph := p.server.chanDB.ChannelGraph()
		info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(chanPoint)
		if err != nil && err != channeldb.ErrEdgeNotFound {
			lnChan.Stop()
			return err
		}

		// We'll filter out our policy from the directional channel
		// edges based on whom the edge connects to. If it doesn't
		// connect to us, then we know that we were the one that
		// advertised the policy.
		//
		// TODO(roasbeef): can add helper method to get policy for
		// particular channel.
		var selfPolicy *channeldb.ChannelEdgePolicy
		if info != nil && bytes.Equal(info.NodeKey1Bytes[:],
			p.server.identityPriv.PubKey().SerializeCompressed()) {

			selfPolicy = p1
		} else {
			selfPolicy = p2
		}

		// If we don't yet have an advertised routing policy, then
		// we'll use the current default, otherwise we'll translate the
		// routing policy into a forwarding policy.
		var forwardingPolicy *htlcswitch.ForwardingPolicy
		if selfPolicy != nil {
			forwardingPolicy = &htlcswitch.ForwardingPolicy{
				MinHTLC:       selfPolicy.MinHTLC,
				BaseFee:       selfPolicy.FeeBaseMSat,
				FeeRate:       selfPolicy.FeeProportionalMillionths,
				TimeLockDelta: uint32(selfPolicy.TimeLockDelta),
			}
		} else {
			forwardingPolicy = &p.server.cc.routingPolicy
		}

		peerLog.Tracef("Using link policy of: %v", spew.Sdump(forwardingPolicy))

		// Register this new channel link with the HTLC Switch. This is
		// necessary to properly route multi-hop payments, and forward
		// new payments triggered by RPC clients.
		chainEvents, err := p.server.chainArb.SubscribeChannelEvents(
			*chanPoint,
		)
		if err != nil {
			lnChan.Stop()
			return err
		}

		// Create the link and add it to the switch.
		err = p.addLink(
			chanPoint, lnChan, forwardingPolicy, chainEvents,
			currentHeight, true,
		)
		if err != nil {
			lnChan.Stop()
			return err
		}
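
		// Finally, with the link in place, record the channel in the
		// peer's set of active channels.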
		p.activeChanMtx.Lock()
		p.activeChannels[chanID] = lnChan
		p.activeChanMtx.Unlock()
	}

	return nil
}

// addLink creates and adds a new link from the specified channel.
func (p *peer) addLink(chanPoint *wire.OutPoint,
	lnChan *lnwallet.LightningChannel,
	forwardingPolicy *htlcswitch.ForwardingPolicy,
	chainEvents *contractcourt.ChainEventSubscription,
	currentHeight int32, syncStates bool) error {

	// onChannelFailure will be called by the link in case the channel
	// fails for some reason.
	onChannelFailure := func(chanID lnwire.ChannelID,
		shortChanID lnwire.ShortChannelID,
		linkErr htlcswitch.LinkFailureError) {

		select {
		// If the server is already exiting, then none of the actions
		// below can finish exiting, so we'll exit early as well.
		case <-p.server.quit:
			return

		default:
		}

		// The link has notified us about a failure. We launch a
		// goroutine to stop the link, disconnect the peer and
		// optionally force close the channel. We must launch a
		// goroutine since we must let OnChannelFailure return in
		// order for the link to completely stop in the call to
		// RemoveLink.
		p.server.wg.Add(1)
		go func() {
			defer p.server.wg.Done()

			// We begin by removing the link from the switch, such
			// that it won't be used for any more updates.
			// TODO(halseth): should introduce a way to atomically
			// stop/pause the link and cancel back any adds in its
			// mailboxes such that we can safely force close
			// without the link being added again and updates being
			// applied.
			err := p.server.htlcSwitch.RemoveLink(chanID)
			if err != nil {
				peerLog.Errorf("unable to stop link(%v): %v",
					shortChanID, err)
			}

			// If the error encountered was severe enough, we'll
			// now force close the channel.
			if linkErr.ForceClose {
				peerLog.Warnf("Force closing link(%v)",
					shortChanID)

				closeTx, err := p.server.chainArb.ForceCloseContract(*chanPoint)
				if err != nil {
					peerLog.Errorf("unable to force close "+
						"link(%v): %v", shortChanID,
						err)
				} else {
					peerLog.Infof("channel(%v) force "+
						"closed with txid %v",
						shortChanID, closeTx.TxHash())
				}
			}

			// Send an error to the peer explaining why we failed
			// the channel.
			if linkErr.ShouldSendToPeer() {
				// If SendData is set, send it to the peer. If
				// not, we'll use the standard error messages
				// in the payload. We only include sendData in
				// the cases where the error data does not
				// contain sensitive information.
				data := []byte(linkErr.Error())
				if linkErr.SendData != nil {
					data = linkErr.SendData
				}
				err := p.SendMessage(true, &lnwire.Error{
					ChanID: chanID,
					Data:   data,
				})
				if err != nil {
					peerLog.Errorf("unable to send msg to "+
						"remote peer: %v", err)
				}
			}

			// Initiate disconnection.
			// TODO(halseth): consider not disconnecting the peer,
			// as we might still have other active channels with
			// the same peer.
			p.Disconnect(linkErr)
		}()
	}

	linkCfg := htlcswitch.ChannelLinkConfig{
		Peer:                   p,
		DecodeHopIterators:     p.server.sphinx.DecodeHopIterators,
		ExtractErrorEncrypter:  p.server.sphinx.ExtractErrorEncrypter,
		FetchLastChannelUpdate: fetchLastChanUpdate(
			p.server, p.PubKey(),
		),
		DebugHTLC:      cfg.DebugHTLC,
		HodlMask:       cfg.Hodl.Mask(),
		Registry:       p.server.invoices,
		Switch:         p.server.htlcSwitch,
		Circuits:       p.server.htlcSwitch.CircuitModifier(),
		ForwardPackets: p.server.htlcSwitch.ForwardPackets,
		FwrdingPolicy:  *forwardingPolicy,
		FeeEstimator:   p.server.cc.feeEstimator,
		PreimageCache:  p.server.witnessBeacon,
		ChainEvents:    chainEvents,
		UpdateContractSignals: func(signals *contractcourt.ContractSignals) error {
			return p.server.chainArb.UpdateContractSignals(
				*chanPoint, signals,
			)
		},
		OnChannelFailure:    onChannelFailure,
		SyncStates:          syncStates,
		BatchTicker:         htlcswitch.NewBatchTicker(50 * time.Millisecond),
		FwdPkgGCTicker:      htlcswitch.NewBatchTicker(time.Minute),
		BatchSize:           10,
		UnsafeReplay:        cfg.UnsafeReplay,
		MinFeeUpdateTimeout: htlcswitch.DefaultMinLinkFeeUpdateTimeout,
		MaxFeeUpdateTimeout: htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
	}

	link := htlcswitch.NewChannelLink(linkCfg, lnChan)

	// With the channel link created, we'll now notify the htlc switch so
	// this channel can be used to dispatch local payments and also
	// passively forward payments.
	return p.server.htlcSwitch.AddLink(link)
}

// WaitForDisconnect waits until the peer has disconnected. A peer may be
// disconnected if the local or remote side terminates the connection, or if
// an irrecoverable protocol error has been encountered.
func (p *peer) WaitForDisconnect() {
	p.wg.Wait()
}

// Disconnect terminates the connection with the remote peer. Additionally, a
// signal is sent to the server and htlcSwitch indicating the resources
// allocated to the peer can now be cleaned up.
func (p *peer) Disconnect(reason error) {
	if !atomic.CompareAndSwapInt32(&p.disconnect, 0, 1) {
		return
	}

	peerLog.Tracef("Disconnecting %s, reason: %v", p, reason)

	// Ensure that the TCP connection is properly closed before continuing.
	p.conn.Close()
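
	// Signal all helper goroutines that the peer is shutting down.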
	close(p.quit)
}

// String returns the string representation of this peer.
func (p *peer) String() string {
	return p.conn.RemoteAddr().String()
}

// readNextMessage reads, and returns the next message on the wire along with
// any additional raw payload.
func (p *peer) readNextMessage() (lnwire.Message, error) {
	noiseConn, ok := p.conn.(*brontide.Conn)
	if !ok {
		return nil, fmt.Errorf("brontide.Conn required to read messages")
	}

	// First we'll read the next _full_ message. We do this rather than
	// reading incrementally from the stream as the Lightning wire protocol
	// is message oriented and allows nodes to pad on additional data to
	// the message stream.
	rawMsg, err := noiseConn.ReadNextMessage()
	atomic.AddUint64(&p.bytesReceived, uint64(len(rawMsg)))
	if err != nil {
		return nil, err
	}

	// Next, create a new io.Reader implementation from the raw message,
	// and use this to decode the message directly from it.
	msgReader := bytes.NewReader(rawMsg)
	nextMsg, err := lnwire.ReadMessage(msgReader, 0)
	if err != nil {
		return nil, err
	}

	p.logWireMessage(nextMsg, true)

	return nextMsg, nil
}

// msgStream implements a goroutine-safe, in-order stream of messages to be
// delivered via closure to a receiver. These messages MUST be in order due to
// the nature of the lightning channel commitment and gossiper state machines.
// TODO(conner): use stream handler interface to abstract out stream
// state/logging
type msgStream struct {
	streamShutdown int32 // To be used atomically.

	peer *peer

	apply func(lnwire.Message)

	startMsg string
	stopMsg  string

	msgCond *sync.Cond
	msgs    []lnwire.Message

	mtx sync.Mutex

	bufSize      uint32
	producerSema chan struct{}

	wg   sync.WaitGroup
	quit chan struct{}
}

// newMsgStream creates a new instance of a msgStream. bufSize is the max
// number of messages that should be buffered in the internal queue. Callers
// should set this to a sane value that avoids blocking unnecessarily, but
// doesn't allow an unbounded amount of memory to be allocated to buffer
// incoming messages.
func newMsgStream(p *peer, startMsg, stopMsg string, bufSize uint32,
	apply func(lnwire.Message)) *msgStream {

	stream := &msgStream{
		peer:         p,
		apply:        apply,
		startMsg:     startMsg,
		stopMsg:      stopMsg,
		producerSema: make(chan struct{}, bufSize),
		quit:         make(chan struct{}),
	}
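
	// The condition variable lets the consumer goroutine sleep until the
	// producer signals that new messages have been queued.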
	stream.msgCond = sync.NewCond(&stream.mtx)

	// Before we return the active stream, we'll populate the producer's
	// semaphore channel. We'll use this to ensure that the producer won't
	// attempt to allocate memory in the queue for an item until it has
	// sufficient extra space.
	for i := uint32(0); i < bufSize; i++ {
		stream.producerSema <- struct{}{}
	}

	return stream
}

// Start starts the msgStream.
func (ms *msgStream) Start() {
	ms.wg.Add(1)
	go ms.msgConsumer()
}

// Stop stops the msgStream.
func (ms *msgStream) Stop() {
	// TODO(roasbeef): signal too?

	close(ms.quit)

	// Now that we've closed the channel, we'll repeatedly signal the msg
	// consumer until we've detected that it has exited.
	for atomic.LoadInt32(&ms.streamShutdown) == 0 {
		ms.msgCond.Signal()
		time.Sleep(time.Millisecond * 100)
	}

	ms.wg.Wait()
}

// msgConsumer is the main goroutine that streams messages from the peer's
// readHandler directly to the target channel.
func (ms *msgStream) msgConsumer() {
	defer ms.wg.Done()
	defer peerLog.Tracef(ms.stopMsg)

	peerLog.Tracef(ms.startMsg)

	for {
		// First, we'll check our condition. If the queue of messages
		// is empty, then we'll wait until a new item is added.
		ms.msgCond.L.Lock()
		for len(ms.msgs) == 0 {
			ms.msgCond.Wait()

			// If we woke up in order to exit, then we'll do so.
			// Otherwise, we'll check the message queue for any new
			// items.
			select {
			case <-ms.quit:
				ms.msgCond.L.Unlock()
				atomic.StoreInt32(&ms.streamShutdown, 1)
				return
			default:
			}
		}

		// Grab the message off the front of the queue, shifting the
		// slice's reference down one in order to remove the message
		// from the queue.
		msg := ms.msgs[0]
		ms.msgs[0] = nil // Set to nil to prevent GC leak.
		ms.msgs = ms.msgs[1:]

		ms.msgCond.L.Unlock()

		ms.apply(msg)

		// We've just successfully processed an item, so we'll signal
		// to the producer that a new slot is free in the buffer. We'll
		// use this to bound the size of the buffer to avoid allowing
		// it to grow indefinitely.
		select {
		case ms.producerSema <- struct{}{}:
		case <-ms.quit:
			atomic.StoreInt32(&ms.streamShutdown, 1)
			return
		}
	}
}

// AddMsg adds a new message to the msgStream. This function is safe for
// concurrent access.
func (ms *msgStream) AddMsg(msg lnwire.Message) {
	// First, we'll attempt to receive from the producerSema channel. This
	// acts as a semaphore to prevent us from indefinitely buffering
	// incoming items from the wire. Either the msg queue isn't full, and
	// we'll not block, or the queue is full, and we'll block until either
	// we're signalled to quit, or a slot is freed up.
	select {
	case <-ms.producerSema:
	case <-ms.quit:
		return
	}

	// Next, we'll lock the condition, and add the message to the end of
	// the message queue.
	ms.msgCond.L.Lock()
	ms.msgs = append(ms.msgs, msg)
	ms.msgCond.L.Unlock()

	// With the message added, we signal to the msgConsumer that there are
	// additional messages to consume.
	ms.msgCond.Signal()
}

// newChanMsgStream is used to create a msgStream between the peer and a
// particular channel link in the htlcswitch. We utilize additional
// synchronization with the fundingManager to ensure we don't attempt to
// dispatch a message to a channel before it is fully active. A reference to
// the channel this stream forwards to is held in scope to prevent unnecessary
// lookups.
func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream {

	var chanLink htlcswitch.ChannelLink

	return newMsgStream(p,
		fmt.Sprintf("Update stream for ChannelID(%x) created", cid[:]),
		fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]),
		1000,
		func(msg lnwire.Message) {
			_, isChanSyncMsg := msg.(*lnwire.ChannelReestablish)

			// If this is the chanSync message, then we'll deliver
			// it immediately to the active link.
			if !isChanSyncMsg {
				// We'll send a message to the funding manager
				// and wait iff an active funding process for
				// this channel hasn't yet completed. We do
				// this in order to account for the following
				// scenario: we send the funding locked message
				// to the other side, they immediately send a
				// channel update message, but we haven't yet
				// sent the channel to the channelManager.
				p.server.fundingMgr.waitUntilChannelOpen(cid)
			}

			// TODO(roasbeef): only wait if not chan sync

			// Dispatch the commitment update message to the proper
			// active goroutine dedicated to this channel.
			if chanLink == nil {
				link, err := p.server.htlcSwitch.GetLink(cid)
				if err != nil {
					peerLog.Errorf("recv'd update for unknown "+
						"channel %v from %v", cid, p)
					return
				}
				chanLink = link
			}

			chanLink.HandleChannelUpdate(msg)
		},
	)
}

// newDiscMsgStream is used to setup a msgStream between the peer and the
// authenticated gossiper. This stream should be used to forward all remote
// channel announcements.
func newDiscMsgStream(p *peer) *msgStream {
	return newMsgStream(p,
		"Update stream for gossiper created",
		"Update stream for gossiper exited",
		1000,
		func(msg lnwire.Message) {
			p.server.authGossiper.ProcessRemoteAnnouncement(msg, p)
		},
	)
}

// readHandler is responsible for reading messages off the wire in series, then
// properly dispatching the handling of the message to the proper subsystem.
//
// NOTE: This method MUST be run as a goroutine.
func (p *peer) readHandler() {
	// We'll stop the timer after a new message is received, and also
	// reset it after we process the next message.
	idleTimer := time.AfterFunc(idleTimeout, func() {
		err := fmt.Errorf("Peer %s no answer for %s -- disconnecting",
			p, idleTimeout)
		p.Disconnect(err)
	})

	discStream := newDiscMsgStream(p)
	discStream.Start()
	defer discStream.Stop()

	chanMsgStreams := make(map[lnwire.ChannelID]*msgStream)
out:
	for atomic.LoadInt32(&p.disconnect) == 0 {
		nextMsg, err := p.readNextMessage()
		idleTimer.Stop()
		if err != nil {
			peerLog.Infof("unable to read message from %v: %v",
				p, err)

			switch err.(type) {
			// If this is just a message we don't yet recognize,
			// we'll continue processing as normal as this allows
			// us to introduce new messages in a forwards
			// compatible manner.
			case *lnwire.UnknownMessage:
				idleTimer.Reset(idleTimeout)
				continue

			// If they sent us an address type that we don't yet
			// know of, then this isn't a dire error, so we'll
			// simply continue parsing the remainder of their
			// messages.
			case *lnwire.ErrUnknownAddrType:
				idleTimer.Reset(idleTimeout)
				continue

			// If the error we encountered wasn't just a message we
			// didn't recognize, then we'll stop all processing as
			// this is a fatal error.
			default:
				break out
			}
		}

		var (
			isChanUpdate bool
			targetChan   lnwire.ChannelID
		)

		switch msg := nextMsg.(type) {
		case *lnwire.Pong:
			// When we receive a Pong message in response to our
			// last ping message, we'll use the time in which we
			// sent the ping message to measure a rough estimate of
			// round trip time.
			pingSendTime := atomic.LoadInt64(&p.pingLastSend)
			delay := (time.Now().UnixNano() - pingSendTime) / 1000
			atomic.StoreInt64(&p.pingTime, delay)

		case *lnwire.Ping:
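			// Respond with a pong containing the number of zero
			// bytes requested by the remote peer.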
			pongBytes := make([]byte, msg.NumPongBytes)
			p.queueMsg(lnwire.NewPong(pongBytes), nil)

		case *lnwire.OpenChannel:
			p.server.fundingMgr.processFundingOpen(msg, p)
		case *lnwire.AcceptChannel:
			p.server.fundingMgr.processFundingAccept(msg, p)
		case *lnwire.FundingCreated:
			p.server.fundingMgr.processFundingCreated(msg, p)
		case *lnwire.FundingSigned:
			p.server.fundingMgr.processFundingSigned(msg, p)
		case *lnwire.FundingLocked:
			p.server.fundingMgr.processFundingLocked(msg, p)

		case *lnwire.Shutdown:
			select {
			case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
			case <-p.quit:
				break out
			}
		case *lnwire.ClosingSigned:
			select {
			case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
			case <-p.quit:
				break out
			}

		case *lnwire.Error:
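			// Grab the remote peer's identity key, which the
			// funding manager uses to look up any pending channels
			// below.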
			key := p.addr.IdentityKey

			switch {
			// In the case of an all-zero channel ID we want to
			// forward the error to all channels with this peer.
			case msg.ChanID == lnwire.ConnectionWideID:
				for chanID, chanStream := range chanMsgStreams {
					chanStream.AddMsg(nextMsg)

					// Also mark this channel as failed,
					// so we won't try to restart it on
					// reconnect with this peer.
					p.failedChannels[chanID] = struct{}{}
				}

			// If the channel ID for the error message corresponds
			// to a pending channel, then the funding manager will
			// handle the error.
			case p.server.fundingMgr.IsPendingChannel(msg.ChanID, key):
				p.server.fundingMgr.processFundingError(msg, key)

			// If not, we hand the error to the channel link for
			// this channel.
			default:
				isChanUpdate = true
				targetChan = msg.ChanID

				// Also mark this channel as failed, so we
				// won't try to restart it on reconnect with
				// this peer.
				p.failedChannels[targetChan] = struct{}{}
			}

		// TODO(roasbeef): create ChanUpdater interface for the below
		case *lnwire.UpdateAddHTLC:
			isChanUpdate = true
			targetChan = msg.ChanID
		case *lnwire.UpdateFulfillHTLC:
			isChanUpdate = true
			targetChan = msg.ChanID
		case *lnwire.UpdateFailMalformedHTLC:
			isChanUpdate = true
			targetChan = msg.ChanID
		case *lnwire.UpdateFailHTLC:
			isChanUpdate = true
			targetChan = msg.ChanID
		case *lnwire.RevokeAndAck:
			isChanUpdate = true
			targetChan = msg.ChanID
		case *lnwire.CommitSig:
			isChanUpdate = true
			targetChan = msg.ChanID
		case *lnwire.UpdateFee:
			isChanUpdate = true
			targetChan = msg.ChanID
		case *lnwire.ChannelReestablish:
			isChanUpdate = true
			targetChan = msg.ChanID

		case *lnwire.ChannelUpdate,
			*lnwire.ChannelAnnouncement,
			*lnwire.NodeAnnouncement,
			*lnwire.AnnounceSignatures,
			*lnwire.GossipTimestampRange,
			*lnwire.QueryShortChanIDs,
			*lnwire.QueryChannelRange,
			*lnwire.ReplyChannelRange,
			*lnwire.ReplyShortChanIDsEnd:

			discStream.AddMsg(msg)

		default:
			peerLog.Errorf("unknown message %v received from peer "+
				"%v", uint16(msg.MsgType()), p)
		}

		if isChanUpdate {
			// If this is a channel update, then we need to feed it
			// into the channel's in-order message stream.
			chanStream, ok := chanMsgStreams[targetChan]
			if !ok {
				// If a stream hasn't yet been created, then
				// we'll do so, add it to the map, and finally
				// start it.
				chanStream = newChanMsgStream(p, targetChan)
				chanMsgStreams[targetChan] = chanStream
				chanStream.Start()
			}

			// With the stream obtained, add the message to the
			// stream so we can continue processing messages.
			chanStream.AddMsg(nextMsg)
		}

		idleTimer.Reset(idleTimeout)
	}
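
	// The read loop has exited, so mark this goroutine as done and tear
	// down the connection along with any remaining channel streams.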
	p.wg.Done()

	p.Disconnect(errors.New("read handler closed"))

	for cid, chanStream := range chanMsgStreams {
		chanStream.Stop()

		delete(chanMsgStreams, cid)
	}

	peerLog.Tracef("readHandler for peer %v done", p)
}

// messageSummary returns a human-readable string that summarizes an
// incoming/outgoing message. Not all messages will have a summary, only those
// which have additional data that can be informative at a glance.
func messageSummary(msg lnwire.Message) string {
	switch msg := msg.(type) {
	case *lnwire.Init:
		// No summary.
		return ""

	case *lnwire.OpenChannel:
		return fmt.Sprintf("temp_chan_id=%x, chain=%v, csv=%v, amt=%v, "+
			"push_amt=%v, reserve=%v, flags=%v",
			msg.PendingChannelID[:], msg.ChainHash,
			msg.CsvDelay, msg.FundingAmount, msg.PushAmount,
			msg.ChannelReserve, msg.ChannelFlags)

	case *lnwire.AcceptChannel:
		return fmt.Sprintf("temp_chan_id=%x, reserve=%v, csv=%v, num_confs=%v",
			msg.PendingChannelID[:], msg.ChannelReserve, msg.CsvDelay,
			msg.MinAcceptDepth)

	case *lnwire.FundingCreated:
		return fmt.Sprintf("temp_chan_id=%x, chan_point=%v",
			msg.PendingChannelID[:], msg.FundingPoint)

	case *lnwire.FundingSigned:
		return fmt.Sprintf("chan_id=%v", msg.ChanID)

	case *lnwire.FundingLocked:
		return fmt.Sprintf("chan_id=%v, next_point=%x",
			msg.ChanID, msg.NextPerCommitmentPoint.SerializeCompressed())

	case *lnwire.Shutdown:
		return fmt.Sprintf("chan_id=%v, script=%x", msg.ChannelID,
			msg.Address[:])

	case *lnwire.ClosingSigned:
		return fmt.Sprintf("chan_id=%v, fee_sat=%v", msg.ChannelID,
			msg.FeeSatoshis)

	case *lnwire.UpdateAddHTLC:
		return fmt.Sprintf("chan_id=%v, id=%v, amt=%v, expiry=%v, hash=%x",
			msg.ChanID, msg.ID, msg.Amount, msg.Expiry, msg.PaymentHash[:])

	case *lnwire.UpdateFailHTLC:
		return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID,
			msg.ID, msg.Reason)

	case *lnwire.UpdateFulfillHTLC:
		return fmt.Sprintf("chan_id=%v, id=%v, pre_image=%x",
			msg.ChanID, msg.ID, msg.PaymentPreimage[:])

	case *lnwire.CommitSig:
		return fmt.Sprintf("chan_id=%v, num_htlcs=%v", msg.ChanID,
			len(msg.HtlcSigs))

	case *lnwire.RevokeAndAck:
		return fmt.Sprintf("chan_id=%v, rev=%x, next_point=%x",
			msg.ChanID, msg.Revocation[:],
			msg.NextRevocationKey.SerializeCompressed())

	case *lnwire.UpdateFailMalformedHTLC:
		return fmt.Sprintf("chan_id=%v, id=%v, fail_code=%v",
			msg.ChanID, msg.ID, msg.FailureCode)

	case *lnwire.Error:
		return fmt.Sprintf("chan_id=%v, err=%v", msg.ChanID, string(msg.Data))

	case *lnwire.AnnounceSignatures:
		return fmt.Sprintf("chan_id=%v, short_chan_id=%v", msg.ChannelID,
			msg.ShortChannelID.ToUint64())

	case *lnwire.ChannelAnnouncement:
		return fmt.Sprintf("chain_hash=%v, short_chan_id=%v",
			msg.ChainHash, msg.ShortChannelID.ToUint64())

	case *lnwire.ChannelUpdate:
		return fmt.Sprintf("chain_hash=%v, short_chan_id=%v, flag=%v, "+
			"update_time=%v", msg.ChainHash,
			msg.ShortChannelID.ToUint64(), msg.Flags,
			time.Unix(int64(msg.Timestamp), 0))

	case *lnwire.NodeAnnouncement:
		return fmt.Sprintf("node=%x, update_time=%v",
			msg.NodeID, time.Unix(int64(msg.Timestamp), 0))

	case *lnwire.Ping:
		// No summary.
		return ""

	case *lnwire.Pong:
		// No summary.
		return ""

	case *lnwire.UpdateFee:
		return fmt.Sprintf("chan_id=%v, fee_update_sat=%v",
			msg.ChanID, int64(msg.FeePerKw))

	case *lnwire.ChannelReestablish:
		return fmt.Sprintf("next_local_height=%v, remote_tail_height=%v",
			msg.NextLocalCommitHeight, msg.RemoteCommitTailHeight)

	case *lnwire.ReplyShortChanIDsEnd:
		return fmt.Sprintf("chain_hash=%v, complete=%v", msg.ChainHash,
			msg.Complete)

	case *lnwire.ReplyChannelRange:
		return fmt.Sprintf("complete=%v, encoding=%v, num_chans=%v",
			msg.Complete, msg.EncodingType, len(msg.ShortChanIDs))

	case *lnwire.QueryShortChanIDs:
		return fmt.Sprintf("chain_hash=%v, encoding=%v, num_chans=%v",
			msg.ChainHash, msg.EncodingType, len(msg.ShortChanIDs))

	case *lnwire.QueryChannelRange:
		return fmt.Sprintf("chain_hash=%v, start_height=%v, "+
			"num_blocks=%v", msg.ChainHash, msg.FirstBlockHeight,
			msg.NumBlocks)

	case *lnwire.GossipTimestampRange:
		return fmt.Sprintf("chain_hash=%v, first_stamp=%v, "+
			"stamp_range=%v", msg.ChainHash,
			time.Unix(int64(msg.FirstTimestamp), 0),
			msg.TimestampRange)
	}

	return ""
}

// logWireMessage logs the receipt or sending of a particular wire message.
// This function is used rather than just logging the message in order to
// produce less spammy log messages in trace mode by setting the 'Curve'
// parameter to nil. Doing this avoids printing out each of the field elements
// in the curve parameters for secp256k1.
func (p *peer) logWireMessage(msg lnwire.Message, read bool) {
	summaryPrefix := "Received"
	if !read {
		summaryPrefix = "Sending"
	}

	peerLog.Debugf("%v", newLogClosure(func() string {
		// Debug summary of message.
		summary := messageSummary(msg)
		if len(summary) > 0 {
			summary = "(" + summary + ")"
		}

		preposition := "to"
		if read {
			preposition = "from"
		}

		return fmt.Sprintf("%v %v%s %v %s", summaryPrefix,
			msg.MsgType(), summary, preposition, p)
	}))

	// Zero out the curve parameters of any public keys carried by the
	// message before dumping it, as described above.
	switch m := msg.(type) {
	case *lnwire.ChannelReestablish:
		if m.LocalUnrevokedCommitPoint != nil {
			m.LocalUnrevokedCommitPoint.Curve = nil
		}
	case *lnwire.RevokeAndAck:
		m.NextRevocationKey.Curve = nil
	case *lnwire.AcceptChannel:
		m.FundingKey.Curve = nil
		m.RevocationPoint.Curve = nil
		m.PaymentPoint.Curve = nil
		m.DelayedPaymentPoint.Curve = nil
		m.HtlcPoint.Curve = nil
		m.FirstCommitmentPoint.Curve = nil
	case *lnwire.OpenChannel:
		m.FundingKey.Curve = nil
		m.RevocationPoint.Curve = nil
		m.PaymentPoint.Curve = nil
		m.DelayedPaymentPoint.Curve = nil
		m.HtlcPoint.Curve = nil
		m.FirstCommitmentPoint.Curve = nil
	case *lnwire.FundingLocked:
		m.NextPerCommitmentPoint.Curve = nil
	}

	prefix := "readMessage from"
	if !read {
		prefix = "writeMessage to"
	}

	peerLog.Tracef(prefix+" %v: %v", p, newLogClosure(func() string {
		return spew.Sdump(msg)
	}))
}
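
// For reference, combining messageSummary with the debug closure above yields
// log lines of roughly the following shape (illustrative values only, not
// taken from a real session):
//
//	Received UpdateAddHTLC(chan_id=..., id=2, amt=..., expiry=..., hash=...) from 1.2.3.4:9735
//	Sending RevokeAndAck(chan_id=..., rev=..., next_point=...) to 1.2.3.4:9735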

// writeMessage writes the target lnwire.Message to the remote peer.
func (p *peer) writeMessage(msg lnwire.Message) error {
	// Simply exit if we're shutting down.
	if atomic.LoadInt32(&p.disconnect) != 0 {
		return ErrPeerExiting
	}

	p.logWireMessage(msg, false)

	// We'll re-slice our static write buffer to allow this new message to
	// utilize all available space. We also ensure we cap the capacity of
	// this new buffer to the static buffer which is sized for the largest
	// possible protocol message.
	b := bytes.NewBuffer(p.writeBuf[0:0:len(p.writeBuf)])

	// With the buffer sliced properly (length zero, full capacity), we'll
	// now encode the message directly into this buffer.
	n, err := lnwire.WriteMessage(b, msg, 0)
	atomic.AddUint64(&p.bytesSent, uint64(n))
	if err != nil {
		// Don't write out a partially encoded message.
		return err
	}

	p.conn.SetWriteDeadline(time.Now().Add(writeMessageTimeout))

	// Finally, write the message itself in a single swoop.
	_, err = p.conn.Write(b.Bytes())
	return err
}
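
// Note on the static write buffer used above: heap profiles of chatty nodes
// showed that allocating a fresh lnwire.MaxMessagePayload-sized buffer for
// every outgoing message dominated the peer's memory usage, so the buffer is
// allocated once per peer and simply re-sliced on each write. A sketch of the
// corresponding peer field (declared elsewhere in this file) looks like:
//
//	// writeBuf is the buffer we'll re-use when encoding outgoing messages.
//	writeBuf [lnwire.MaxMessagePayload]byte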

// writeHandler is a goroutine dedicated to reading messages off of an incoming
// queue, and writing them out to the wire. This goroutine coordinates with the
// queueHandler in order to ensure the incoming message queue is quickly
// drained.
//
// NOTE: This method MUST be run as a goroutine.
func (p *peer) writeHandler() {
	var exitErr error

out:
	for {
		select {
		case outMsg := <-p.sendQueue:
			switch outMsg.msg.(type) {
			// If we're about to send a ping message, then log the
			// exact time in which we send the message so we can
			// use the delay as a rough estimate of latency to the
			// remote peer.
			case *lnwire.Ping:
				// TODO(roasbeef): do this before the write?
				// possibly account for processing within func?
				now := time.Now().UnixNano()
				atomic.StoreInt64(&p.pingLastSend, now)
			}

			// Write out the message to the socket, responding with
			// error if `errChan` is non-nil. The `errChan` allows
			// callers to optionally synchronize sends with the
			// writeHandler.
			err := p.writeMessage(outMsg.msg)
			if outMsg.errChan != nil {
				outMsg.errChan <- err
			}

			if err != nil {
				exitErr = errors.Errorf("unable to write message: %v", err)
				break out
			}

		case <-p.quit:
			exitErr = ErrPeerExiting
			break out
		}
	}

	// Avoid an exit deadlock by ensuring this goroutine's WaitGroup entry
	// is decremented before the peer is torn down.
	p.wg.Done()

	p.Disconnect(exitErr)

	peerLog.Tracef("writeHandler for peer %v done", p)
}

// queueHandler is responsible for accepting messages from outside subsystems
// to be eventually sent out on the wire by the writeHandler.
//
// NOTE: This method MUST be run as a goroutine.
func (p *peer) queueHandler() {
	defer p.wg.Done()

	// pendingMsgs will hold all messages waiting to be added
	// to the sendQueue.
	pendingMsgs := list.New()

	for {
		// Examine the front of the queue.
		elem := pendingMsgs.Front()
		if elem != nil {
			// There's an element on the queue, try adding
			// it to the sendQueue. We also watch for
			// messages on the outgoingQueue, in case the
			// writeHandler cannot accept messages on the
			// sendQueue.
			select {
			case p.sendQueue <- elem.Value.(outgoingMsg):
				pendingMsgs.Remove(elem)
			case msg := <-p.outgoingQueue:
				pendingMsgs.PushBack(msg)
			case <-p.quit:
				return
			}
		} else {
			// If there weren't any messages to send to the
			// writeHandler, then we'll accept a new message
			// into the queue from outside sub-systems.
			select {
			case msg := <-p.outgoingQueue:
				pendingMsgs.PushBack(msg)
			case <-p.quit:
				return
			}
		}
	}
}

// pingHandler is responsible for periodically sending ping messages to the
// remote peer in order to keep the connection alive and/or determine if the
// connection is still active.
//
// NOTE: This method MUST be run as a goroutine.
func (p *peer) pingHandler() {
	defer p.wg.Done()

	pingTicker := time.NewTicker(pingInterval)
	defer pingTicker.Stop()

	// TODO(roasbeef): make dynamic in order to create fake cover traffic
	const numPingBytes = 16

out:
	for {
		select {
		case <-pingTicker.C:
			p.queueMsg(lnwire.NewPing(numPingBytes), nil)
		case <-p.quit:
			break out
		}
	}
}

// PingTime returns the estimated ping time to the peer in microseconds.
func (p *peer) PingTime() int64 {
	return atomic.LoadInt64(&p.pingTime)
}
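
// The latency returned by PingTime is not computed in this section: the
// writeHandler above records pingLastSend (in nanoseconds) when a Ping goes
// out, and the peer's read path is expected to update pingTime when the
// matching Pong arrives. A minimal sketch of that update, assuming the
// nanosecond timestamp stored by the writeHandler:
//
//	pingSendTime := atomic.LoadInt64(&p.pingLastSend)
//	delay := (time.Now().UnixNano() - pingSendTime) / 1000 // ns -> µs
//	atomic.StoreInt64(&p.pingTime, delay)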

// queueMsg queues a new lnwire.Message to be eventually sent out on the
// wire. If a non-nil errChan is passed, the result of the write is sent on
// it once the writeHandler has processed the message, or ErrPeerExiting if
// the peer is shutting down before the message could be queued.
func (p *peer) queueMsg(msg lnwire.Message, errChan chan error) {
	select {
	case p.outgoingQueue <- outgoingMsg{msg, errChan}:
	case <-p.quit:
		peerLog.Tracef("Peer shutting down, could not enqueue msg.")
		if errChan != nil {
			errChan <- ErrPeerExiting
		}
	}
}
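
// A minimal usage sketch for queueMsg's errChan (the SendMessage method later
// in this file wraps the same pattern): pass a buffered channel and block on
// it for a synchronous send, or pass nil to fire-and-forget:
//
//	errChan := make(chan error, 1) // buffered, so the writeHandler never blocks
//	p.queueMsg(msg, errChan)
//	if err := <-errChan; err != nil {
//		// The write failed or the peer is exiting.
//	}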

// ChannelSnapshots returns a slice of channel snapshots detailing all
// currently active channels maintained with the remote peer.
func (p *peer) ChannelSnapshots() []*channeldb.ChannelSnapshot {
	p.activeChanMtx.RLock()
	defer p.activeChanMtx.RUnlock()

	snapshots := make([]*channeldb.ChannelSnapshot, 0, len(p.activeChannels))
	for _, activeChan := range p.activeChannels {
		// We'll only return a snapshot for channels that are
		// *immediately* available for routing payments over.
		if activeChan.RemoteNextRevocation() == nil {
			continue
		}

		snapshot := activeChan.StateSnapshot()
		snapshots = append(snapshots, snapshot)
	}

	return snapshots
}

// genDeliveryScript returns a new script to be used to send our funds to in
// the case of a cooperative channel close negotiation.
func (p *peer) genDeliveryScript() ([]byte, error) {
	deliveryAddr, err := p.server.cc.wallet.NewAddress(
		lnwallet.WitnessPubKey, false,
	)
	if err != nil {
		return nil, err
	}
	peerLog.Infof("Delivery addr for channel close: %v",
		deliveryAddr)

	return txscript.PayToAddrScript(deliveryAddr)
}
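
// The script generated above is handed to the channel closer as our delivery
// script for the co-op close transaction. Since the wallet is asked for a
// lnwallet.WitnessPubKey address, txscript.PayToAddrScript should produce a
// standard P2WPKH script; an illustrative shape check (assumption, shown for
// clarity only):
//
//	// OP_0 followed by a 20-byte push of the pubkey hash.
//	isP2WPKH := len(script) == 22 && script[0] == 0x00 && script[1] == 0x14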

// channelManager is a goroutine dedicated to handling all requests/signals
// pertaining to the opening, cooperative closing, and force closing of all
// channels maintained with the remote peer.
//
// NOTE: This method MUST be run as a goroutine.
func (p *peer) channelManager() {
	defer p.wg.Done()

out:
	for {
		select {
		// A new channel has arrived which means we've just completed a
		// funding workflow. We'll initialize the necessary local
		// state, and notify the htlc switch of a new link.
		case newChanReq := <-p.newChannels:
			chanPoint := newChanReq.channel.ChannelPoint()
			chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
			newChan := newChanReq.channel

			// Make sure this channel is not already active.
			p.activeChanMtx.Lock()
			if currentChan, ok := p.activeChannels[chanID]; ok {
				peerLog.Infof("Already have ChannelPoint(%v), "+
					"ignoring.", chanPoint)

				p.activeChanMtx.Unlock()
				close(newChanReq.done)
				newChanReq.channel.Stop()

				// If we're being sent a new channel, and our
				// existing channel doesn't have the next
				// revocation, then we need to update the
				// current existing channel. This is the case
				// when the remote peer retransmits
				// FundingLocked: the funding manager hands us
				// the channel again, and an unset
				// RemoteNextRevocation means we haven't yet
				// processed that message.
				if currentChan.RemoteNextRevocation() != nil {
					continue
				}

				peerLog.Infof("Processing retransmitted "+
					"FundingLocked for ChannelPoint(%v)",
					chanPoint)

				nextRevoke := newChan.RemoteNextRevocation()
				err := currentChan.InitNextRevocation(nextRevoke)
				if err != nil {
					peerLog.Errorf("unable to init chan "+
						"revocation: %v", err)
					continue
				}

				continue
			}

			// If not already active, we'll add this channel to the
			// set of active channels, so we can look it up later
			// easily according to its channel ID.
			p.activeChannels[chanID] = newChan
			p.activeChanMtx.Unlock()

			peerLog.Infof("New channel active ChannelPoint(%v) "+
				"with NodeKey(%x)", chanPoint, p.PubKey())

			// Next, we'll assemble a ChannelLink along with the
			// necessary items it needs to function.
			//
			// TODO(roasbeef): panic on below?
			_, currentHeight, err := p.server.cc.chainIO.GetBestBlock()
			if err != nil {
				peerLog.Errorf("unable to get best block: %v", err)
				continue
			}
			chainEvents, err := p.server.chainArb.SubscribeChannelEvents(
				*chanPoint,
			)
			if err != nil {
				peerLog.Errorf("unable to subscribe to chain "+
					"events: %v", err)
				continue
			}

			// Create the link and add it to the switch.
			err = p.addLink(
				chanPoint, newChan, &p.server.cc.routingPolicy,
				chainEvents, currentHeight, false,
			)
			if err != nil {
				peerLog.Errorf("can't register new channel "+
					"link(%v) with NodeKey(%x): %v", chanPoint,
					p.PubKey(), err)
			}

			close(newChanReq.done)

		// We've just received a local request to close an active
		// channel. It will either kick off a cooperative channel
		// closure negotiation, or be a notification of a breached
		// contract that should be abandoned.
		case req := <-p.localCloseChanReqs:
			p.handleLocalCloseReq(req)

		// We've received a new cooperative channel closure related
		// message from the remote peer, we'll use this message to
		// advance the chan closer state machine.
		case closeMsg := <-p.chanCloseMsgs:
			// We'll now fetch the matching closing state machine
			// in order to continue, or finalize the channel
			// closure process.
			chanCloser, err := p.fetchActiveChanCloser(closeMsg.cid)
			if err != nil {
				// If the channel is not known to us, we'll
				// simply ignore this message.
				if err == ErrChannelNotFound {
					continue
				}

				peerLog.Errorf("Unable to respond to remote "+
					"close msg: %v", err)

				errMsg := &lnwire.Error{
					ChanID: closeMsg.cid,
					Data:   lnwire.ErrorData(err.Error()),
				}
				p.queueMsg(errMsg, nil)
				continue
			}

			// Next, we'll process the next message using the
			// target state machine. We'll either continue
			// negotiation, or halt.
			msgs, closeFin, err := chanCloser.ProcessCloseMsg(
				closeMsg.msg,
			)
			if err != nil {
				err := fmt.Errorf("unable to process close "+
					"msg: %v", err)
				peerLog.Error(err)

				// As the negotiations failed, we'll reset the
				// channel state to ensure we act on on-chain
				// events as normal.
				chanCloser.cfg.channel.ResetState()

				if chanCloser.CloseRequest() != nil {
					chanCloser.CloseRequest().Err <- err
				}
				delete(p.activeChanCloses, closeMsg.cid)
				continue
			}

			// Queue any messages to the remote peer that need to
			// be sent as a part of this latest round of
			// negotiations.
			for _, msg := range msgs {
				p.queueMsg(msg, nil)
			}

			// If we haven't finished close negotiations, then
			// we'll continue as we can't yet finalize the closure.
			if !closeFin {
				continue
			}

			// Otherwise, we've agreed on a closing fee! In this
			// case, we'll wrap up the channel closure by notifying
			// relevant sub-systems and launching a goroutine to
			// wait for close tx conf.
			p.finalizeChanClosure(chanCloser)

		case <-p.quit:
			// As we've been signalled to exit, we'll reset all of
			// our active channels back to their default state.
			p.activeChanMtx.Lock()
			for _, channel := range p.activeChannels {
				channel.ResetState()
			}
			p.activeChanMtx.Unlock()

			break out
		}
	}
}

// fetchActiveChanCloser attempts to fetch the active chan closer state machine
// for the target channel ID. If the channel isn't active an error is returned.
// Otherwise, either an existing state machine will be returned, or a new one
// will be created.
func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, error) {
	// First, we'll ensure that we actually know of the target channel. If
	// not, we'll ignore this message.
	p.activeChanMtx.RLock()
	channel, ok := p.activeChannels[chanID]
	p.activeChanMtx.RUnlock()
	if !ok {
		return nil, ErrChannelNotFound
	}

	// We'll attempt to look up the matching state machine, if we can't
	// find one then this means that the remote party is initiating a
	// cooperative channel closure.
	chanCloser, ok := p.activeChanCloses[chanID]
	if !ok {
		// If we need to create a chan closer for the first time, then
		// we'll check to ensure that the channel is even in the proper
		// state to allow a co-op channel closure.
		if len(channel.ActiveHtlcs()) != 0 {
			return nil, fmt.Errorf("cannot co-op close " +
				"channel w/ active htlcs")
		}

		// We'll create a valid closing state machine in order to
		// respond to the initiated cooperative channel closure.
		deliveryAddr, err := p.genDeliveryScript()
		if err != nil {
			peerLog.Errorf("unable to gen delivery script: %v", err)

			return nil, fmt.Errorf("close addr unavailable")
		}

		// In order to begin fee negotiations, we'll first compute our
		// target ideal fee-per-kw. We'll set this to a lax value, as
		// we weren't the ones that initiated the channel closure.
		feePerVSize, err := p.server.cc.feeEstimator.EstimateFeePerVSize(6)
		if err != nil {
			peerLog.Errorf("unable to query fee estimator: %v", err)

			return nil, fmt.Errorf("unable to estimate fee")
		}

		// We'll then convert the sat/vbyte estimate to sat/kw, as
		// that is the native unit used within the protocol when
		// dealing with fees (1 vbyte = 4 weight units, so
		// sat/kw = 250 * sat/vbyte).
		targetFeePerKw := feePerVSize.FeePerKWeight()

		_, startingHeight, err := p.server.cc.chainIO.GetBestBlock()
		if err != nil {
			peerLog.Errorf("unable to obtain best block: %v", err)
			return nil, fmt.Errorf("cannot obtain best block")
		}

		chanCloser = newChannelCloser(
			chanCloseCfg{
				channel:           channel,
				unregisterChannel: p.server.htlcSwitch.RemoveLink,
				broadcastTx:       p.server.cc.wallet.PublishTransaction,
				disableChannel:    p.server.disableChannel,
				quit:              p.quit,
			},
			deliveryAddr,
			targetFeePerKw,
			uint32(startingHeight),
			nil,
		)
		p.activeChanCloses[chanID] = chanCloser
	}

	return chanCloser, nil
}

// handleLocalCloseReq kicks off the workflow to execute a cooperative or
// forced unilateral closure of the channel initiated by a local subsystem.
func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
	chanID := lnwire.NewChanIDFromOutPoint(req.ChanPoint)

	p.activeChanMtx.RLock()
	channel, ok := p.activeChannels[chanID]
	p.activeChanMtx.RUnlock()
	if !ok {
		err := fmt.Errorf("unable to close channel, ChannelID(%v) is "+
			"unknown", chanID)
		peerLog.Errorf(err.Error())
		req.Err <- err
		return
	}

	switch req.CloseType {

	// A type of CloseRegular indicates that the user has opted to close
	// out this channel on-chain, so we execute the cooperative channel
	// closure workflow.
	case htlcswitch.CloseRegular:
		// First, we'll fetch a fresh delivery address that we'll use
		// to send the funds to in the case of a successful
		// negotiation.
		deliveryAddr, err := p.genDeliveryScript()
		if err != nil {
			peerLog.Errorf(err.Error())
			req.Err <- err
			return
		}

		// Next, we'll create a new channel closer state machine to
		// handle the close negotiation.
		_, startingHeight, err := p.server.cc.chainIO.GetBestBlock()
		if err != nil {
			peerLog.Errorf(err.Error())
			req.Err <- err
			return
		}

		chanCloser := newChannelCloser(
			chanCloseCfg{
				channel:           channel,
				unregisterChannel: p.server.htlcSwitch.RemoveLink,
				broadcastTx:       p.server.cc.wallet.PublishTransaction,
				disableChannel:    p.server.disableChannel,
				quit:              p.quit,
			},
			deliveryAddr,
			req.TargetFeePerKw,
			uint32(startingHeight),
			req,
		)
		p.activeChanCloses[chanID] = chanCloser

		// Finally, we'll initiate the channel shutdown within the
		// chanCloser, and send the shutdown message to the remote
		// party to kick things off.
		shutdownMsg, err := chanCloser.ShutdownChan()
		if err != nil {
			peerLog.Errorf(err.Error())
			req.Err <- err
			delete(p.activeChanCloses, chanID)

			// As we were unable to shutdown the channel, we'll
			// return it back to its normal state.
			channel.ResetState()
			return
		}

		p.queueMsg(shutdownMsg, nil)

	// A type of CloseBreach indicates that the counterparty has breached
	// the channel, therefore we need to clean up our local state.
	case htlcswitch.CloseBreach:
		// TODO(roasbeef): no longer need with newer breach logic?
		peerLog.Infof("ChannelPoint(%v) has been breached, wiping "+
			"channel", req.ChanPoint)
		if err := p.WipeChannel(req.ChanPoint); err != nil {
			peerLog.Infof("Unable to wipe channel after detected "+
				"breach: %v", err)
			req.Err <- err
			return
		}
		return
	}
}

// finalizeChanClosure performs the final clean up steps once the cooperative
// closure transaction has been fully broadcast. The finalized closing state
// machine should be passed in. Once the transaction has been sufficiently
// confirmed, the channel will be marked as fully closed within the database,
// and any clients will be notified of updates to the closing state.
func (p *peer) finalizeChanClosure(chanCloser *channelCloser) {
	closeReq := chanCloser.CloseRequest()

	// First, we'll clear all indexes related to the channel in question.
	chanPoint := chanCloser.cfg.channel.ChannelPoint()
	if err := p.WipeChannel(chanPoint); err != nil {
		if closeReq != nil {
			closeReq.Err <- err
		}
	}

	chanCloser.cfg.channel.Stop()

	// Next, we'll launch a goroutine which will request to be notified by
	// the ChainNotifier once the closure transaction obtains a single
	// confirmation.
	notifier := p.server.cc.chainNotifier

	// If any error happens during waitForChanToClose, forward it to
	// closeReq. If this channel closure is not locally initiated, closeReq
	// will be nil, so just ignore the error.
	errChan := make(chan error, 1)
	if closeReq != nil {
		errChan = closeReq.Err
	}

	closingTx, err := chanCloser.ClosingTx()
	if err != nil {
		if closeReq != nil {
			peerLog.Error(err)
			closeReq.Err <- err
		}
	}

	closingTxid := closingTx.TxHash()

	// If this is a locally requested shutdown, update the caller with a
	// new event detailing the current pending state of this request.
	if closeReq != nil {
		closeReq.Updates <- &lnrpc.CloseStatusUpdate{
			Update: &lnrpc.CloseStatusUpdate_ClosePending{
				ClosePending: &lnrpc.PendingUpdate{
					Txid: closingTxid[:],
				},
			},
		}
	}

	go waitForChanToClose(chanCloser.negotiationHeight, notifier, errChan,
		chanPoint, &closingTxid, func() {
			// Respond to the local subsystem which requested the
			// channel closure.
			if closeReq != nil {
				closeReq.Updates <- &lnrpc.CloseStatusUpdate{
					Update: &lnrpc.CloseStatusUpdate_ChanClose{
						ChanClose: &lnrpc.ChannelCloseUpdate{
							ClosingTxid: closingTxid[:],
							Success:     true,
						},
					},
				}
			}

			// Remove the persistent connection to this peer if we
			// no longer have open channels with them.
			p.activeChanMtx.Lock()
			numActiveChans := len(p.activeChannels)
			p.activeChanMtx.Unlock()

			if numActiveChans == 0 {
				p.server.prunePersistentPeerConnection(
					p.pubKeyBytes,
				)
			}
		})
}

// waitForChanToClose uses the passed notifier to wait until the channel has
// been detected as closed on chain and then concludes by executing the passed
// callback. If any error is encountered within the function, it will be sent
// over the errChan.
func waitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
	errChan chan error, chanPoint *wire.OutPoint,
	closingTxID *chainhash.Hash, cb func()) {

	peerLog.Infof("Waiting for confirmation of cooperative close of "+
		"ChannelPoint(%v) with txid: %v", chanPoint,
		closingTxID)

	// TODO(roasbeef): add param for num needed confs
	confNtfn, err := notifier.RegisterConfirmationsNtfn(closingTxID, 1,
		bestHeight)
	if err != nil {
		if errChan != nil {
			errChan <- err
		}
		return
	}

	// In the case that the ChainNotifier is shutting down, all subscriber
	// notification channels will be closed, generating a nil receive.
	height, ok := <-confNtfn.Confirmed
	if !ok {
		return
	}

	// The channel has been closed, remove it from any active indexes, and
	// the database state.
	peerLog.Infof("ChannelPoint(%v) is now closed at "+
		"height %v", chanPoint, height.BlockHeight)

	// Finally, execute the closure callback to mark the confirmation of
	// the transaction closing the contract.
	cb()
}

// WipeChannel removes the passed channel point from all indexes associated
// with the peer, and the switch.
func (p *peer) WipeChannel(chanPoint *wire.OutPoint) error {
	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

	p.activeChanMtx.Lock()
	if channel, ok := p.activeChannels[chanID]; ok {
		channel.Stop()
		delete(p.activeChannels, chanID)
		if len(p.activeChannels) == 0 {
			p.server.prunePersistentPeerConnection(p.pubKeyBytes)
		}
	}
	p.activeChanMtx.Unlock()

	// Instruct the HtlcSwitch to close this link as the channel is no
	// longer active.
	if err := p.server.htlcSwitch.RemoveLink(chanID); err != nil {
		if err == htlcswitch.ErrChannelLinkNotFound {
			peerLog.Warnf("unable to remove channel link with "+
				"ChannelPoint(%v): %v", chanID, err)
			return nil
		}
		return err
	}

	return nil
}

// handleInitMsg handles the incoming init message which contains global and
// local feature vectors. If the feature vectors are incompatible, then we
// disconnect.
func (p *peer) handleInitMsg(msg *lnwire.Init) error {
	p.remoteLocalFeatures = lnwire.NewFeatureVector(msg.LocalFeatures,
		lnwire.LocalFeatures)
	p.remoteGlobalFeatures = lnwire.NewFeatureVector(msg.GlobalFeatures,
		lnwire.GlobalFeatures)

	unknownLocalFeatures := p.remoteLocalFeatures.UnknownRequiredFeatures()
	if len(unknownLocalFeatures) > 0 {
		err := errors.Errorf("Peer set unknown local feature bits: %v",
			unknownLocalFeatures)
		peerLog.Error(err)
		return err
	}

	unknownGlobalFeatures := p.remoteGlobalFeatures.UnknownRequiredFeatures()
	if len(unknownGlobalFeatures) > 0 {
		err := errors.Errorf("Peer set unknown global feature bits: %v",
			unknownGlobalFeatures)
		peerLog.Error(err)
		return err
	}

	return nil
}

// sendInitMsg sends the init message to the remote peer which contains our
// currently supported local and global features.
func (p *peer) sendInitMsg() error {
	msg := lnwire.NewInitMessage(
		p.server.globalFeatures.RawFeatureVector,
		p.localFeatures,
	)

	return p.writeMessage(msg)
}

// SendMessage sends a variadic number of messages to the remote peer. The
// first argument denotes if the method should block until the messages have
// been sent to the remote peer.
func (p *peer) SendMessage(sync bool, msgs ...lnwire.Message) error {
	// Add all incoming messages to the outgoing queue. A list of error
	// chans is populated for each message if the caller requested a sync
	// send.
	var errChans []chan error
	for _, msg := range msgs {
		// If a sync send was requested, create an error chan to listen
		// for an ack from the writeHandler.
		var errChan chan error
		if sync {
			errChan = make(chan error, 1)
			errChans = append(errChans, errChan)
		}

		p.queueMsg(msg, errChan)
	}

	// Wait for all replies from the writeHandler. For async sends, this
	// will be a NOP as the list of error chans is nil.
	for _, errChan := range errChans {
		select {
		case err := <-errChan:
			if err != nil {
				return err
			}
		case <-p.quit:
			return ErrPeerExiting
		}
	}

	return nil
}
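
// Usage note: with sync=true the call above blocks until every queued message
// has been handed to the wire (or the peer quits); with sync=false it only
// guarantees the messages were queued. A small sketch, using illustrative
// lnwire.Message variables:
//
//	// Block until both messages have been written out.
//	if err := p.SendMessage(true, msgA, msgB); err != nil {
//		// Handle the write failure or peer shutdown.
//	}
//
//	// Queue and return immediately.
//	_ = p.SendMessage(false, msgC)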

// PubKey returns the pubkey of the peer in compressed serialized format.
func (p *peer) PubKey() [33]byte {
	return p.pubKeyBytes
}

// IdentityKey returns the public key of the remote peer.
func (p *peer) IdentityKey() *btcec.PublicKey {
	return p.addr.IdentityKey
}

// Address returns the network address of the remote peer.
func (p *peer) Address() net.Addr {
	return p.addr.Address
}

// AddNewChannel adds a new channel to the peer. The channel should fail to be
// added if the cancel channel is closed.
func (p *peer) AddNewChannel(channel *lnwallet.LightningChannel,
	cancel <-chan struct{}) error {

	newChanDone := make(chan struct{})
	newChanMsg := &newChannelMsg{
		channel: channel,
		done:    newChanDone,
	}

	select {
	case p.newChannels <- newChanMsg:
	case <-cancel:
		return errors.New("canceled adding new channel")
	case <-p.quit:
		return ErrPeerExiting
	}

	// We pause here to wait for the peer to recognize the new channel
	// before we close the channel barrier corresponding to the channel.
	select {
	case <-newChanDone:
	case <-p.quit:
		return ErrPeerExiting
	}

	return nil
}

// TODO(roasbeef): make all start/stop mutexes a CAS

// fetchLastChanUpdate returns a function which is able to retrieve the last
// channel update for a target channel.
func fetchLastChanUpdate(s *server,
	pubKey [33]byte) func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) {

	return func(cid lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) {
		info, edge1, edge2, err := s.chanRouter.GetChannelByID(cid)
		if err != nil {
			return nil, err
		}

		if edge1 == nil || edge2 == nil {
			return nil, fmt.Errorf("unable to find channel by "+
				"ShortChannelID(%v)", cid)
		}

		// If we're the outgoing node on the first edge, then that
		// means the second edge is our policy. Otherwise, the first
		// edge is our policy.
		var local *channeldb.ChannelEdgePolicy
		if bytes.Equal(edge1.Node.PubKeyBytes[:], pubKey[:]) {
			local = edge2
		} else {
			local = edge1
		}

		return extractChannelUpdate(info, local)
	}
}
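
// A short usage sketch (hypothetical variable names): the returned closure is
// handed to sub-systems that need to look up our most recent policy for a
// channel, keyed by its short channel ID:
//
//	fetchUpdate := fetchLastChanUpdate(s, p.PubKey())
//	chanUpdate, err := fetchUpdate(shortChanID)
//	if err != nil {
//		// Unknown channel, or no stored policy for our edge.
//	}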