2015-12-21 00:16:38 +03:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2016-01-14 08:41:46 +03:00
|
|
|
"container/list"
|
2016-07-10 02:41:06 +03:00
|
|
|
"fmt"
|
2015-12-21 02:10:09 +03:00
|
|
|
"net"
|
2015-12-21 00:16:38 +03:00
|
|
|
"sync"
|
2016-01-14 08:41:46 +03:00
|
|
|
"sync/atomic"
|
2015-12-21 00:16:38 +03:00
|
|
|
"time"
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
"github.com/davecgh/go-spew/spew"
|
2017-04-21 01:45:04 +03:00
|
|
|
"github.com/lightningnetwork/lnd/brontide"
|
2017-05-02 23:04:58 +03:00
|
|
|
|
|
|
|
"bytes"
|
|
|
|
|
|
|
|
"github.com/go-errors/errors"
|
2017-05-05 02:03:47 +03:00
|
|
|
"github.com/lightningnetwork/lnd/chainntnfs"
|
2016-06-21 22:32:32 +03:00
|
|
|
"github.com/lightningnetwork/lnd/channeldb"
|
2017-05-02 23:04:58 +03:00
|
|
|
"github.com/lightningnetwork/lnd/htlcswitch"
|
2016-08-31 02:52:53 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnrpc"
|
2016-01-18 06:14:47 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwallet"
|
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
2017-06-29 16:52:55 +03:00
|
|
|
"github.com/lightningnetwork/lnd/routing"
|
2017-01-06 00:56:27 +03:00
|
|
|
"github.com/roasbeef/btcd/chaincfg/chainhash"
|
2017-01-06 00:58:06 +03:00
|
|
|
"github.com/roasbeef/btcd/connmgr"
|
2017-03-25 04:26:09 +03:00
|
|
|
"github.com/roasbeef/btcd/txscript"
|
2016-05-15 17:17:44 +03:00
|
|
|
"github.com/roasbeef/btcd/wire"
|
2016-01-17 06:03:03 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// numNodes is the number of peer instances created over the lifetime
	// of the process. It is incremented atomically in newPeer to assign
	// each peer a unique id.
	numNodes int32
)
|
|
|
|
|
|
|
|
const (
	// pingInterval is the interval at which ping messages are sent.
	pingInterval = 1 * time.Minute

	// idleTimeout is the duration of inactivity before we time out a peer.
	idleTimeout = 5 * time.Minute

	// outgoingQueueLen is the buffer size of the channel which houses
	// messages to be sent across the wire, requested by objects outside
	// this struct.
	outgoingQueueLen = 50
)
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// outgoinMsg packages an lnwire.Message to be sent out on the wire, along with
// a buffered channel which will be sent upon once the write is complete. This
// buffered channel acts as a semaphore to be used for synchronization purposes.
//
// NOTE(review): the name is missing a 'g' ("outgoingMsg"); left as-is since
// renaming would touch every user of the type.
type outgoinMsg struct {
	// msg is the wire message to be written out.
	msg lnwire.Message

	// errChan, if non-nil, is sent upon once the write completes. It MUST
	// be buffered so the writer never blocks on delivery.
	errChan chan error // MUST be buffered.
}
|
|
|
|
|
2017-01-24 05:19:54 +03:00
|
|
|
// newChannelMsg packages a lnwallet.LightningChannel with a channel that
// allows the receiver of the request to report when the funding transaction
// has been confirmed and the channel creation process completed.
type newChannelMsg struct {
	// channel is the newly opened channel to begin managing.
	channel *lnwallet.LightningChannel

	// done is used by the receiver to report completion of the channel
	// creation process — presumably signalled once processing finishes;
	// confirm against the fundingManager's usage.
	done chan struct{}
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// closeMsg is a wrapper struct around any wire messages that deal with the
// cooperative channel closure negotiation process. This struct includes the
// raw channel ID targeted along with the original message.
type closeMsg struct {
	// cid identifies the channel the enclosed message pertains to.
	cid lnwire.ChannelID

	// msg is the raw close-related wire message (e.g. lnwire.Shutdown or
	// lnwire.ClosingSigned).
	msg lnwire.Message
}
|
|
|
|
|
2017-01-13 08:01:50 +03:00
|
|
|
// chanSnapshotReq is a message sent by outside subsystems to a peer in order
// to gain a snapshot of the peer's currently active channels.
type chanSnapshotReq struct {
	// resp is the channel over which the channel snapshots are delivered
	// back to the requester.
	resp chan []*channeldb.ChannelSnapshot
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// peer is an active peer on the Lightning Network. This struct is responsible
// for managing any channel state related to this peer. To do so, it has
// several helper goroutines to handle events such as HTLC timeouts, new
// funding workflow, and detecting an uncooperative closure of any active
// channels.
// TODO(roasbeef): proper reconnection logic
type peer struct {
	// The following fields are only meant to be used *atomically*.

	// bytesReceived is the running total of bytes read from this peer
	// (updated in readNextMessage).
	bytesReceived uint64

	// bytesSent is the running total of bytes written to this peer —
	// presumably updated by the write path; confirm against writeHandler.
	bytesSent uint64

	// pingTime is a rough estimate of the RTT (round-trip-time) between us
	// and the connected peer. This time is expressed in micro seconds.
	// TODO(roasbeef): also use a WMA or EMA?
	pingTime int64

	// pingLastSend is the Unix time expressed in nanoseconds when we sent
	// our last ping message.
	pingLastSend int64

	// started guards against double Start; disconnect marks teardown.
	// MUST be used atomically.
	started    int32
	disconnect int32

	// connReq is the connection request that produced this connection, if
	// the connection manager initiated it.
	connReq *connmgr.ConnReq

	// conn is the underlying network connection to the remote peer.
	conn net.Conn

	// addr is the advertised network address, including the identity
	// public key, of the remote peer.
	addr *lnwire.NetAddress

	// pubKeyBytes is the compressed serialization of the peer's identity
	// public key (set once in newPeer).
	pubKeyBytes [33]byte

	// inbound indicates whether the remote peer initiated the connection.
	inbound bool

	// id is a process-unique identifier assigned from numNodes.
	id int32

	// This mutex protects all the stats below it.
	sync.RWMutex

	// timeConnected records when the connection was established.
	timeConnected time.Time

	// lastSend and lastRecv record the time of the most recent write and
	// read respectively.
	lastSend time.Time
	lastRecv time.Time

	// sendQueue is the channel which is used to queue outgoing messages to
	// be written onto the wire. Note that this channel is unbuffered.
	sendQueue chan outgoinMsg

	// outgoingQueue is a buffered channel which allows second/third party
	// objects to queue messages to be sent out on the wire.
	//
	// NOTE(review): newPeer currently creates this channel unbuffered
	// despite this comment (and the outgoingQueueLen constant) — confirm
	// which is intended.
	outgoingQueue chan outgoinMsg

	// activeChannels is a map which stores the state machines of all
	// active channels. Channels are indexed into the map by the txid of
	// the funding transaction which opened the channel. Guarded by
	// activeChanMtx.
	activeChanMtx  sync.RWMutex
	activeChannels map[lnwire.ChannelID]*lnwallet.LightningChannel

	// newChannels is used by the fundingManager to send fully opened
	// channels to the source peer which handled the funding workflow.
	newChannels chan *newChannelMsg

	// activeChanCloses is a map that keeps track of all the active
	// cooperative channel closures. Any channel closing messages are
	// directed to one of these active state machines. Once the channel
	// has been closed, the state machine will be deleted from the map.
	activeChanCloses map[lnwire.ChannelID]*channelCloser

	// localCloseChanReqs is a channel in which any local requests to close
	// a particular channel are sent over.
	localCloseChanReqs chan *htlcswitch.ChanClose

	// chanCloseMsgs is a channel that any message related to channel
	// closures are sent over. This includes lnwire.Shutdown messages as
	// well as lnwire.ClosingSigned messages.
	chanCloseMsgs chan *closeMsg

	// server is a pointer to the daemon's main server instance.
	server *server

	// localFeatures is the set of local features that we advertised to the
	// remote node.
	localFeatures *lnwire.RawFeatureVector

	// remoteLocalFeatures is the local feature vector received from the
	// peer during the connection handshake.
	remoteLocalFeatures *lnwire.FeatureVector

	// remoteGlobalFeatures is the global feature vector received from the
	// peer during the connection handshake.
	remoteGlobalFeatures *lnwire.FeatureVector

	// queueQuit presumably signals the queueHandler to exit — confirm
	// against queueHandler. quit signals all peer goroutines to exit
	// (closed in Disconnect); wg tracks all active peer goroutines.
	queueQuit chan struct{}
	quit      chan struct{}
	wg        sync.WaitGroup
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// newPeer creates a new peer from an establish connection object, and a
|
|
|
|
// pointer to the main server.
|
2017-02-22 12:10:07 +03:00
|
|
|
func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
|
2017-10-19 01:16:03 +03:00
|
|
|
addr *lnwire.NetAddress, inbound bool,
|
|
|
|
localFeatures *lnwire.RawFeatureVector) (*peer, error) {
|
2016-10-28 05:49:10 +03:00
|
|
|
|
|
|
|
nodePub := addr.IdentityKey
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
p := &peer{
|
2017-06-17 01:11:02 +03:00
|
|
|
conn: conn,
|
|
|
|
addr: addr,
|
2016-10-28 05:49:10 +03:00
|
|
|
|
|
|
|
id: atomic.AddInt32(&numNodes, 1),
|
|
|
|
inbound: inbound,
|
2017-02-22 12:10:07 +03:00
|
|
|
connReq: connReq,
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
server: server,
|
2016-01-17 06:03:03 +03:00
|
|
|
|
2017-10-19 01:16:03 +03:00
|
|
|
localFeatures: localFeatures,
|
|
|
|
|
2017-08-09 02:51:19 +03:00
|
|
|
sendQueue: make(chan outgoinMsg),
|
|
|
|
outgoingQueue: make(chan outgoinMsg),
|
2016-01-17 06:03:03 +03:00
|
|
|
|
2017-08-09 02:51:19 +03:00
|
|
|
activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
|
|
|
|
newChannels: make(chan *newChannelMsg, 1),
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
activeChanCloses: make(map[lnwire.ChannelID]*channelCloser),
|
|
|
|
localCloseChanReqs: make(chan *htlcswitch.ChanClose),
|
|
|
|
chanCloseMsgs: make(chan *closeMsg),
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2016-01-17 06:03:03 +03:00
|
|
|
queueQuit: make(chan struct{}),
|
|
|
|
quit: make(chan struct{}),
|
|
|
|
}
|
2017-06-17 01:11:02 +03:00
|
|
|
copy(p.pubKeyBytes[:], nodePub.SerializeCompressed())
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
return p, nil
|
|
|
|
}
|
|
|
|
|
2017-04-12 07:59:45 +03:00
|
|
|
// Start starts all helper goroutines the peer needs for normal operations. In
|
|
|
|
// the case this peer has already been started, then this function is a loop.
|
2016-01-17 06:03:03 +03:00
|
|
|
func (p *peer) Start() error {
|
|
|
|
if atomic.AddInt32(&p.started, 1) != 1 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
peerLog.Tracef("peer %v starting", p)
|
2016-01-17 06:03:03 +03:00
|
|
|
|
2017-03-17 05:45:10 +03:00
|
|
|
// Exchange local and global features, the init message should be very
|
|
|
|
// first between two nodes.
|
2017-02-16 15:39:38 +03:00
|
|
|
if err := p.sendInitMsg(); err != nil {
|
2017-04-24 05:24:28 +03:00
|
|
|
return fmt.Errorf("unable to send init msg: %v", err)
|
2017-02-16 15:39:38 +03:00
|
|
|
}
|
|
|
|
|
2017-03-17 05:45:10 +03:00
|
|
|
// Before we launch any of the helper goroutines off the peer struct,
|
2017-04-24 05:24:28 +03:00
|
|
|
// we'll first ensure proper adherence to the p2p protocol. The init
|
2017-03-17 05:45:10 +03:00
|
|
|
// message MUST be sent before any other message.
|
2017-03-30 04:33:20 +03:00
|
|
|
readErr := make(chan error, 1)
|
|
|
|
msgChan := make(chan lnwire.Message, 1)
|
2017-08-09 02:51:19 +03:00
|
|
|
p.wg.Add(1)
|
2017-03-30 04:33:20 +03:00
|
|
|
go func() {
|
2017-08-09 02:51:19 +03:00
|
|
|
defer p.wg.Done()
|
|
|
|
|
2017-04-20 02:23:17 +03:00
|
|
|
msg, err := p.readNextMessage()
|
2017-03-30 04:33:20 +03:00
|
|
|
if err != nil {
|
|
|
|
readErr <- err
|
|
|
|
msgChan <- nil
|
2017-08-11 04:07:45 +03:00
|
|
|
return
|
2017-03-30 04:33:20 +03:00
|
|
|
}
|
|
|
|
readErr <- nil
|
|
|
|
msgChan <- msg
|
|
|
|
}()
|
|
|
|
|
|
|
|
select {
|
2017-04-12 07:59:45 +03:00
|
|
|
// In order to avoid blocking indefinitely, we'll give the other peer
|
2017-04-14 00:48:38 +03:00
|
|
|
// an upper timeout of 15 seconds to respond before we bail out early.
|
|
|
|
case <-time.After(time.Second * 15):
|
2017-03-30 04:33:20 +03:00
|
|
|
return fmt.Errorf("peer did not complete handshake within 5 " +
|
|
|
|
"seconds")
|
|
|
|
case err := <-readErr:
|
|
|
|
if err != nil {
|
2017-04-24 05:24:28 +03:00
|
|
|
return fmt.Errorf("unable to read init msg: %v", err)
|
2017-03-30 04:33:20 +03:00
|
|
|
}
|
2017-02-16 15:39:38 +03:00
|
|
|
}
|
|
|
|
|
2017-05-11 03:37:59 +03:00
|
|
|
// Once the init message arrives, we can parse it so we can figure out
|
|
|
|
// the negotiation of features for this session.
|
2017-03-30 04:33:20 +03:00
|
|
|
msg := <-msgChan
|
2017-02-16 15:39:38 +03:00
|
|
|
if msg, ok := msg.(*lnwire.Init); ok {
|
|
|
|
if err := p.handleInitMsg(msg); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return errors.New("very first message between nodes " +
|
|
|
|
"must be init message")
|
|
|
|
}
|
|
|
|
|
2017-04-24 05:23:15 +03:00
|
|
|
// Fetch and then load all the active channels we have with this remote
|
|
|
|
// peer from the database.
|
|
|
|
activeChans, err := p.server.chanDB.FetchOpenChannels(p.addr.IdentityKey)
|
|
|
|
if err != nil {
|
|
|
|
peerLog.Errorf("unable to fetch active chans "+
|
|
|
|
"for peer %v: %v", p, err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, load all the active channels we have with this peer,
|
|
|
|
// registering them with the switch and launching the necessary
|
|
|
|
// goroutines required to operate them.
|
|
|
|
peerLog.Debugf("Loaded %v active channels from database with "+
|
|
|
|
"peerID(%v)", len(activeChans), p.id)
|
|
|
|
if err := p.loadActiveChannels(activeChans); err != nil {
|
|
|
|
return fmt.Errorf("unable to load channels: %v", err)
|
|
|
|
}
|
|
|
|
|
2017-05-11 03:37:59 +03:00
|
|
|
p.wg.Add(5)
|
|
|
|
go p.queueHandler()
|
|
|
|
go p.writeHandler()
|
|
|
|
go p.readHandler()
|
|
|
|
go p.channelManager()
|
|
|
|
go p.pingHandler()
|
|
|
|
|
2016-01-17 06:03:03 +03:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-04-24 05:23:15 +03:00
|
|
|
// loadActiveChannels creates indexes within the peer for tracking all active
// channels returned by the database. For each channel it: reconstructs the
// in-memory channel state machine, registers it with the breach arbiter,
// looks up our advertised forwarding policy (falling back to the default),
// and finally registers a link with the HTLC switch.
func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
	for _, dbChan := range chans {
		lnChan, err := lnwallet.NewLightningChannel(p.server.cc.signer,
			p.server.cc.chainNotifier, p.server.cc.feeEstimator, dbChan)
		if err != nil {
			return err
		}

		chanPoint := &dbChan.FundingOutpoint

		chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

		// Index the channel under its channel ID so incoming wire
		// messages can be dispatched to it.
		p.activeChanMtx.Lock()
		p.activeChannels[chanID] = lnChan
		p.activeChanMtx.Unlock()

		peerLog.Infof("peerID(%v) loading ChannelPoint(%v)", p.id, chanPoint)

		// Hand the channel to the breach arbiter so it can watch for
		// contract breaches; bail out if either the server or this
		// peer is shutting down rather than blocking forever.
		select {
		case p.server.breachArbiter.newContracts <- lnChan:
		case <-p.server.quit:
			return fmt.Errorf("server shutting down")
		case <-p.quit:
			return fmt.Errorf("peer shutting down")
		}

		// NOTE(review): a fresh block-epoch subscription is registered
		// on every loop iteration; if a later iteration returns an
		// error, earlier subscriptions are not cancelled here — confirm
		// whether the link takes ownership of cancellation.
		blockEpoch, err := p.server.cc.chainNotifier.RegisterBlockEpochNtfn()
		if err != nil {
			return err
		}
		_, currentHeight, err := p.server.cc.chainIO.GetBestBlock()
		if err != nil {
			return err
		}

		// Before we register this new link with the HTLC Switch, we'll
		// need to fetch its current link-layer forwarding policy from
		// the database.
		graph := p.server.chanDB.ChannelGraph()
		info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(chanPoint)
		// A missing edge is tolerated: we'll fall back to the default
		// routing policy below.
		if err != nil && err != channeldb.ErrEdgeNotFound {
			return err
		}

		// We'll filter out our policy from the directional channel
		// edges based on whom the edge connects to. If it doesn't
		// connect to us, then we know that we were the one that
		// advertised the policy.
		//
		// TODO(roasbeef): can add helper method to get policy for
		// particular channel.
		var selfPolicy *channeldb.ChannelEdgePolicy
		if info != nil && info.NodeKey1.IsEqual(p.server.identityPriv.PubKey()) {
			selfPolicy = p1
		} else {
			selfPolicy = p2
		}

		// If we don't yet have an advertised routing policy, then
		// we'll use the current default, otherwise we'll translate the
		// routing policy into a forwarding policy.
		var forwardingPolicy *htlcswitch.ForwardingPolicy
		if selfPolicy != nil {
			forwardingPolicy = &htlcswitch.ForwardingPolicy{
				MinHTLC:       selfPolicy.MinHTLC,
				BaseFee:       selfPolicy.FeeBaseMSat,
				FeeRate:       selfPolicy.FeeProportionalMillionths,
				TimeLockDelta: uint32(selfPolicy.TimeLockDelta),
			}
		} else {
			forwardingPolicy = &p.server.cc.routingPolicy
		}

		peerLog.Tracef("Using link policy of: %v", spew.Sdump(forwardingPolicy))

		// Register this new channel link with the HTLC Switch. This is
		// necessary to properly route multi-hop payments, and forward
		// new payments triggered by RPC clients.
		linkCfg := htlcswitch.ChannelLinkConfig{
			Peer:                  p,
			DecodeHopIterator:     p.server.sphinx.DecodeHopIterator,
			DecodeOnionObfuscator: p.server.sphinx.ExtractErrorEncrypter,
			GetLastChannelUpdate: createGetLastUpdate(p.server.chanRouter,
				p.PubKey(), lnChan.ShortChanID()),
			SettledContracts: p.server.breachArbiter.settledContracts,
			DebugHTLC:        cfg.DebugHTLC,
			HodlHTLC:         cfg.HodlHTLC,
			Registry:         p.server.invoices,
			Switch:           p.server.htlcSwitch,
			FwrdingPolicy:    *forwardingPolicy,
			FeeEstimator:     p.server.cc.feeEstimator,
			BlockEpochs:      blockEpoch,
			SyncStates:       true,
		}
		link := htlcswitch.NewChannelLink(linkCfg, lnChan,
			uint32(currentHeight))

		if err := p.server.htlcSwitch.AddLink(link); err != nil {
			return err
		}
	}

	return nil
}
|
|
|
|
|
2017-04-24 05:29:38 +03:00
|
|
|
// WaitForDisconnect waits until the peer has disconnected. A peer may be
// disconnected if the local or remote side terminates the connection, or if
// an irrecoverable protocol error has been encountered.
func (p *peer) WaitForDisconnect() {
	// Disconnect closes p.quit, which unblocks every waiter at once.
	<-p.quit
}
|
2017-02-21 09:06:16 +03:00
|
|
|
|
2016-07-14 02:40:01 +03:00
|
|
|
// Disconnect terminates the connection with the remote peer. Additionally, a
// signal is sent to the server and htlcSwitch indicating the resources
// allocated to the peer can now be cleaned up.
//
// The teardown order below matters: close the connection first so blocked
// reads/writes fail, then signal the goroutines, then wait for them to exit.
func (p *peer) Disconnect(reason error) {
	// Only the first caller performs the teardown; subsequent calls are
	// no-ops.
	if !atomic.CompareAndSwapInt32(&p.disconnect, 0, 1) {
		return
	}

	peerLog.Tracef("Disconnecting %s, reason: %v", p, reason)

	// Ensure that the TCP connection is properly closed before continuing.
	p.conn.Close()

	// Signal all peer goroutines to exit.
	close(p.quit)

	// Block until every peer goroutine has finished.
	p.wg.Wait()
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// String returns the string representation of this peer.
|
|
|
|
func (p *peer) String() string {
|
|
|
|
return p.conn.RemoteAddr().String()
|
|
|
|
}
|
|
|
|
|
|
|
|
// readNextMessage reads, and returns the next message on the wire along with
|
|
|
|
// any additional raw payload.
|
2017-04-20 02:23:17 +03:00
|
|
|
func (p *peer) readNextMessage() (lnwire.Message, error) {
|
2017-04-21 01:45:04 +03:00
|
|
|
noiseConn, ok := p.conn.(*brontide.Conn)
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("brontide.Conn required to read messages")
|
|
|
|
}
|
|
|
|
|
|
|
|
// First we'll read the next _full_ message. We do this rather than
|
|
|
|
// reading incrementally from the stream as the Lightning wire protocol
|
|
|
|
// is message oriented and allows nodes to pad on additional data to
|
|
|
|
// the message stream.
|
|
|
|
rawMsg, err := noiseConn.ReadNextMessage()
|
|
|
|
atomic.AddUint64(&p.bytesReceived, uint64(len(rawMsg)))
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, create a new io.Reader implementation from the raw message,
|
|
|
|
// and use this to decode the message directly from.
|
|
|
|
msgReader := bytes.NewReader(rawMsg)
|
|
|
|
nextMsg, err := lnwire.ReadMessage(msgReader, 0)
|
2016-01-14 08:41:46 +03:00
|
|
|
if err != nil {
|
2017-04-20 02:23:17 +03:00
|
|
|
return nil, err
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// TODO(roasbeef): add message summaries
|
2017-01-15 04:52:05 +03:00
|
|
|
p.logWireMessage(nextMsg, true)
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-04-20 02:23:17 +03:00
|
|
|
return nextMsg, nil
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2017-11-02 01:50:55 +03:00
|
|
|
// msgStream implements a goroutine-safe, in-order stream of messages to be
// delivered via closure to a receiver. These messages MUST be in order due to
// the nature of the lightning channel commitment and gossiper state machines.
// TODO(conner): use stream handler interface to abstract out stream
// state/logging
type msgStream struct {
	// peer is the peer the streamed messages originate from.
	peer *peer

	// apply is invoked by the consumer goroutine for each queued message,
	// in FIFO order.
	apply func(lnwire.Message)

	// startMsg and stopMsg are logged when the consumer goroutine starts
	// and exits, respectively.
	startMsg string
	stopMsg  string

	// msgCond wakes the consumer when new messages are queued; msgs is
	// the pending FIFO queue, guarded by msgCond's lock (mtx).
	msgCond *sync.Cond
	msgs    []lnwire.Message

	// mtx is the mutex underlying msgCond.
	mtx sync.Mutex

	// wg tracks the consumer goroutine; quit signals it to exit.
	wg   sync.WaitGroup
	quit chan struct{}
}
|
|
|
|
|
2017-11-02 01:50:55 +03:00
|
|
|
// newMsgStream creates a new instance of a chanMsgStream for a particular
|
2017-08-01 07:20:36 +03:00
|
|
|
// channel identified by its channel ID.
|
2017-11-02 01:50:55 +03:00
|
|
|
func newMsgStream(p *peer, startMsg, stopMsg string,
|
|
|
|
apply func(lnwire.Message)) *msgStream {
|
|
|
|
|
|
|
|
stream := &msgStream{
|
|
|
|
peer: p,
|
|
|
|
apply: apply,
|
|
|
|
startMsg: startMsg,
|
|
|
|
stopMsg: stopMsg,
|
|
|
|
quit: make(chan struct{}),
|
2017-08-01 07:20:36 +03:00
|
|
|
}
|
|
|
|
stream.msgCond = sync.NewCond(&stream.mtx)
|
|
|
|
|
|
|
|
return stream
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start launches the msgStream's consumer goroutine.
func (ms *msgStream) Start() {
	ms.wg.Add(1)
	go ms.msgConsumer()
}
|
|
|
|
|
|
|
|
// Stop signals the msgStream's consumer goroutine to exit and blocks until it
// has done so.
func (ms *msgStream) Stop() {
	// TODO(roasbeef): signal too?

	close(ms.quit)

	// Wake up the msgConsumer if we've been signalled to exit.
	ms.msgCond.Signal()

	ms.wg.Wait()
}
|
|
|
|
|
|
|
|
// msgConsumer is the main goroutine that streams messages from the peer's
// readHandler directly to the target channel. It pops queued messages in FIFO
// order and hands each one to ms.apply, exiting once ms.quit is closed.
func (ms *msgStream) msgConsumer() {
	defer ms.wg.Done()
	defer peerLog.Tracef(ms.stopMsg)

	peerLog.Tracef(ms.startMsg)

	for {
		// First, we'll check our condition. If the queue of messages
		// is empty, then we'll wait until a new item is added.
		ms.msgCond.L.Lock()
		for len(ms.msgs) == 0 {
			ms.msgCond.Wait()

			// If we were woken up in order to exit, then we'll do
			// so. Otherwise, we'll check the message queue for any
			// new items.
			select {
			case <-ms.quit:
				ms.msgCond.L.Unlock()
				return
			default:
			}
		}

		// Grab the message off the front of the queue, shifting the
		// slice's reference down one in order to remove the message
		// from the queue.
		msg := ms.msgs[0]
		ms.msgs[0] = nil // Set to nil to prevent GC leak.
		ms.msgs = ms.msgs[1:]

		// Release the lock before applying so a slow apply doesn't
		// block producers calling AddMsg.
		ms.msgCond.L.Unlock()

		ms.apply(msg)
	}
}
|
|
|
|
|
2017-11-02 01:50:55 +03:00
|
|
|
// AddMsg adds a new message to the msgStream. This function is safe for
// concurrent access.
//
// NOTE(review): the queue is unbounded; any backpressure must come from the
// producers.
func (ms *msgStream) AddMsg(msg lnwire.Message) {
	// First, we'll lock the condition, and add the message to the end of
	// the message queue.
	ms.msgCond.L.Lock()
	ms.msgs = append(ms.msgs, msg)
	ms.msgCond.L.Unlock()

	// With the message added, we signal to the msgConsumer that there are
	// additional messages to consume.
	ms.msgCond.Signal()
}
|
|
|
|
|
|
|
|
// newChanMsgStream is used to create a msgStream between the peer and
|
|
|
|
// particular channel link in the htlcswitch. We utilize additional
|
|
|
|
// synchronization with the fundingManager to ensure we don't attempt to
|
|
|
|
// dispatch a message to a channel before it is fully active. A reference to the
|
|
|
|
// channel this stream forwards to his held in scope to prevent unnecessary
|
|
|
|
// lookups.
|
|
|
|
func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream {
|
|
|
|
|
|
|
|
var chanLink htlcswitch.ChannelLink
|
|
|
|
|
|
|
|
return newMsgStream(p,
|
|
|
|
fmt.Sprintf("Update stream for ChannelID(%x) created", cid),
|
|
|
|
fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid),
|
|
|
|
func(msg lnwire.Message) {
|
2017-12-07 03:30:50 +03:00
|
|
|
_, isChanSycMsg := msg.(*lnwire.ChannelReestablish)
|
|
|
|
|
2017-12-18 05:40:05 +03:00
|
|
|
// If this is the chanSync message, then we'll deliver
|
|
|
|
// it immediately to the active link.
|
2017-12-07 03:30:50 +03:00
|
|
|
if !isChanSycMsg {
|
|
|
|
// We'll send a message to the funding manager
|
|
|
|
// and wait iff an active funding process for
|
|
|
|
// this channel hasn't yet completed. We do
|
|
|
|
// this in order to account for the following
|
|
|
|
// scenario: we send the funding locked message
|
|
|
|
// to the other side, they immediately send a
|
|
|
|
// channel update message, but we haven't yet
|
|
|
|
// sent the channel to the channelManager.
|
|
|
|
p.server.fundingMgr.waitUntilChannelOpen(cid)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(roasbeef): only wait if not chan sync
|
2017-11-02 01:50:55 +03:00
|
|
|
|
|
|
|
// Dispatch the commitment update message to the proper active
|
|
|
|
// goroutine dedicated to this channel.
|
|
|
|
if chanLink == nil {
|
|
|
|
link, err := p.server.htlcSwitch.GetLink(cid)
|
|
|
|
if err != nil {
|
|
|
|
peerLog.Errorf("recv'd update for unknown "+
|
|
|
|
"channel %v from %v", cid, p)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
chanLink = link
|
|
|
|
}
|
|
|
|
|
|
|
|
chanLink.HandleChannelUpdate(msg)
|
|
|
|
},
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
// newDiscMsgStream is used to setup a msgStream between the peer and the
|
|
|
|
// authenticated gossiper. This stream should be used to forward all remote
|
|
|
|
// channel announcements.
|
|
|
|
func newDiscMsgStream(p *peer) *msgStream {
|
|
|
|
return newMsgStream(p,
|
|
|
|
"Update stream for gossiper created",
|
|
|
|
"Update stream for gossiper exited",
|
|
|
|
func(msg lnwire.Message) {
|
|
|
|
p.server.authGossiper.ProcessRemoteAnnouncement(msg,
|
|
|
|
p.addr.IdentityKey)
|
|
|
|
},
|
|
|
|
)
|
2017-08-01 07:20:36 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// readHandler is responsible for reading messages off the wire in series, then
|
2017-01-13 08:01:50 +03:00
|
|
|
// properly dispatching the handling of the message to the proper subsystem.
|
2016-06-21 22:32:32 +03:00
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
|
|
|
func (p *peer) readHandler() {
|
2017-08-09 02:51:19 +03:00
|
|
|
|
2017-10-16 01:13:27 +03:00
|
|
|
// We'll stop the timer after a new messages is received, and also
|
|
|
|
// reset it after we process the next message.
|
|
|
|
idleTimer := time.AfterFunc(idleTimeout, func() {
|
|
|
|
err := fmt.Errorf("Peer %s no answer for %s -- disconnecting",
|
|
|
|
p, idleTimeout)
|
|
|
|
p.Disconnect(err)
|
|
|
|
})
|
|
|
|
|
2017-11-02 01:50:55 +03:00
|
|
|
discStream := newDiscMsgStream(p)
|
|
|
|
discStream.Start()
|
|
|
|
defer discStream.Stop()
|
|
|
|
|
|
|
|
chanMsgStreams := make(map[lnwire.ChannelID]*msgStream)
|
2016-01-14 08:41:46 +03:00
|
|
|
out:
|
|
|
|
for atomic.LoadInt32(&p.disconnect) == 0 {
|
2017-04-20 02:23:17 +03:00
|
|
|
nextMsg, err := p.readNextMessage()
|
2017-10-16 01:13:27 +03:00
|
|
|
idleTimer.Stop()
|
2016-01-14 08:41:46 +03:00
|
|
|
if err != nil {
|
2017-01-24 07:33:18 +03:00
|
|
|
peerLog.Infof("unable to read message from %v: %v",
|
|
|
|
p, err)
|
2017-01-17 05:03:34 +03:00
|
|
|
|
|
|
|
switch err.(type) {
|
|
|
|
// If this is just a message we don't yet recognize,
|
|
|
|
// we'll continue processing as normal as this allows
|
|
|
|
// us to introduce new messages in a forwards
|
|
|
|
// compatible manner.
|
|
|
|
case *lnwire.UnknownMessage:
|
2017-10-16 01:13:27 +03:00
|
|
|
idleTimer.Reset(idleTimeout)
|
2017-01-17 05:03:34 +03:00
|
|
|
continue
|
|
|
|
|
|
|
|
// If the error we encountered wasn't just a message we
|
|
|
|
// didn't recognize, then we'll stop all processing s
|
|
|
|
// this is a fatal error.
|
|
|
|
default:
|
|
|
|
break out
|
|
|
|
}
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2017-02-21 05:10:05 +03:00
|
|
|
var (
|
|
|
|
isChanUpdate bool
|
2017-04-17 01:41:11 +03:00
|
|
|
targetChan lnwire.ChannelID
|
2017-02-21 05:10:05 +03:00
|
|
|
)
|
2016-07-13 03:45:29 +03:00
|
|
|
|
2016-01-14 08:41:46 +03:00
|
|
|
switch msg := nextMsg.(type) {
|
2017-01-26 05:20:55 +03:00
|
|
|
case *lnwire.Pong:
|
|
|
|
// When we receive a Pong message in response to our
|
|
|
|
// last ping message, we'll use the time in which we
|
|
|
|
// sent the ping message to measure a rough estimate of
|
|
|
|
// round trip time.
|
|
|
|
pingSendTime := atomic.LoadInt64(&p.pingLastSend)
|
|
|
|
delay := (time.Now().UnixNano() - pingSendTime) / 1000
|
|
|
|
atomic.StoreInt64(&p.pingTime, delay)
|
|
|
|
|
2016-11-11 04:15:25 +03:00
|
|
|
case *lnwire.Ping:
|
2017-04-17 04:11:39 +03:00
|
|
|
pongBytes := make([]byte, msg.NumPongBytes)
|
|
|
|
p.queueMsg(lnwire.NewPong(pongBytes), nil)
|
2016-11-11 04:15:25 +03:00
|
|
|
|
2017-07-31 00:13:28 +03:00
|
|
|
case *lnwire.OpenChannel:
|
|
|
|
p.server.fundingMgr.processFundingOpen(msg, p.addr)
|
|
|
|
case *lnwire.AcceptChannel:
|
2017-08-01 07:04:48 +03:00
|
|
|
p.server.fundingMgr.processFundingAccept(msg, p.addr)
|
2017-07-31 00:13:28 +03:00
|
|
|
case *lnwire.FundingCreated:
|
|
|
|
p.server.fundingMgr.processFundingCreated(msg, p.addr)
|
|
|
|
case *lnwire.FundingSigned:
|
|
|
|
p.server.fundingMgr.processFundingSigned(msg, p.addr)
|
2017-01-31 05:45:28 +03:00
|
|
|
case *lnwire.FundingLocked:
|
|
|
|
p.server.fundingMgr.processFundingLocked(msg, p.addr)
|
2017-03-25 04:26:09 +03:00
|
|
|
|
|
|
|
case *lnwire.Shutdown:
|
2017-09-28 06:18:20 +03:00
|
|
|
select {
|
2017-11-23 10:21:07 +03:00
|
|
|
case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
|
2017-09-28 06:18:20 +03:00
|
|
|
case <-p.quit:
|
|
|
|
break out
|
|
|
|
}
|
2017-03-09 02:32:11 +03:00
|
|
|
case *lnwire.ClosingSigned:
|
2017-09-28 06:18:20 +03:00
|
|
|
select {
|
2017-11-23 10:21:07 +03:00
|
|
|
case p.chanCloseMsgs <- &closeMsg{msg.ChannelID, msg}:
|
2017-09-28 06:18:20 +03:00
|
|
|
case <-p.quit:
|
|
|
|
break out
|
|
|
|
}
|
2016-10-15 16:24:56 +03:00
|
|
|
|
2017-04-17 01:41:11 +03:00
|
|
|
case *lnwire.Error:
|
|
|
|
p.server.fundingMgr.processFundingError(msg, p.addr)
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2017-01-08 07:23:06 +03:00
|
|
|
// TODO(roasbeef): create ChanUpdater interface for the below
|
2017-02-21 05:10:05 +03:00
|
|
|
case *lnwire.UpdateAddHTLC:
|
2016-10-15 16:18:38 +03:00
|
|
|
isChanUpdate = true
|
2017-04-17 01:41:11 +03:00
|
|
|
targetChan = msg.ChanID
|
2017-02-21 05:10:05 +03:00
|
|
|
case *lnwire.UpdateFufillHTLC:
|
2016-10-15 16:18:38 +03:00
|
|
|
isChanUpdate = true
|
2017-12-12 22:22:44 +03:00
|
|
|
targetChan = msg.ChanID
|
|
|
|
case *lnwire.UpdateFailMalformedHTLC:
|
|
|
|
isChanUpdate = true
|
2017-04-17 01:41:11 +03:00
|
|
|
targetChan = msg.ChanID
|
2017-02-21 05:10:05 +03:00
|
|
|
case *lnwire.UpdateFailHTLC:
|
2017-01-08 07:23:06 +03:00
|
|
|
isChanUpdate = true
|
2017-04-17 01:41:11 +03:00
|
|
|
targetChan = msg.ChanID
|
2017-02-21 05:10:05 +03:00
|
|
|
case *lnwire.RevokeAndAck:
|
2016-10-15 16:18:38 +03:00
|
|
|
isChanUpdate = true
|
2017-04-17 01:41:11 +03:00
|
|
|
targetChan = msg.ChanID
|
2017-02-21 05:10:05 +03:00
|
|
|
case *lnwire.CommitSig:
|
2016-10-15 16:18:38 +03:00
|
|
|
isChanUpdate = true
|
2017-04-17 01:41:11 +03:00
|
|
|
targetChan = msg.ChanID
|
2017-07-14 21:43:20 +03:00
|
|
|
case *lnwire.UpdateFee:
|
|
|
|
isChanUpdate = true
|
|
|
|
targetChan = msg.ChanID
|
2017-07-09 02:30:20 +03:00
|
|
|
case *lnwire.ChannelReestablish:
|
|
|
|
isChanUpdate = true
|
|
|
|
targetChan = msg.ChanID
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2017-04-20 02:23:17 +03:00
|
|
|
case *lnwire.ChannelUpdate,
|
2016-12-27 08:42:23 +03:00
|
|
|
*lnwire.ChannelAnnouncement,
|
2017-03-28 22:08:14 +03:00
|
|
|
*lnwire.NodeAnnouncement,
|
|
|
|
*lnwire.AnnounceSignatures:
|
2016-12-27 08:42:23 +03:00
|
|
|
|
2017-11-02 01:50:55 +03:00
|
|
|
discStream.AddMsg(msg)
|
|
|
|
|
2017-03-28 22:08:14 +03:00
|
|
|
default:
|
2017-10-17 04:43:14 +03:00
|
|
|
peerLog.Errorf("unknown message %v received from peer "+
|
|
|
|
"%v", uint16(msg.MsgType()), p)
|
2016-07-13 03:45:29 +03:00
|
|
|
}
|
|
|
|
|
2016-10-15 16:18:38 +03:00
|
|
|
if isChanUpdate {
|
2017-08-01 07:25:46 +03:00
|
|
|
// If this is a channel update, then we need to feed it
|
|
|
|
// into the channel's in-order message stream.
|
|
|
|
chanStream, ok := chanMsgStreams[targetChan]
|
|
|
|
if !ok {
|
|
|
|
// If a stream hasn't yet been created, then
|
|
|
|
// we'll do so, add it to the map, and finally
|
|
|
|
// start it.
|
2017-11-02 01:50:55 +03:00
|
|
|
chanStream = newChanMsgStream(p, targetChan)
|
2017-08-01 07:25:46 +03:00
|
|
|
chanMsgStreams[targetChan] = chanStream
|
|
|
|
chanStream.Start()
|
2016-07-13 03:45:29 +03:00
|
|
|
}
|
2017-04-17 01:45:18 +03:00
|
|
|
|
2017-08-01 07:25:46 +03:00
|
|
|
// With the stream obtained, add the message to the
|
|
|
|
// stream so we can continue processing message.
|
|
|
|
chanStream.AddMsg(nextMsg)
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
2017-10-16 01:13:27 +03:00
|
|
|
|
|
|
|
idleTimer.Reset(idleTimeout)
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2017-09-28 06:22:40 +03:00
|
|
|
p.wg.Done()
|
|
|
|
|
2017-07-12 16:44:17 +03:00
|
|
|
p.Disconnect(errors.New("read handler closed"))
|
2016-07-14 02:40:01 +03:00
|
|
|
|
2017-08-03 23:52:02 +03:00
|
|
|
for cid, chanStream := range chanMsgStreams {
|
2017-08-01 07:25:46 +03:00
|
|
|
chanStream.Stop()
|
2017-08-03 23:52:02 +03:00
|
|
|
|
|
|
|
delete(chanMsgStreams, cid)
|
2017-08-01 07:25:46 +03:00
|
|
|
}
|
|
|
|
|
2016-07-14 02:40:01 +03:00
|
|
|
peerLog.Tracef("readHandler for peer %v done", p)
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2017-10-18 05:13:19 +03:00
|
|
|
// messageSummary returns a human-readable string that summarizes a
|
|
|
|
// incoming/outgoing message. Not all messages will have a summary, only those
|
|
|
|
// which have additional data that can be informative at a glance.
|
|
|
|
func messageSummary(msg lnwire.Message) string {
|
|
|
|
switch msg := msg.(type) {
|
|
|
|
case *lnwire.Init:
|
|
|
|
// No summary.
|
|
|
|
return ""
|
|
|
|
|
|
|
|
case *lnwire.OpenChannel:
|
2017-10-18 05:35:10 +03:00
|
|
|
return fmt.Sprintf("temp_chan_id=%x, chain=%v, csv=%v, amt=%v, "+
|
2017-10-18 05:13:19 +03:00
|
|
|
"push_amt=%v, reserve=%v, flags=%v",
|
2017-10-18 05:35:10 +03:00
|
|
|
msg.PendingChannelID[:], msg.ChainHash,
|
2017-10-18 05:13:19 +03:00
|
|
|
msg.CsvDelay, msg.FundingAmount, msg.PushAmount,
|
|
|
|
msg.ChannelReserve, msg.ChannelFlags)
|
|
|
|
|
|
|
|
case *lnwire.AcceptChannel:
|
2017-11-28 02:32:06 +03:00
|
|
|
return fmt.Sprintf("temp_chan_id=%x, reserve=%v, csv=%v, num_confs=%v",
|
|
|
|
msg.PendingChannelID[:], msg.ChannelReserve, msg.CsvDelay,
|
|
|
|
msg.MinAcceptDepth)
|
2017-10-18 05:13:19 +03:00
|
|
|
|
|
|
|
case *lnwire.FundingCreated:
|
|
|
|
return fmt.Sprintf("temp_chan_id=%x, chan_point=%v",
|
|
|
|
msg.PendingChannelID[:], msg.FundingPoint)
|
|
|
|
|
|
|
|
case *lnwire.FundingSigned:
|
|
|
|
return fmt.Sprintf("chan_id=%v", msg.ChanID)
|
|
|
|
|
|
|
|
case *lnwire.FundingLocked:
|
|
|
|
return fmt.Sprintf("chan_id=%v, next_point=%x",
|
|
|
|
msg.ChanID, msg.NextPerCommitmentPoint.SerializeCompressed())
|
|
|
|
|
|
|
|
case *lnwire.Shutdown:
|
|
|
|
return fmt.Sprintf("chan_id=%v, script=%x", msg.ChannelID,
|
|
|
|
msg.Address[:])
|
|
|
|
|
|
|
|
case *lnwire.ClosingSigned:
|
|
|
|
return fmt.Sprintf("chan_id=%v, fee_sat=%v", msg.ChannelID,
|
|
|
|
msg.FeeSatoshis)
|
|
|
|
|
|
|
|
case *lnwire.UpdateAddHTLC:
|
|
|
|
return fmt.Sprintf("chan_id=%v, id=%v, amt=%v, expiry=%v, hash=%x",
|
|
|
|
msg.ChanID, msg.ID, msg.Amount, msg.Expiry, msg.PaymentHash[:])
|
|
|
|
|
|
|
|
case *lnwire.UpdateFailHTLC:
|
2017-10-21 01:40:50 +03:00
|
|
|
return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID,
|
2017-10-18 05:13:19 +03:00
|
|
|
msg.ID, msg.Reason)
|
|
|
|
|
|
|
|
case *lnwire.UpdateFufillHTLC:
|
|
|
|
return fmt.Sprintf("chan_id=%v, id=%v, pre_image=%x",
|
|
|
|
msg.ChanID, msg.ID, msg.PaymentPreimage[:])
|
|
|
|
|
|
|
|
case *lnwire.CommitSig:
|
|
|
|
return fmt.Sprintf("chan_id=%v, num_htlcs=%v", msg.ChanID,
|
|
|
|
len(msg.HtlcSigs))
|
|
|
|
|
|
|
|
case *lnwire.RevokeAndAck:
|
|
|
|
return fmt.Sprintf("chan_id=%v, rev=%x, next_point=%x",
|
|
|
|
msg.ChanID, msg.Revocation[:],
|
|
|
|
msg.NextRevocationKey.SerializeCompressed())
|
|
|
|
|
|
|
|
case *lnwire.UpdateFailMalformedHTLC:
|
|
|
|
return fmt.Sprintf("chan_id=%v, id=%v, fail_code=%v",
|
|
|
|
msg.ChanID, msg.ID, msg.FailureCode)
|
|
|
|
|
|
|
|
case *lnwire.Error:
|
2017-12-01 09:19:12 +03:00
|
|
|
return fmt.Sprintf("chan_id=%v, err=%v", msg.ChanID, string(msg.Data))
|
2017-10-18 05:13:19 +03:00
|
|
|
|
|
|
|
case *lnwire.AnnounceSignatures:
|
|
|
|
return fmt.Sprintf("chan_id=%v, short_chan_id=%v", msg.ChannelID,
|
|
|
|
msg.ShortChannelID.ToUint64())
|
|
|
|
|
|
|
|
case *lnwire.ChannelAnnouncement:
|
2017-10-18 05:35:10 +03:00
|
|
|
return fmt.Sprintf("chain_hash=%v, short_chan_id=%v",
|
|
|
|
msg.ChainHash, msg.ShortChannelID.ToUint64())
|
2017-10-18 05:13:19 +03:00
|
|
|
|
|
|
|
case *lnwire.ChannelUpdate:
|
2017-11-23 10:21:07 +03:00
|
|
|
return fmt.Sprintf("chain_hash=%v, short_chan_id=%v, flag=%v, "+
|
|
|
|
"update_time=%v", msg.ChainHash,
|
|
|
|
msg.ShortChannelID.ToUint64(), msg.Flags,
|
2017-10-18 05:13:19 +03:00
|
|
|
time.Unix(int64(msg.Timestamp), 0))
|
|
|
|
|
|
|
|
case *lnwire.NodeAnnouncement:
|
|
|
|
return fmt.Sprintf("node=%x, update_time=%v",
|
|
|
|
msg.NodeID.SerializeCompressed(),
|
|
|
|
time.Unix(int64(msg.Timestamp), 0))
|
|
|
|
|
|
|
|
case *lnwire.Ping:
|
|
|
|
// No summary.
|
|
|
|
return ""
|
|
|
|
|
|
|
|
case *lnwire.Pong:
|
|
|
|
// No summary.
|
|
|
|
return ""
|
|
|
|
|
|
|
|
case *lnwire.UpdateFee:
|
|
|
|
return fmt.Sprintf("chan_id=%v, fee_update_sat=%v",
|
|
|
|
msg.ChanID, int64(msg.FeePerKw))
|
2017-11-11 02:40:15 +03:00
|
|
|
|
|
|
|
case *lnwire.ChannelReestablish:
|
|
|
|
return fmt.Sprintf("next_local_height=%v, remote_tail_height=%v",
|
|
|
|
msg.NextLocalCommitHeight, msg.RemoteCommitTailHeight)
|
2017-10-18 05:13:19 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
2017-01-15 04:52:05 +03:00
|
|
|
// logWireMessage logs the receipt or sending of particular wire message. This
// function is used rather than just logging the message in order to produce
// less spammy log messages in trace mode by setting the 'Curve' parameter to
// nil. Doing this avoids printing out each of the field elements in the curve
// parameters for secp256k1.
//
// NOTE(review): nil-ing the Curve fields below mutates the caller's message
// in place, not a copy — callers must not rely on those Curve pointers after
// this returns.
func (p *peer) logWireMessage(msg lnwire.Message, read bool) {
	summaryPrefix := "Received"
	if !read {
		summaryPrefix = "Sending"
	}

	// Emit a one-line debug summary. The closure defers the formatting
	// work so it's only paid when debug logging is actually enabled.
	peerLog.Debugf("%v", newLogClosure(func() string {
		// Debug summary of message.
		summary := messageSummary(msg)
		if len(summary) > 0 {
			summary = "(" + summary + ")"
		}

		preposition := "to"
		if read {
			preposition = "from"
		}

		return fmt.Sprintf("%v %v%s %v %s", summaryPrefix,
			msg.MsgType(), summary, preposition, p)
	}))

	// Strip the secp256k1 curve parameters from every public key carried
	// by the message so the spew dump below stays readable.
	switch m := msg.(type) {
	case *lnwire.ChannelReestablish:
		if m.LocalUnrevokedCommitPoint != nil {
			m.LocalUnrevokedCommitPoint.Curve = nil
		}
	case *lnwire.RevokeAndAck:
		m.NextRevocationKey.Curve = nil
	case *lnwire.NodeAnnouncement:
		m.NodeID.Curve = nil
	case *lnwire.ChannelAnnouncement:
		m.NodeID1.Curve = nil
		m.NodeID2.Curve = nil
		m.BitcoinKey1.Curve = nil
		m.BitcoinKey2.Curve = nil
	case *lnwire.AcceptChannel:
		m.FundingKey.Curve = nil
		m.RevocationPoint.Curve = nil
		m.PaymentPoint.Curve = nil
		m.DelayedPaymentPoint.Curve = nil
		m.HtlcPoint.Curve = nil
		m.FirstCommitmentPoint.Curve = nil
	case *lnwire.OpenChannel:
		m.FundingKey.Curve = nil
		m.RevocationPoint.Curve = nil
		m.PaymentPoint.Curve = nil
		m.DelayedPaymentPoint.Curve = nil
		m.HtlcPoint.Curve = nil
		m.FirstCommitmentPoint.Curve = nil
	case *lnwire.FundingLocked:
		m.NextPerCommitmentPoint.Curve = nil
	}

	prefix := "readMessage from"
	if !read {
		prefix = "writeMessage to"
	}

	// Full structured dump at trace level, again deferred via a closure
	// so spew only runs when trace logging is enabled.
	peerLog.Tracef(prefix+" %v: %v", p, newLogClosure(func() string {
		return spew.Sdump(msg)
	}))
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// writeMessage writes the target lnwire.Message to the remote peer.
|
2016-01-14 08:41:46 +03:00
|
|
|
func (p *peer) writeMessage(msg lnwire.Message) error {
|
|
|
|
// Simply exit if we're shutting down.
|
|
|
|
if atomic.LoadInt32(&p.disconnect) != 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// TODO(roasbeef): add message summaries
|
2017-01-15 04:52:05 +03:00
|
|
|
p.logWireMessage(msg, false)
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-04-21 01:45:04 +03:00
|
|
|
// As the Lightning wire protocol is fully message oriented, we only
|
|
|
|
// allows one wire message per outer encapsulated crypto message. So
|
|
|
|
// we'll create a temporary buffer to write the message directly to.
|
|
|
|
var msgPayload [lnwire.MaxMessagePayload]byte
|
|
|
|
b := bytes.NewBuffer(msgPayload[0:0:len(msgPayload)])
|
|
|
|
|
|
|
|
// With the temp buffer created and sliced properly (length zero, full
|
|
|
|
// capacity), we'll now encode the message directly into this buffer.
|
|
|
|
n, err := lnwire.WriteMessage(b, msg, 0)
|
2016-06-21 22:32:32 +03:00
|
|
|
atomic.AddUint64(&p.bytesSent, uint64(n))
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2017-10-16 01:19:45 +03:00
|
|
|
// TODO(roasbeef): add write deadline?
|
|
|
|
|
2017-04-21 01:45:04 +03:00
|
|
|
// Finally, write the message itself in a single swoop.
|
|
|
|
_, err = p.conn.Write(b.Bytes())
|
2016-01-14 08:41:46 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// writeHandler is a goroutine dedicated to reading messages off of an incoming
|
|
|
|
// queue, and writing them out to the wire. This goroutine coordinates with the
|
2017-10-16 01:19:45 +03:00
|
|
|
// queueHandler in order to ensure the incoming message queue is quickly
|
|
|
|
// drained.
|
2016-06-21 22:32:32 +03:00
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
|
|
|
func (p *peer) writeHandler() {
|
2017-09-28 06:22:40 +03:00
|
|
|
var exitErr error
|
|
|
|
out:
|
2016-01-14 08:41:46 +03:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case outMsg := <-p.sendQueue:
|
2017-01-26 05:20:55 +03:00
|
|
|
switch outMsg.msg.(type) {
|
2017-02-02 04:01:33 +03:00
|
|
|
// If we're about to send a ping message, then log the
|
|
|
|
// exact time in which we send the message so we can
|
|
|
|
// use the delay as a rough estimate of latency to the
|
|
|
|
// remote peer.
|
2017-01-26 05:20:55 +03:00
|
|
|
case *lnwire.Ping:
|
|
|
|
// TODO(roasbeef): do this before the write?
|
|
|
|
// possibly account for processing within func?
|
|
|
|
now := time.Now().UnixNano()
|
|
|
|
atomic.StoreInt64(&p.pingLastSend, now)
|
|
|
|
}
|
|
|
|
|
2017-02-02 04:01:33 +03:00
|
|
|
// Write out the message to the socket, closing the
|
|
|
|
// 'sentChan' if it's non-nil, The 'sentChan' allows
|
|
|
|
// callers to optionally synchronize sends with the
|
|
|
|
// writeHandler.
|
|
|
|
err := p.writeMessage(outMsg.msg)
|
2017-11-16 05:23:46 +03:00
|
|
|
if outMsg.errChan != nil {
|
|
|
|
outMsg.errChan <- err
|
2017-02-02 04:01:33 +03:00
|
|
|
}
|
2016-01-14 08:41:46 +03:00
|
|
|
|
2017-02-02 04:01:33 +03:00
|
|
|
if err != nil {
|
2017-09-28 06:22:40 +03:00
|
|
|
exitErr = errors.Errorf("unable to write message: %v", err)
|
|
|
|
break out
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
2017-02-02 04:01:33 +03:00
|
|
|
|
|
|
|
case <-p.quit:
|
2017-09-28 06:22:40 +03:00
|
|
|
exitErr = errors.Errorf("peer exiting")
|
|
|
|
break out
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
}
|
2017-09-28 06:22:40 +03:00
|
|
|
|
|
|
|
p.wg.Done()
|
|
|
|
|
|
|
|
p.Disconnect(exitErr)
|
|
|
|
|
|
|
|
peerLog.Tracef("writeHandler for peer %v done", p)
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
|
|
|
|
2017-01-13 08:01:50 +03:00
|
|
|
// queueHandler is responsible for accepting messages from outside subsystems
|
2016-06-21 22:32:32 +03:00
|
|
|
// to be eventually sent out on the wire by the writeHandler.
|
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
2016-01-14 08:41:46 +03:00
|
|
|
func (p *peer) queueHandler() {
|
2017-02-02 04:01:33 +03:00
|
|
|
defer p.wg.Done()
|
|
|
|
|
2017-11-16 04:56:33 +03:00
|
|
|
// pendingMsgs will hold all messages waiting to be added
|
|
|
|
// to the sendQueue.
|
2016-01-14 08:41:46 +03:00
|
|
|
pendingMsgs := list.New()
|
2017-02-02 04:01:33 +03:00
|
|
|
|
2017-11-16 04:56:33 +03:00
|
|
|
for {
|
|
|
|
// Examine the front of the queue.
|
|
|
|
elem := pendingMsgs.Front()
|
|
|
|
if elem != nil {
|
|
|
|
// There's an element on the queue, try adding
|
|
|
|
// it to the sendQueue. We also watch for
|
|
|
|
// messages on the outgoingQueue, in case the
|
|
|
|
// writeHandler cannot accept messages on the
|
|
|
|
// sendQueue.
|
2017-02-02 04:01:33 +03:00
|
|
|
select {
|
|
|
|
case p.sendQueue <- elem.Value.(outgoinMsg):
|
|
|
|
pendingMsgs.Remove(elem)
|
2017-11-16 04:56:33 +03:00
|
|
|
case msg := <-p.outgoingQueue:
|
|
|
|
pendingMsgs.PushBack(msg)
|
|
|
|
case <-p.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// If there weren't any messages to send to the
|
|
|
|
// writeHandler, then we'll accept a new message
|
|
|
|
// into the queue from outside sub-systems.
|
|
|
|
select {
|
|
|
|
case msg := <-p.outgoingQueue:
|
|
|
|
pendingMsgs.PushBack(msg)
|
2017-02-02 04:01:33 +03:00
|
|
|
case <-p.quit:
|
|
|
|
return
|
2016-01-14 08:41:46 +03:00
|
|
|
}
|
2017-02-02 04:01:33 +03:00
|
|
|
}
|
|
|
|
}
|
2015-12-21 00:16:38 +03:00
|
|
|
}
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2016-11-11 04:15:25 +03:00
|
|
|
// pingHandler is responsible for periodically sending ping messages to the
|
|
|
|
// remote peer in order to keep the connection alive and/or determine if the
|
|
|
|
// connection is still active.
|
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
|
|
|
func (p *peer) pingHandler() {
|
2017-08-09 02:51:19 +03:00
|
|
|
defer p.wg.Done()
|
|
|
|
|
2016-11-11 04:15:25 +03:00
|
|
|
pingTicker := time.NewTicker(pingInterval)
|
|
|
|
defer pingTicker.Stop()
|
|
|
|
|
2017-04-17 04:11:39 +03:00
|
|
|
// TODO(roasbeef): make dynamic in order to create fake cover traffic
|
|
|
|
const numPingBytes = 16
|
2016-11-11 04:15:25 +03:00
|
|
|
|
|
|
|
out:
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-pingTicker.C:
|
2017-04-17 04:11:39 +03:00
|
|
|
p.queueMsg(lnwire.NewPing(numPingBytes), nil)
|
2016-11-11 04:15:25 +03:00
|
|
|
case <-p.quit:
|
|
|
|
break out
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-26 05:20:55 +03:00
|
|
|
// PingTime returns the estimated ping time to the peer in microseconds. The
// value is maintained by the read/write handlers from the delay between the
// last ping sent and the corresponding pong, and is safe to read from any
// goroutine.
func (p *peer) PingTime() int64 {
	return atomic.LoadInt64(&p.pingTime)
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// queueMsg queues a new lnwire.Message to be eventually sent out on the
// wire. If errChan is non-nil, the result of the send is delivered on it:
// an error if the peer shuts down before the message can be enqueued (or if
// the write later fails in the writeHandler), nil otherwise. Note that this
// function itself returns nothing — delivery feedback happens solely via
// errChan.
func (p *peer) queueMsg(msg lnwire.Message, errChan chan error) {
	select {
	case p.outgoingQueue <- outgoinMsg{msg, errChan}:
	case <-p.quit:
		peerLog.Debugf("Peer shutting down, could not enqueue msg.")
		if errChan != nil {
			errChan <- fmt.Errorf("peer shutting down")
		}
	}
}
|
|
|
|
|
2016-11-11 04:15:25 +03:00
|
|
|
// ChannelSnapshots returns a slice of channel snapshots detailing all
|
|
|
|
// currently active channels maintained with the remote peer.
|
2016-06-23 08:22:06 +03:00
|
|
|
func (p *peer) ChannelSnapshots() []*channeldb.ChannelSnapshot {
|
2017-08-09 02:51:19 +03:00
|
|
|
p.activeChanMtx.RLock()
|
|
|
|
defer p.activeChanMtx.RUnlock()
|
|
|
|
|
|
|
|
snapshots := make([]*channeldb.ChannelSnapshot, 0, len(p.activeChannels))
|
|
|
|
for _, activeChan := range p.activeChannels {
|
|
|
|
snapshot := activeChan.StateSnapshot()
|
|
|
|
snapshots = append(snapshots, snapshot)
|
|
|
|
}
|
|
|
|
|
|
|
|
return snapshots
|
2016-06-23 08:22:06 +03:00
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// genDeliveryScript returns a new script to be used to send our funds to in
|
|
|
|
// the case of a cooperative channel close negotiation.
|
|
|
|
func (p *peer) genDeliveryScript() ([]byte, error) {
|
|
|
|
deliveryAddr, err := p.server.cc.wallet.NewAddress(
|
|
|
|
lnwallet.WitnessPubKey, false,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
peerLog.Infof("Delivery addr for channel close: %v",
|
|
|
|
deliveryAddr)
|
|
|
|
|
|
|
|
return txscript.PayToAddrScript(deliveryAddr)
|
2017-07-31 00:21:21 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// channelManager is goroutine dedicated to handling all requests/signals
|
|
|
|
// pertaining to the opening, cooperative closing, and force closing of all
|
|
|
|
// channels maintained with the remote peer.
|
|
|
|
//
|
|
|
|
// NOTE: This method MUST be run as a goroutine.
|
|
|
|
func (p *peer) channelManager() {
|
2017-08-09 02:51:19 +03:00
|
|
|
defer p.wg.Done()
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
out:
|
|
|
|
for {
|
|
|
|
select {
|
2017-07-31 00:21:21 +03:00
|
|
|
// A new channel has arrived which means we've just completed a
|
|
|
|
// funding workflow. We'll initialize the necessary local
|
|
|
|
// state, and notify the htlc switch of a new link.
|
2017-01-24 02:33:46 +03:00
|
|
|
case newChanReq := <-p.newChannels:
|
2017-04-17 01:41:11 +03:00
|
|
|
chanPoint := newChanReq.channel.ChannelPoint()
|
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
|
2017-07-31 00:21:21 +03:00
|
|
|
newChan := newChanReq.channel
|
2016-11-18 05:43:33 +03:00
|
|
|
|
2017-10-02 14:11:26 +03:00
|
|
|
// Make sure this channel is not already active.
|
|
|
|
p.activeChanMtx.Lock()
|
peer: properly process retransmitted FundingLocked message we've never processed
In this commit, we modify the logic within the channelManager to be
able to process any retransmitted FundingLocked messages. Before this
commit, we would simply ignore any new channels sent to us, iff, we
already had an active channel with the same channel point. With the
recent change to the loadActiveChannels method in the peer, this is now
incorrect.
When a peer retransmits the FundingLocked message, it goes through to
the fundingManager. The fundingMgr will then (if we haven’t already
processed it), send the channel to the breach arbiter and also to the
peer’s channelManager. In order to handle this case properly, if we
already have the channel, we’ll check if our current channel *doesn’t*
already have the RemoteNextRevocation field set. If it doesn’t, then
this means that we haven’t yet processed the FundingLcoked message, so
we’ll process it for the first time.
This new logic will properly:
* ensure that the breachArbiter still has the most up to date channel
* allow us to update the state of the link has been added to the
switch at this point
* this link will now be eligible for forwarding after this
sequence
2017-12-06 05:00:33 +03:00
|
|
|
if currentChan, ok := p.activeChannels[chanID]; ok {
|
2017-11-23 10:21:07 +03:00
|
|
|
peerLog.Infof("Already have ChannelPoint(%v), "+
|
|
|
|
"ignoring.", chanPoint)
|
peer: properly process retransmitted FundingLocked message we've never processed
In this commit, we modify the logic within the channelManager to be
able to process any retransmitted FundingLocked messages. Before this
commit, we would simply ignore any new channels sent to us, iff, we
already had an active channel with the same channel point. With the
recent change to the loadActiveChannels method in the peer, this is now
incorrect.
When a peer retransmits the FundingLocked message, it goes through to
the fundingManager. The fundingMgr will then (if we haven’t already
processed it), send the channel to the breach arbiter and also to the
peer’s channelManager. In order to handle this case properly, if we
already have the channel, we’ll check if our current channel *doesn’t*
already have the RemoteNextRevocation field set. If it doesn’t, then
this means that we haven’t yet processed the FundingLcoked message, so
we’ll process it for the first time.
This new logic will properly:
* ensure that the breachArbiter still has the most up to date channel
* allow us to update the state of the link has been added to the
switch at this point
* this link will now be eligible for forwarding after this
sequence
2017-12-06 05:00:33 +03:00
|
|
|
|
2017-10-02 14:11:26 +03:00
|
|
|
p.activeChanMtx.Unlock()
|
|
|
|
close(newChanReq.done)
|
|
|
|
newChanReq.channel.Stop()
|
2017-11-23 10:21:07 +03:00
|
|
|
newChanReq.channel.CancelObserver()
|
peer: properly process retransmitted FundingLocked message we've never processed
In this commit, we modify the logic within the channelManager to be
able to process any retransmitted FundingLocked messages. Before this
commit, we would simply ignore any new channels sent to us, iff, we
already had an active channel with the same channel point. With the
recent change to the loadActiveChannels method in the peer, this is now
incorrect.
When a peer retransmits the FundingLocked message, it goes through to
the fundingManager. The fundingMgr will then (if we haven’t already
processed it), send the channel to the breach arbiter and also to the
peer’s channelManager. In order to handle this case properly, if we
already have the channel, we’ll check if our current channel *doesn’t*
already have the RemoteNextRevocation field set. If it doesn’t, then
this means that we haven’t yet processed the FundingLcoked message, so
we’ll process it for the first time.
This new logic will properly:
* ensure that the breachArbiter still has the most up to date channel
* allow us to update the state of the link has been added to the
switch at this point
* this link will now be eligible for forwarding after this
sequence
2017-12-06 05:00:33 +03:00
|
|
|
|
|
|
|
// We'll re-send our current channel to the
|
|
|
|
// breachArbiter to ensure that it has the most
|
|
|
|
// up to date version.
|
|
|
|
select {
|
|
|
|
case p.server.breachArbiter.newContracts <- currentChan:
|
|
|
|
case <-p.server.quit:
|
|
|
|
return
|
|
|
|
case <-p.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// If we're being sent a new channel, and our
|
|
|
|
// existing channel doesn't have the next
|
|
|
|
// revocation, then we need to update the
|
2017-12-18 05:40:05 +03:00
|
|
|
// current existing channel.
|
peer: properly process retransmitted FundingLocked message we've never processed
In this commit, we modify the logic within the channelManager to be
able to process any retransmitted FundingLocked messages. Before this
commit, we would simply ignore any new channels sent to us, iff, we
already had an active channel with the same channel point. With the
recent change to the loadActiveChannels method in the peer, this is now
incorrect.
When a peer retransmits the FundingLocked message, it goes through to
the fundingManager. The fundingMgr will then (if we haven’t already
processed it), send the channel to the breach arbiter and also to the
peer’s channelManager. In order to handle this case properly, if we
already have the channel, we’ll check if our current channel *doesn’t*
already have the RemoteNextRevocation field set. If it doesn’t, then
this means that we haven’t yet processed the FundingLcoked message, so
we’ll process it for the first time.
This new logic will properly:
* ensure that the breachArbiter still has the most up to date channel
* allow us to update the state of the link has been added to the
switch at this point
* this link will now be eligible for forwarding after this
sequence
2017-12-06 05:00:33 +03:00
|
|
|
if currentChan.RemoteNextRevocation() != nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
peerLog.Infof("Processing retransmitted "+
|
|
|
|
"FundingLocked for ChannelPoint(%v)",
|
|
|
|
chanPoint)
|
|
|
|
|
|
|
|
nextRevoke := newChan.RemoteNextRevocation()
|
|
|
|
err := currentChan.InitNextRevocation(nextRevoke)
|
|
|
|
if err != nil {
|
|
|
|
peerLog.Errorf("unable to init chan "+
|
|
|
|
"revocation: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-10-02 14:11:26 +03:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// If not already active, we'll add this channel to the
|
|
|
|
// set of active channels, so we can look it up later
|
|
|
|
// easily according to its channel ID.
|
2017-07-31 00:21:21 +03:00
|
|
|
p.activeChannels[chanID] = newChan
|
2016-11-18 05:43:33 +03:00
|
|
|
p.activeChanMtx.Unlock()
|
2016-06-21 22:32:32 +03:00
|
|
|
|
|
|
|
peerLog.Infof("New channel active ChannelPoint(%v) "+
|
|
|
|
"with peerId(%v)", chanPoint, p.id)
|
|
|
|
|
2017-07-31 00:21:21 +03:00
|
|
|
// Next, we'll assemble a ChannelLink along with the
|
|
|
|
// necessary items it needs to function.
|
2017-08-03 07:15:49 +03:00
|
|
|
//
|
|
|
|
// TODO(roasbeef): panic on below?
|
|
|
|
blockEpoch, err := p.server.cc.chainNotifier.RegisterBlockEpochNtfn()
|
|
|
|
if err != nil {
|
|
|
|
peerLog.Errorf("unable to register for block epoch: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
_, currentHeight, err := p.server.cc.chainIO.GetBestBlock()
|
|
|
|
if err != nil {
|
|
|
|
peerLog.Errorf("unable to get best block: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
2017-07-31 00:21:21 +03:00
|
|
|
linkConfig := htlcswitch.ChannelLinkConfig{
|
|
|
|
Peer: p,
|
|
|
|
DecodeHopIterator: p.server.sphinx.DecodeHopIterator,
|
2017-10-11 05:38:31 +03:00
|
|
|
DecodeOnionObfuscator: p.server.sphinx.ExtractErrorEncrypter,
|
2017-07-31 00:21:21 +03:00
|
|
|
GetLastChannelUpdate: createGetLastUpdate(p.server.chanRouter,
|
|
|
|
p.PubKey(), newChanReq.channel.ShortChanID()),
|
|
|
|
SettledContracts: p.server.breachArbiter.settledContracts,
|
|
|
|
DebugHTLC: cfg.DebugHTLC,
|
2017-09-01 04:30:11 +03:00
|
|
|
HodlHTLC: cfg.HodlHTLC,
|
2017-07-31 00:21:21 +03:00
|
|
|
Registry: p.server.invoices,
|
|
|
|
Switch: p.server.htlcSwitch,
|
|
|
|
FwrdingPolicy: p.server.cc.routingPolicy,
|
2017-11-24 08:08:38 +03:00
|
|
|
FeeEstimator: p.server.cc.feeEstimator,
|
2017-08-03 07:15:49 +03:00
|
|
|
BlockEpochs: blockEpoch,
|
2017-07-09 02:30:20 +03:00
|
|
|
SyncStates: false,
|
2017-07-31 00:21:21 +03:00
|
|
|
}
|
2017-08-03 07:15:49 +03:00
|
|
|
link := htlcswitch.NewChannelLink(linkConfig, newChan,
|
|
|
|
uint32(currentHeight))
|
2017-05-02 23:04:58 +03:00
|
|
|
|
2017-07-31 00:21:21 +03:00
|
|
|
// With the channel link created, we'll now notify the
|
|
|
|
// htlc switch so this channel can be used to dispatch
|
|
|
|
// local payments and also passively forward payments.
|
2017-08-03 07:15:49 +03:00
|
|
|
if err := p.server.htlcSwitch.AddLink(link); err != nil {
|
2017-05-02 23:04:58 +03:00
|
|
|
peerLog.Errorf("can't register new channel "+
|
|
|
|
"link(%v) with peerId(%v)", chanPoint, p.id)
|
|
|
|
}
|
2017-01-24 02:33:46 +03:00
|
|
|
|
|
|
|
close(newChanReq.done)
|
2016-07-14 02:40:01 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// We've just received a local request to close an active
|
|
|
|
// channel. If will either kick of a cooperative channel
|
|
|
|
// closure negotiation, or be a notification of a breached
|
|
|
|
// contract that should be abandoned.
|
2016-06-21 22:32:32 +03:00
|
|
|
case req := <-p.localCloseChanReqs:
|
2017-11-23 10:21:07 +03:00
|
|
|
p.handleLocalCloseReq(req)
|
|
|
|
|
|
|
|
// We've received a new cooperative channel closure related
|
|
|
|
// message from the remote peer, we'll use this message to
|
|
|
|
// advance the chan closer state machine.
|
|
|
|
case closeMsg := <-p.chanCloseMsgs:
|
|
|
|
// We'll now fetch the matching closing state machine
|
|
|
|
// in order to continue, or finalize the channel
|
|
|
|
// closure process.
|
|
|
|
chanCloser, err := p.fetchActiveChanCloser(closeMsg.cid)
|
|
|
|
if err != nil {
|
|
|
|
// TODO(roasbeef): send protocol error?
|
|
|
|
peerLog.Errorf("unable to respond to remote "+
|
|
|
|
"close msg: %v", err)
|
|
|
|
continue
|
|
|
|
}
|
2017-07-31 00:21:21 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// Next, we'll process the next message using the
|
|
|
|
// target state machine. We'll either continue
|
|
|
|
// negotiation, or halt.
|
|
|
|
msgs, closeFin, err := chanCloser.ProcessCloseMsg(
|
|
|
|
closeMsg.msg,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
err := fmt.Errorf("unable to process close "+
|
|
|
|
"msg: %v", err)
|
|
|
|
peerLog.Error(err)
|
2017-07-31 00:21:21 +03:00
|
|
|
|
2017-11-23 22:49:48 +03:00
|
|
|
// As the negotiations failed, we'll reset the
|
|
|
|
// channel state to ensure we act to on-chain
|
|
|
|
// events as normal.
|
|
|
|
chanCloser.cfg.channel.ResetState()
|
2017-07-31 00:21:21 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
if chanCloser.CloseRequest() != nil {
|
|
|
|
chanCloser.CloseRequest().Err <- err
|
2017-07-31 00:21:21 +03:00
|
|
|
}
|
2017-11-23 10:21:07 +03:00
|
|
|
delete(p.activeChanCloses, closeMsg.cid)
|
2017-07-31 00:21:21 +03:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// Queue any messages to the remote peer that need to
|
|
|
|
// be sent as a part of this latest round of
|
|
|
|
// negotiations.
|
|
|
|
for _, msg := range msgs {
|
|
|
|
p.queueMsg(msg, nil)
|
2017-03-25 04:26:09 +03:00
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// If we haven't finished close negotiations, then
|
|
|
|
// we'll continue as we can't yet finalize the closure.
|
|
|
|
if !closeFin {
|
2017-05-24 01:26:38 +03:00
|
|
|
continue
|
2017-03-25 04:26:09 +03:00
|
|
|
}
|
2016-07-14 02:40:01 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// Otherwise, we've agreed on a closing fee! In this
|
|
|
|
// case, we'll wrap up the channel closure by notifying
|
|
|
|
// relevant sub-systems and launching a goroutine to
|
|
|
|
// wait for close tx conf.
|
|
|
|
p.finalizeChanClosure(chanCloser)
|
2017-11-23 22:49:48 +03:00
|
|
|
case <-p.quit:
|
|
|
|
|
|
|
|
// As, we've been signalled to exit, we'll reset all
|
|
|
|
// our active channel back to their default state.
|
|
|
|
p.activeChanMtx.Lock()
|
|
|
|
for _, channel := range p.activeChannels {
|
|
|
|
channel.ResetState()
|
2017-07-14 22:05:55 +03:00
|
|
|
}
|
2017-11-23 22:49:48 +03:00
|
|
|
p.activeChanMtx.Unlock()
|
2017-05-24 01:26:38 +03:00
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
break out
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// fetchActiveChanCloser attempts to fetch the active chan closer state machine
|
|
|
|
// for the target channel ID. If the channel isn't active an error is returned.
|
|
|
|
// Otherwise, either an existing state machine will be returned, or a new one
|
|
|
|
// will be created.
|
|
|
|
func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, error) {
|
|
|
|
// First, we'll ensure that we actually know of the target channel. If
|
|
|
|
// not, we'll ignore this message.
|
|
|
|
p.activeChanMtx.RLock()
|
|
|
|
channel, ok := p.activeChannels[chanID]
|
|
|
|
p.activeChanMtx.RUnlock()
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("unable to close channel, "+
|
|
|
|
"ChannelID(%v) is unknown", chanID)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll attempt to look up the matching state machine, if we can't
|
|
|
|
// find one then this means that the remote party is initiating a
|
|
|
|
// cooperative channel closure.
|
|
|
|
chanCloser, ok := p.activeChanCloses[chanID]
|
|
|
|
if !ok {
|
|
|
|
// We'll create a valid closing state machine in order to
|
|
|
|
// respond to the initiated cooperative channel closure.
|
|
|
|
deliveryAddr, err := p.genDeliveryScript()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// In order to begin fee negotiations, we'll first compute our
|
|
|
|
// target ideal fee-per-kw. We'll set this to a lax value, as
|
|
|
|
// we weren't the ones that initiated the channel closure.
|
|
|
|
satPerWight, err := p.server.cc.feeEstimator.EstimateFeePerWeight(6)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to query fee "+
|
|
|
|
"estimator: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll then convert the sat per weight to sat per k/w as this
|
|
|
|
// is the native unit used within the protocol when dealing
|
|
|
|
// with fees.
|
|
|
|
targetFeePerKw := satPerWight * 1000
|
|
|
|
|
|
|
|
_, startingHeight, err := p.server.cc.chainIO.GetBestBlock()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
chanCloser = newChannelCloser(
|
|
|
|
chanCloseCfg{
|
|
|
|
channel: channel,
|
|
|
|
unregisterChannel: p.server.htlcSwitch.RemoveLink,
|
|
|
|
broadcastTx: p.server.cc.wallet.PublishTransaction,
|
|
|
|
settledContracts: p.server.breachArbiter.settledContracts,
|
|
|
|
quit: p.quit,
|
|
|
|
},
|
|
|
|
deliveryAddr,
|
|
|
|
targetFeePerKw,
|
|
|
|
uint32(startingHeight),
|
|
|
|
nil,
|
|
|
|
)
|
|
|
|
p.activeChanCloses[chanID] = chanCloser
|
|
|
|
}
|
|
|
|
|
|
|
|
return chanCloser, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleLocalCloseReq kicks-off the workflow to execute a cooperative or
|
|
|
|
// forced unilateral closure of the channel initiated by a local subsystem.
|
2017-05-24 01:26:38 +03:00
|
|
|
//
|
2016-12-15 05:11:31 +03:00
|
|
|
// TODO(roasbeef): if no more active channels with peer call Remove on connMgr
|
|
|
|
// with peerID
|
2017-11-23 10:21:07 +03:00
|
|
|
func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
|
2017-05-02 23:04:58 +03:00
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(req.ChanPoint)
|
2017-04-17 01:41:11 +03:00
|
|
|
|
2016-11-18 05:43:33 +03:00
|
|
|
p.activeChanMtx.RLock()
|
2017-05-24 01:21:35 +03:00
|
|
|
channel, ok := p.activeChannels[chanID]
|
2016-11-18 05:43:33 +03:00
|
|
|
p.activeChanMtx.RUnlock()
|
2017-05-24 01:21:35 +03:00
|
|
|
if !ok {
|
|
|
|
err := fmt.Errorf("unable to close channel, ChannelID(%v) is "+
|
|
|
|
"unknown", chanID)
|
|
|
|
peerLog.Errorf(err.Error())
|
2017-05-02 23:04:58 +03:00
|
|
|
req.Err <- err
|
2017-05-24 01:21:35 +03:00
|
|
|
return
|
|
|
|
}
|
2016-09-12 22:42:26 +03:00
|
|
|
|
2016-11-29 05:44:14 +03:00
|
|
|
switch req.CloseType {
|
2017-07-31 00:21:21 +03:00
|
|
|
|
2016-11-29 05:44:14 +03:00
|
|
|
// A type of CloseRegular indicates that the user has opted to close
|
2017-05-05 02:03:47 +03:00
|
|
|
// out this channel on-chain, so we execute the cooperative channel
|
2017-02-03 04:05:25 +03:00
|
|
|
// closure workflow.
|
2017-05-02 23:04:58 +03:00
|
|
|
case htlcswitch.CloseRegular:
|
2017-11-23 10:21:07 +03:00
|
|
|
// First, we'll fetch a fresh delivery address that we'll use
|
|
|
|
// to send the funds to in the case of a successful
|
|
|
|
// negotiation.
|
|
|
|
deliveryAddr, err := p.genDeliveryScript()
|
|
|
|
if err != nil {
|
|
|
|
peerLog.Errorf(err.Error())
|
|
|
|
req.Err <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
_, startingHeight, err := p.server.cc.chainIO.GetBestBlock()
|
|
|
|
if err != nil {
|
|
|
|
peerLog.Errorf(err.Error())
|
|
|
|
req.Err <- err
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll create a new channel closer state machine to
|
|
|
|
// handle the close negotiation.
|
|
|
|
chanCloser := newChannelCloser(
|
|
|
|
chanCloseCfg{
|
|
|
|
channel: channel,
|
|
|
|
unregisterChannel: p.server.htlcSwitch.RemoveLink,
|
|
|
|
broadcastTx: p.server.cc.wallet.PublishTransaction,
|
|
|
|
settledContracts: p.server.breachArbiter.settledContracts,
|
|
|
|
quit: p.quit,
|
|
|
|
},
|
|
|
|
deliveryAddr,
|
|
|
|
req.TargetFeePerKw,
|
|
|
|
uint32(startingHeight),
|
|
|
|
req,
|
|
|
|
)
|
|
|
|
p.activeChanCloses[chanID] = chanCloser
|
|
|
|
|
|
|
|
// Finally, we'll initiate the channel shutdown within the
|
|
|
|
// chanCloser, and send the shutdown message to the remote
|
|
|
|
// party to kick things off.
|
|
|
|
shutdownMsg, err := chanCloser.ShutdownChan()
|
2017-03-25 04:26:09 +03:00
|
|
|
if err != nil {
|
2017-11-23 10:21:07 +03:00
|
|
|
peerLog.Errorf(err.Error())
|
2017-05-02 23:04:58 +03:00
|
|
|
req.Err <- err
|
2017-11-23 10:21:07 +03:00
|
|
|
delete(p.activeChanCloses, chanID)
|
|
|
|
|
2017-11-23 22:49:48 +03:00
|
|
|
// As we were unable to shutdown the channel, we'll
|
|
|
|
// return it back to its normal state.
|
|
|
|
channel.ResetState()
|
2017-03-25 04:26:09 +03:00
|
|
|
return
|
|
|
|
}
|
2016-11-29 05:44:14 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
p.queueMsg(shutdownMsg, nil)
|
|
|
|
|
2017-01-13 08:01:50 +03:00
|
|
|
// A type of CloseBreach indicates that the counterparty has breached
|
2017-02-03 04:05:25 +03:00
|
|
|
// the channel therefore we need to clean up our local state.
|
2017-05-02 23:04:58 +03:00
|
|
|
case htlcswitch.CloseBreach:
|
2017-07-31 00:21:21 +03:00
|
|
|
// TODO(roasbeef): no longer need with newer beach logic?
|
2016-11-29 05:44:14 +03:00
|
|
|
peerLog.Infof("ChannelPoint(%v) has been breached, wiping "+
|
2017-05-02 23:04:58 +03:00
|
|
|
"channel", req.ChanPoint)
|
2017-11-23 10:21:07 +03:00
|
|
|
if err := p.WipeChannel(req.ChanPoint); err != nil {
|
2016-11-29 05:44:14 +03:00
|
|
|
peerLog.Infof("Unable to wipe channel after detected "+
|
|
|
|
"breach: %v", err)
|
2017-05-02 23:04:58 +03:00
|
|
|
req.Err <- err
|
2016-11-29 05:44:14 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
return
|
2016-09-12 22:42:26 +03:00
|
|
|
}
|
2017-03-25 04:26:09 +03:00
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// finalizeChanClosure performs the final clean up steps once the cooperative
|
|
|
|
// closure transaction has been fully broadcast. The finalized closing state
|
2017-12-18 05:40:05 +03:00
|
|
|
// machine should be passed in. Once the transaction has been sufficiently
|
|
|
|
// confirmed, the channel will be marked as fully closed within the database,
|
2017-11-23 10:21:07 +03:00
|
|
|
// and any clients will be notified of updates to the closing state.
|
|
|
|
func (p *peer) finalizeChanClosure(chanCloser *channelCloser) {
|
|
|
|
closeReq := chanCloser.CloseRequest()
|
|
|
|
|
|
|
|
// First, we'll clear all indexes related to the channel in question.
|
|
|
|
chanPoint := chanCloser.cfg.channel.ChannelPoint()
|
|
|
|
if err := p.WipeChannel(chanPoint); err != nil {
|
|
|
|
if closeReq != nil {
|
|
|
|
closeReq.Err <- err
|
2017-07-14 22:05:55 +03:00
|
|
|
}
|
2017-08-18 22:16:20 +03:00
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
chanCloser.cfg.channel.Stop()
|
|
|
|
chanCloser.cfg.channel.CancelObserver()
|
2017-07-14 22:05:55 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// Next, we'll launch a goroutine which will request to be notified by
|
|
|
|
// the ChainNotifier once the closure
|
|
|
|
// transaction obtains a single confirmation.
|
|
|
|
notifier := p.server.cc.chainNotifier
|
2017-10-18 05:25:13 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// If any error happens during waitForChanToClose, forward it to
|
|
|
|
// closeReq. If this channel closure is not locally initiated, closeReq
|
|
|
|
// will be nil, so just ignore the error.
|
|
|
|
errChan := make(chan error, 1)
|
|
|
|
if closeReq != nil {
|
|
|
|
errChan = closeReq.Err
|
2016-09-12 22:42:26 +03:00
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
closingTx, err := chanCloser.ClosingTx()
|
|
|
|
if err != nil {
|
|
|
|
if closeReq != nil {
|
|
|
|
peerLog.Error(err)
|
|
|
|
closeReq.Err <- err
|
2017-07-14 22:05:55 +03:00
|
|
|
}
|
2017-05-05 02:03:47 +03:00
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
closingTxid := closingTx.TxHash()
|
2017-05-05 02:03:47 +03:00
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
// If this is a locally requested shutdown, update the caller with a
|
|
|
|
// new event detailing the current pending state of this request.
|
|
|
|
if closeReq != nil {
|
|
|
|
closeReq.Updates <- &lnrpc.CloseStatusUpdate{
|
2017-07-14 22:05:55 +03:00
|
|
|
Update: &lnrpc.CloseStatusUpdate_ClosePending{
|
|
|
|
ClosePending: &lnrpc.PendingUpdate{
|
|
|
|
Txid: closingTxid[:],
|
|
|
|
},
|
2016-08-31 02:52:53 +03:00
|
|
|
},
|
2017-07-14 22:05:55 +03:00
|
|
|
}
|
2016-08-31 02:52:53 +03:00
|
|
|
}
|
|
|
|
|
2017-11-23 10:21:07 +03:00
|
|
|
go waitForChanToClose(chanCloser.negotiationHeight, notifier, errChan,
|
2017-07-14 22:05:55 +03:00
|
|
|
chanPoint, &closingTxid, func() {
|
|
|
|
|
2017-05-11 03:27:05 +03:00
|
|
|
// First, we'll mark the database as being fully closed
|
|
|
|
// so we'll no longer watch for its ultimate closure
|
|
|
|
// upon startup.
|
2017-07-14 22:05:55 +03:00
|
|
|
err := p.server.chanDB.MarkChanFullyClosed(chanPoint)
|
2017-05-11 03:27:05 +03:00
|
|
|
if err != nil {
|
2017-11-23 10:21:07 +03:00
|
|
|
if closeReq != nil {
|
|
|
|
closeReq.Err <- err
|
2017-07-14 22:05:55 +03:00
|
|
|
}
|
2017-05-11 03:27:05 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Respond to the local subsystem which requested the
|
|
|
|
// channel closure.
|
2017-11-23 10:21:07 +03:00
|
|
|
if closeReq != nil {
|
|
|
|
closeReq.Updates <- &lnrpc.CloseStatusUpdate{
|
2017-07-14 22:05:55 +03:00
|
|
|
Update: &lnrpc.CloseStatusUpdate_ChanClose{
|
|
|
|
ChanClose: &lnrpc.ChannelCloseUpdate{
|
|
|
|
ClosingTxid: closingTxid[:],
|
|
|
|
Success: true,
|
|
|
|
},
|
2017-05-11 03:27:05 +03:00
|
|
|
},
|
2017-07-14 22:05:55 +03:00
|
|
|
}
|
2017-05-11 03:27:05 +03:00
|
|
|
}
|
|
|
|
})
|
2017-05-05 02:03:47 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// waitForChanToClose uses the passed notifier to wait until the channel has
|
|
|
|
// been detected as closed on chain and then concludes by executing the
|
|
|
|
// following actions: the channel point will be sent over the settleChan, and
|
|
|
|
// finally the callback will be executed. If any error is encountered within
|
|
|
|
// the function, then it will be sent over the errChan.
|
2017-05-11 03:27:05 +03:00
|
|
|
func waitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
|
2017-05-05 02:03:47 +03:00
|
|
|
errChan chan error, chanPoint *wire.OutPoint,
|
|
|
|
closingTxID *chainhash.Hash, cb func()) {
|
|
|
|
|
2017-03-25 04:26:09 +03:00
|
|
|
peerLog.Infof("Waiting for confirmation of cooperative close of "+
|
|
|
|
"ChannelPoint(%v) with txid: %v", chanPoint,
|
|
|
|
closingTxID)
|
|
|
|
|
2017-05-05 02:03:47 +03:00
|
|
|
// TODO(roasbeef): add param for num needed confs
|
2017-05-11 03:27:05 +03:00
|
|
|
confNtfn, err := notifier.RegisterConfirmationsNtfn(closingTxID, 1,
|
|
|
|
bestHeight)
|
2017-05-16 03:53:22 +03:00
|
|
|
if err != nil {
|
|
|
|
if errChan != nil {
|
|
|
|
errChan <- err
|
|
|
|
}
|
2017-05-05 02:03:47 +03:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// In the case that the ChainNotifier is shutting down, all subscriber
|
|
|
|
// notification channels will be closed, generating a nil receive.
|
|
|
|
height, ok := <-confNtfn.Confirmed
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// The channel has been closed, remove it from any active indexes, and
|
|
|
|
// the database state.
|
2017-08-05 04:32:25 +03:00
|
|
|
peerLog.Infof("ChannelPoint(%v) is now closed at "+
|
2017-05-05 02:03:47 +03:00
|
|
|
"height %v", chanPoint, height.BlockHeight)
|
|
|
|
|
|
|
|
// Finally, execute the closure call back to mark the confirmation of
|
|
|
|
// the transaction closing the contract.
|
|
|
|
cb()
|
2016-06-23 08:19:24 +03:00
|
|
|
}
|
|
|
|
|
2017-11-23 10:15:48 +03:00
|
|
|
// WipeChannel removes the passed channel point from all indexes associated
|
|
|
|
// with the peer, and the switch.
|
|
|
|
func (p *peer) WipeChannel(chanPoint *wire.OutPoint) error {
|
2017-03-25 04:26:09 +03:00
|
|
|
|
2017-11-23 10:15:48 +03:00
|
|
|
chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
|
2016-06-23 08:19:24 +03:00
|
|
|
|
2016-11-18 05:43:33 +03:00
|
|
|
p.activeChanMtx.Lock()
|
2017-11-23 10:15:48 +03:00
|
|
|
if channel, ok := p.activeChannels[chanID]; ok {
|
|
|
|
channel.Stop()
|
|
|
|
delete(p.activeChannels, chanID)
|
|
|
|
}
|
2016-11-18 05:43:33 +03:00
|
|
|
p.activeChanMtx.Unlock()
|
2016-06-21 22:32:32 +03:00
|
|
|
|
2017-11-23 10:15:48 +03:00
|
|
|
// Instruct the HtlcSwitch to close this link as the channel is no
|
2016-07-10 02:41:06 +03:00
|
|
|
// longer active.
|
2017-05-02 23:04:58 +03:00
|
|
|
if err := p.server.htlcSwitch.RemoveLink(chanID); err != nil {
|
|
|
|
if err == htlcswitch.ErrChannelLinkNotFound {
|
|
|
|
peerLog.Warnf("unable remove channel link with "+
|
|
|
|
"ChannelPoint(%v): %v", chanID, err)
|
|
|
|
return nil
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
2017-05-02 23:04:58 +03:00
|
|
|
return err
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
|
|
|
|
2017-05-02 23:04:58 +03:00
|
|
|
return nil
|
2016-06-21 22:32:32 +03:00
|
|
|
}
|
|
|
|
|
2017-02-16 15:39:38 +03:00
|
|
|
// handleInitMsg handles the incoming init message which contains global and
|
|
|
|
// local features vectors. If feature vectors are incompatible then disconnect.
|
|
|
|
func (p *peer) handleInitMsg(msg *lnwire.Init) error {
|
2017-10-19 01:14:22 +03:00
|
|
|
p.remoteLocalFeatures = lnwire.NewFeatureVector(msg.LocalFeatures,
|
2017-10-11 21:36:23 +03:00
|
|
|
lnwire.LocalFeatures)
|
2017-10-19 01:14:22 +03:00
|
|
|
p.remoteGlobalFeatures = lnwire.NewFeatureVector(msg.GlobalFeatures,
|
2017-10-11 21:36:23 +03:00
|
|
|
lnwire.GlobalFeatures)
|
|
|
|
|
2017-10-19 01:14:22 +03:00
|
|
|
unknownLocalFeatures := p.remoteLocalFeatures.UnknownRequiredFeatures()
|
2017-10-11 21:36:23 +03:00
|
|
|
if len(unknownLocalFeatures) > 0 {
|
|
|
|
err := errors.Errorf("Peer set unknown local feature bits: %v",
|
|
|
|
unknownLocalFeatures)
|
2017-02-16 15:39:38 +03:00
|
|
|
peerLog.Error(err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-10-19 01:14:22 +03:00
|
|
|
unknownGlobalFeatures := p.remoteGlobalFeatures.UnknownRequiredFeatures()
|
2017-10-11 21:36:23 +03:00
|
|
|
if len(unknownGlobalFeatures) > 0 {
|
|
|
|
err := errors.Errorf("Peer set unknown global feature bits: %v",
|
|
|
|
unknownGlobalFeatures)
|
2017-02-16 15:39:38 +03:00
|
|
|
peerLog.Error(err)
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-03-17 05:45:10 +03:00
|
|
|
// sendInitMsg sends init message to remote peer which contains our currently
|
|
|
|
// supported local and global features.
|
2017-02-16 15:39:38 +03:00
|
|
|
func (p *peer) sendInitMsg() error {
|
|
|
|
msg := lnwire.NewInitMessage(
|
2017-10-11 21:36:23 +03:00
|
|
|
p.server.globalFeatures.RawFeatureVector,
|
2017-10-19 01:16:03 +03:00
|
|
|
p.localFeatures,
|
2017-02-16 15:39:38 +03:00
|
|
|
)
|
|
|
|
|
2017-03-17 05:45:10 +03:00
|
|
|
return p.writeMessage(msg)
|
2017-02-16 15:39:38 +03:00
|
|
|
}
|
|
|
|
|
2017-06-17 01:11:02 +03:00
|
|
|
// SendMessage queues a message for sending to the target peer.
|
2017-05-02 23:04:58 +03:00
|
|
|
func (p *peer) SendMessage(msg lnwire.Message) error {
|
|
|
|
p.queueMsg(msg, nil)
|
2017-02-21 05:10:05 +03:00
|
|
|
return nil
|
2016-07-22 03:10:30 +03:00
|
|
|
}
|
|
|
|
|
2017-06-17 01:11:02 +03:00
|
|
|
// PubKey returns the pubkey of the peer in compressed serialized format.
|
|
|
|
func (p *peer) PubKey() [33]byte {
|
|
|
|
return p.pubKeyBytes
|
2016-07-13 03:45:29 +03:00
|
|
|
}
|
|
|
|
|
2016-06-21 22:32:32 +03:00
|
|
|
// TODO(roasbeef): make all start/stop mutexes a CAS
|
2017-06-29 16:52:55 +03:00
|
|
|
|
|
|
|
// createGetLastUpdate returns the handler which serve as a source of the last
|
|
|
|
// update of the channel in a form of lnwire update message.
|
|
|
|
func createGetLastUpdate(router *routing.ChannelRouter,
|
|
|
|
pubKey [33]byte, chanID lnwire.ShortChannelID) func() (*lnwire.ChannelUpdate,
|
|
|
|
error) {
|
|
|
|
|
|
|
|
return func() (*lnwire.ChannelUpdate, error) {
|
2017-08-22 09:57:52 +03:00
|
|
|
info, edge1, edge2, err := router.GetChannelByID(chanID)
|
2017-06-29 16:52:55 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if edge1 == nil || edge2 == nil {
|
|
|
|
return nil, errors.Errorf("unable to find "+
|
|
|
|
"channel by ShortChannelID(%v)", chanID)
|
|
|
|
}
|
|
|
|
|
|
|
|
var local *channeldb.ChannelEdgePolicy
|
|
|
|
if bytes.Compare(edge1.Node.PubKey.SerializeCompressed(),
|
|
|
|
pubKey[:]) == 0 {
|
|
|
|
local = edge2
|
|
|
|
} else {
|
|
|
|
local = edge1
|
|
|
|
}
|
|
|
|
|
2017-08-22 09:57:52 +03:00
|
|
|
update := &lnwire.ChannelUpdate{
|
2017-06-29 16:52:55 +03:00
|
|
|
Signature: local.Signature,
|
2017-08-22 09:57:52 +03:00
|
|
|
ChainHash: info.ChainHash,
|
2017-06-29 16:52:55 +03:00
|
|
|
ShortChannelID: lnwire.NewShortChanIDFromInt(local.ChannelID),
|
2017-08-22 09:57:52 +03:00
|
|
|
Timestamp: uint32(local.LastUpdate.Unix()),
|
2017-06-29 16:52:55 +03:00
|
|
|
Flags: local.Flags,
|
|
|
|
TimeLockDelta: local.TimeLockDelta,
|
2017-08-22 09:25:41 +03:00
|
|
|
HtlcMinimumMsat: local.MinHTLC,
|
2017-06-29 16:52:55 +03:00
|
|
|
BaseFee: uint32(local.FeeBaseMSat),
|
|
|
|
FeeRate: uint32(local.FeeProportionalMillionths),
|
2017-08-22 09:57:52 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
hswcLog.Debugf("Sending latest channel_update: %v",
|
|
|
|
spew.Sdump(update))
|
|
|
|
|
|
|
|
return update, nil
|
2017-06-29 16:52:55 +03:00
|
|
|
}
|
|
|
|
}
|