Merge pull request #4347 from Crypt-iQ/peer_pkg_0518

peer: Brontide Peer implementation

Commit 854a12e4c6
@@ -166,6 +166,11 @@ func (m *mockChanEvent) NotifyPendingOpenChannelEvent(outpoint wire.OutPoint,
     }
 }
 
+type newChannelMsg struct {
+    channel *channeldb.OpenChannel
+    err     chan error
+}
+
 type testNode struct {
     privKey *btcec.PrivateKey
     addr    *lnwire.NetAddress
@@ -9,8 +9,7 @@ import (
     "github.com/lightningnetwork/lnd/lnwire"
 )
 
-// Peer is an interface which represents the remote lightning node inside our
-// system.
+// Peer is an interface which represents a remote lightning node.
 type Peer interface {
     // SendMessage sends a variadic number of high-priority message to
     // remote peer. The first argument denotes if the method should block
@@ -89,7 +89,7 @@ func (c *CommitSig) MaxPayloadLength(uint32) uint32 {
 // TargetChanID returns the channel id of the link for which this message is
 // intended.
 //
-// NOTE: Part of lnd.LinkUpdater interface.
+// NOTE: Part of peer.LinkUpdater interface.
 func (c *CommitSig) TargetChanID() ChannelID {
     return c.ChanID
 }
@@ -85,7 +85,7 @@ func (c *RevokeAndAck) MaxPayloadLength(uint32) uint32 {
 // TargetChanID returns the channel id of the link for which this message is
 // intended.
 //
-// NOTE: Part of lnd.LinkUpdater interface.
+// NOTE: Part of peer.LinkUpdater interface.
 func (c *RevokeAndAck) TargetChanID() ChannelID {
     return c.ChanID
 }
@@ -113,7 +113,7 @@ func (c *UpdateAddHTLC) MaxPayloadLength(uint32) uint32 {
 // TargetChanID returns the channel id of the link for which this message is
 // intended.
 //
-// NOTE: Part of lnd.LinkUpdater interface.
+// NOTE: Part of peer.LinkUpdater interface.
 func (c *UpdateAddHTLC) TargetChanID() ChannelID {
     return c.ChanID
 }
@@ -89,7 +89,7 @@ func (c *UpdateFailHTLC) MaxPayloadLength(uint32) uint32 {
 // TargetChanID returns the channel id of the link for which this message is
 // intended.
 //
-// NOTE: Part of lnd.LinkUpdater interface.
+// NOTE: Part of peer.LinkUpdater interface.
 func (c *UpdateFailHTLC) TargetChanID() ChannelID {
     return c.ChanID
 }
@@ -77,7 +77,7 @@ func (c *UpdateFailMalformedHTLC) MaxPayloadLength(uint32) uint32 {
 // TargetChanID returns the channel id of the link for which this message is
 // intended.
 //
-// NOTE: Part of lnd.LinkUpdater interface.
+// NOTE: Part of peer.LinkUpdater interface.
 func (c *UpdateFailMalformedHTLC) TargetChanID() ChannelID {
     return c.ChanID
 }
@@ -72,7 +72,7 @@ func (c *UpdateFee) MaxPayloadLength(uint32) uint32 {
 // TargetChanID returns the channel id of the link for which this message is
 // intended.
 //
-// NOTE: Part of lnd.LinkUpdater interface.
+// NOTE: Part of peer.LinkUpdater interface.
 func (c *UpdateFee) TargetChanID() ChannelID {
     return c.ChanID
 }
@@ -82,7 +82,7 @@ func (c *UpdateFulfillHTLC) MaxPayloadLength(uint32) uint32 {
 // TargetChanID returns the channel id of the link for which this message is
 // intended.
 //
-// NOTE: Part of lnd.LinkUpdater interface.
+// NOTE: Part of peer.LinkUpdater interface.
 func (c *UpdateFulfillHTLC) TargetChanID() ChannelID {
     return c.ChanID
 }
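
All seven hunks above retarget the same doc comment: the LinkUpdater interface these messages implement now lives in the new peer package instead of the root lnd package. For orientation, here is a minimal sketch of the interface implied by the TargetChanID methods above (illustrative; the authoritative definition is in the peer package):

    // LinkUpdater is implemented by channel-update wire messages such as
    // CommitSig, RevokeAndAck, and the UpdateAddHTLC family, letting the
    // peer's read handler route each message to the link for its channel.
    type LinkUpdater interface {
        // TargetChanID returns the channel ID of the link this message is
        // intended for.
        TargetChanID() lnwire.ChannelID
    }
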
log.go
@@ -30,6 +30,7 @@ import (
     "github.com/lightningnetwork/lnd/lnwallet/chanfunding"
     "github.com/lightningnetwork/lnd/monitoring"
     "github.com/lightningnetwork/lnd/netann"
+    "github.com/lightningnetwork/lnd/peer"
     "github.com/lightningnetwork/lnd/peernotifier"
     "github.com/lightningnetwork/lnd/routing"
     "github.com/lightningnetwork/lnd/routing/localchans"
@@ -75,7 +76,6 @@ var (
     // function should always be called as soon as possible to finish
     // setting them up properly with a root logger.
     ltndLog = addLndPkgLogger("LTND")
-    peerLog = addLndPkgLogger("PEER")
     rpcsLog = addLndPkgLogger("RPCS")
     srvrLog = addLndPkgLogger("SRVR")
     fndgLog = addLndPkgLogger("FNDG")
@@ -122,6 +122,7 @@ func SetupLoggers(root *build.RotatingLogWriter) {
     AddSubLogger(root, "WTCL", wtclient.UseLogger)
     AddSubLogger(root, "PRNF", peernotifier.UseLogger)
     AddSubLogger(root, "CHFD", chanfunding.UseLogger)
+    AddSubLogger(root, "PEER", peer.UseLogger)
     AddSubLogger(root, "CHCL", chancloser.UseLogger)
 
     AddSubLogger(root, routing.Subsystem, routing.UseLogger, localchans.UseLogger)
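
Registering the new PEER sub-logger implies the peer package ships its own log.go exposing a UseLogger hook, following the same btclog convention as the other subsystems listed above. A representative sketch, assuming the standard lnd log.go boilerplate:

    package peer

    import "github.com/btcsuite/btclog"

    // log is the package-level logger; it stays silent until SetupLoggers
    // wires it up via UseLogger.
    var log btclog.Logger

    // UseLogger uses a specified Logger to output package logging info.
    func UseLogger(logger btclog.Logger) {
        log = logger
    }
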
@@ -1,4 +1,4 @@
-package lnd
+package peer
 
 import (
     "bytes"
@@ -29,7 +29,6 @@ import (
     "github.com/lightningnetwork/lnd/lnwallet"
     "github.com/lightningnetwork/lnd/lnwallet/chancloser"
     "github.com/lightningnetwork/lnd/lnwire"
-    "github.com/lightningnetwork/lnd/pool"
     "github.com/lightningnetwork/lnd/queue"
     "github.com/lightningnetwork/lnd/ticker"
 )
@@ -41,14 +40,16 @@ const (
     // idleTimeout is the duration of inactivity before we time out a peer.
     idleTimeout = 5 * time.Minute
 
-    // writeMessageTimeout is the timeout used when writing a message to peer.
+    // writeMessageTimeout is the timeout used when writing a message to the
+    // peer.
     writeMessageTimeout = 5 * time.Second
 
     // readMessageTimeout is the timeout used when reading a message from a
     // peer.
     readMessageTimeout = 5 * time.Second
 
-    // handshakeTimeout is the timeout used when waiting for peer init message.
+    // handshakeTimeout is the timeout used when waiting for the peer's init
+    // message.
     handshakeTimeout = 15 * time.Second
 
     // outgoingQueueLen is the buffer size of the channel which houses
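
These timeouts are enforced as deadlines on the underlying connection. A simplified sketch of how writeMessageTimeout might be applied (writeWithTimeout is an illustrative helper, not part of this PR; the real write path goes through the brontide noise connection and a shared write pool):

    // writeWithTimeout applies writeMessageTimeout as a write deadline so a
    // stalled peer cannot block the writer goroutine indefinitely.
    // Assumes the surrounding package imports "net" and "time".
    func writeWithTimeout(conn net.Conn, b []byte) error {
        deadline := time.Now().Add(writeMessageTimeout)
        if err := conn.SetWriteDeadline(deadline); err != nil {
            return err
        }
        _, err := conn.Write(b)
        return err
    }
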
@@ -56,8 +57,15 @@ const (
     // this struct.
     outgoingQueueLen = 50
 
-    // errorBufferSize is the number of historic peer errors that we store.
-    errorBufferSize = 10
+    // ErrorBufferSize is the number of historic peer errors that we store.
+    ErrorBufferSize = 10
 )
 
+var (
+    // ErrChannelNotFound is an error returned when a channel is queried and
+    // either the Brontide doesn't know of it, or the channel in question
+    // is pending.
+    ErrChannelNotFound = fmt.Errorf("channel not found")
+)
+
 // outgoingMsg packages an lnwire.Message to be sent out on the wire, along with
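
errorBufferSize becomes exported as ErrorBufferSize because the buffer is now constructed by the caller and injected into the peer, rather than built inside it. A sketch of the server-side construction, assuming lnd's queue.NewCircularBuffer helper:

    // The server owns one error buffer per peer and hands it to the peer
    // via its Config, so the error history survives reconnects.
    errBuffer, err := queue.NewCircularBuffer(peer.ErrorBufferSize)
    if err != nil {
        return err
    }
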
@@ -70,14 +78,14 @@ type outgoingMsg struct {
 }
 
 // newChannelMsg packages a channeldb.OpenChannel with a channel that allows
-// the receiver of the request to report when the funding transaction has been
-// confirmed and the channel creation process completed.
+// the receiver of the request to report when the channel creation process has
+// completed.
 type newChannelMsg struct {
     channel *channeldb.OpenChannel
     err     chan error
 }
 
-// closeMsgs is a wrapper struct around any wire messages that deal with the
+// closeMsg is a wrapper struct around any wire messages that deal with the
 // cooperative channel closure negotiation process. This struct includes the
 // raw channel ID targeted along with the original message.
 type closeMsg struct {
@@ -85,42 +93,42 @@ type closeMsg struct {
     msg lnwire.Message
 }
 
-// pendingUpdate describes the pending state of a closing channel.
-type pendingUpdate struct {
+// PendingUpdate describes the pending state of a closing channel.
+type PendingUpdate struct {
     Txid        []byte
     OutputIndex uint32
 }
 
-// channelCloseUpdate contains the outcome of the close channel operation.
-type channelCloseUpdate struct {
+// ChannelCloseUpdate contains the outcome of the close channel operation.
+type ChannelCloseUpdate struct {
     ClosingTxid []byte
     Success     bool
 }
 
-// timestampedError is a timestamped error that is used to store the most recent
+// TimestampedError is a timestamped error that is used to store the most recent
 // errors we have experienced with our peers.
-type timestampedError struct {
-    error     error
-    timestamp time.Time
+type TimestampedError struct {
+    Error     error
+    Timestamp time.Time
 }
 
-// peer is an active peer on the Lightning Network. This struct is responsible
+// Brontide is an active peer on the Lightning Network. This struct is responsible
 // for managing any channel state related to this peer. To do so, it has
 // several helper goroutines to handle events such as HTLC timeouts, new
 // funding workflow, and detecting an uncooperative closure of any active
 // channels.
 // TODO(roasbeef): proper reconnection logic
-type peer struct {
+type Brontide struct {
     // MUST be used atomically.
     started    int32
     disconnect int32
 
-    // The following fields are only meant to be used *atomically*
+    // MUST be used atomically.
     bytesReceived uint64
     bytesSent     uint64
 
     // pingTime is a rough estimate of the RTT (round-trip-time) between us
-    // and the connected peer. This time is expressed in micro seconds.
+    // and the connected peer. This time is expressed in microseconds.
     // To be used atomically.
     // TODO(roasbeef): also use a WMA or EMA?
     pingTime int64
|
|||||||
// our last ping message. To be used atomically.
|
// our last ping message. To be used atomically.
|
||||||
pingLastSend int64
|
pingLastSend int64
|
||||||
|
|
||||||
cfg *Config
|
cfg Config
|
||||||
|
|
||||||
connReq *connmgr.ConnReq
|
|
||||||
conn net.Conn
|
|
||||||
|
|
||||||
addr *lnwire.NetAddress
|
|
||||||
pubKeyBytes [33]byte
|
|
||||||
|
|
||||||
// activeSignal when closed signals that the peer is now active and
|
// activeSignal when closed signals that the peer is now active and
|
||||||
// ready to process messages.
|
// ready to process messages.
|
||||||
activeSignal chan struct{}
|
activeSignal chan struct{}
|
||||||
|
|
||||||
// startTime is the time this peer connection was successfully
|
// startTime is the time this peer connection was successfully established.
|
||||||
// established. It will be zero for peers that did not successfully
|
// It will be zero for peers that did not successfully call Start().
|
||||||
// Start().
|
|
||||||
startTime time.Time
|
startTime time.Time
|
||||||
|
|
||||||
inbound bool
|
// sendQueue is the channel which is used to queue outgoing messages to be
|
||||||
|
|
||||||
// sendQueue is the channel which is used to queue outgoing to be
|
|
||||||
// written onto the wire. Note that this channel is unbuffered.
|
// written onto the wire. Note that this channel is unbuffered.
|
||||||
sendQueue chan outgoingMsg
|
sendQueue chan outgoingMsg
|
||||||
|
|
||||||
@ -157,7 +156,7 @@ type peer struct {
|
|||||||
outgoingQueue chan outgoingMsg
|
outgoingQueue chan outgoingMsg
|
||||||
|
|
||||||
// activeChanMtx protects access to the activeChannels and
|
// activeChanMtx protects access to the activeChannels and
|
||||||
// addeddChannels maps.
|
// addedChannels maps.
|
||||||
activeChanMtx sync.RWMutex
|
activeChanMtx sync.RWMutex
|
||||||
|
|
||||||
// activeChannels is a map which stores the state machines of all
|
// activeChannels is a map which stores the state machines of all
|
||||||
@ -186,11 +185,10 @@ type peer struct {
|
|||||||
// proxy messages to individual, active links.
|
// proxy messages to individual, active links.
|
||||||
activeMsgStreams map[lnwire.ChannelID]*msgStream
|
activeMsgStreams map[lnwire.ChannelID]*msgStream
|
||||||
|
|
||||||
// activeChanCloses is a map that keep track of all the active
|
// activeChanCloses is a map that keeps track of all the active
|
||||||
// cooperative channel closures that are active. Any channel closing
|
// cooperative channel closures. Any channel closing messages are directed
|
||||||
// messages are directed to one of these active state machines. Once
|
// to one of these active state machines. Once the channel has been closed,
|
||||||
// the channel has been closed, the state machine will be delete from
|
// the state machine will be deleted from the map.
|
||||||
// the map.
|
|
||||||
activeChanCloses map[lnwire.ChannelID]*chancloser.ChanCloser
|
activeChanCloses map[lnwire.ChannelID]*chancloser.ChanCloser
|
||||||
|
|
||||||
// localCloseChanReqs is a channel in which any local requests to close
|
// localCloseChanReqs is a channel in which any local requests to close
|
||||||
@ -206,28 +204,6 @@ type peer struct {
|
|||||||
// well as lnwire.ClosingSigned messages.
|
// well as lnwire.ClosingSigned messages.
|
||||||
chanCloseMsgs chan *closeMsg
|
chanCloseMsgs chan *closeMsg
|
||||||
|
|
||||||
// chanActiveTimeout specifies the duration the peer will wait to
|
|
||||||
// request a channel reenable, beginning from the time the peer was
|
|
||||||
// started.
|
|
||||||
chanActiveTimeout time.Duration
|
|
||||||
|
|
||||||
server *server
|
|
||||||
|
|
||||||
// features is the set of features that we advertised to the remote
|
|
||||||
// node.
|
|
||||||
features *lnwire.FeatureVector
|
|
||||||
|
|
||||||
// legacyFeatures is the set of features that we advertised to the remote
|
|
||||||
// node for backwards compatibility. Nodes that have not implemented
|
|
||||||
// flat featurs will still be able to read our feature bits from the
|
|
||||||
// legacy global field, but we will also advertise everything in the
|
|
||||||
// default features field.
|
|
||||||
legacyFeatures *lnwire.FeatureVector
|
|
||||||
|
|
||||||
// outgoingCltvRejectDelta defines the number of blocks before expiry of
|
|
||||||
// an htlc where we don't offer an htlc anymore.
|
|
||||||
outgoingCltvRejectDelta uint32
|
|
||||||
|
|
||||||
// remoteFeatures is the feature vector received from the peer during
|
// remoteFeatures is the feature vector received from the peer during
|
||||||
// the connection handshake.
|
// the connection handshake.
|
||||||
remoteFeatures *lnwire.FeatureVector
|
remoteFeatures *lnwire.FeatureVector
|
||||||
@@ -238,95 +214,41 @@ type peer struct {
     // peer's chansync message with its own over and over again.
     resentChanSyncMsg map[lnwire.ChannelID]struct{}
 
-    // errorBuffer stores a set of errors related to a peer. It contains
-    // error messages that our peer has recently sent us over the wire and
-    // records of unknown messages that were sent to us and, so that we can
-    // track a full record of the communication errors we have had with our
-    // peer. If we choose to disconnect from a peer, it also stores the
-    // reason we had for disconnecting.
-    errorBuffer *queue.CircularBuffer
-
-    // writePool is the task pool to that manages reuse of write buffers.
-    // Write tasks are submitted to the pool in order to conserve the total
-    // number of write buffers allocated at any one time, and decouple write
-    // buffer allocation from the peer life cycle.
-    writePool *pool.Write
-
-    readPool *pool.Read
-
     queueQuit chan struct{}
     quit      chan struct{}
     wg        sync.WaitGroup
 }
 
-// A compile-time check to ensure that peer satisfies the lnpeer.Peer interface.
-var _ lnpeer.Peer = (*peer)(nil)
+// A compile-time check to ensure that Brontide satisfies the lnpeer.Peer interface.
+var _ lnpeer.Peer = (*Brontide)(nil)
 
-// newPeer creates a new peer from an establish connection object, and a
-// pointer to the main server. It takes an error buffer which may contain errors
-// from a previous connection with the peer if we have been connected to them
-// before.
-func newPeer(cfg *Config, conn net.Conn, connReq *connmgr.ConnReq, server *server,
-    addr *lnwire.NetAddress, inbound bool,
-    features, legacyFeatures *lnwire.FeatureVector,
-    chanActiveTimeout time.Duration,
-    outgoingCltvRejectDelta uint32,
-    errBuffer *queue.CircularBuffer) (
-    *peer, error) {
-
-    nodePub := addr.IdentityKey
-
-    p := &peer{
-        conn: conn,
-        addr: addr,
-
+// NewBrontide creates a new Brontide from a peer.Config struct.
+func NewBrontide(cfg Config) *Brontide {
+    p := &Brontide{
         cfg: cfg,
 
         activeSignal: make(chan struct{}),
 
-        inbound: inbound,
-        connReq: connReq,
-
-        server: server,
-
-        features:       features,
-        legacyFeatures: legacyFeatures,
-
-        outgoingCltvRejectDelta: outgoingCltvRejectDelta,
-
         sendQueue:     make(chan outgoingMsg),
         outgoingQueue: make(chan outgoingMsg),
 
         addedChannels:  make(map[lnwire.ChannelID]struct{}),
         activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
         newChannels:    make(chan *newChannelMsg, 1),
 
         activeMsgStreams: make(map[lnwire.ChannelID]*msgStream),
 
         activeChanCloses:   make(map[lnwire.ChannelID]*chancloser.ChanCloser),
         localCloseChanReqs: make(chan *htlcswitch.ChanClose),
         linkFailures:       make(chan linkFailureReport),
         chanCloseMsgs:      make(chan *closeMsg),
         resentChanSyncMsg:  make(map[lnwire.ChannelID]struct{}),
 
-        chanActiveTimeout: chanActiveTimeout,
-
-        errorBuffer: errBuffer,
-
-        writePool: server.writePool,
-        readPool:  server.readPool,
-
         queueQuit: make(chan struct{}),
         quit:      make(chan struct{}),
     }
-    copy(p.pubKeyBytes[:], nodePub.SerializeCompressed())
 
-    return p, nil
+    return p
 }
 
 // Start starts all helper goroutines the peer needs for normal operations. In
 // the case this peer has already been started, then this function is a noop.
-func (p *peer) Start() error {
+func (p *Brontide) Start() error {
     if atomic.AddInt32(&p.started, 1) != 1 {
         return nil
     }
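
NewBrontide is now infallible and takes every dependency through Config instead of reaching into a *server. A hedged sketch of call-site usage; the field names below are taken from the p.cfg references visible elsewhere in this diff, while the surrounding variables are placeholders:

    p := peer.NewBrontide(peer.Config{
        Conn:        conn,        // underlying brontide noise connection
        Addr:        peerAddr,    // *lnwire.NetAddress of the remote node
        PubKeyBytes: pubKeyBytes, // remote node's compressed public key
        ChannelDB:   chanDB,
        Switch:      htlcSwitch,
        ErrorBuffer: errBuffer,
        Quit:        serverQuit,
        // ...remaining dependencies elided...
    })
    if err := p.Start(); err != nil {
        return err
    }
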
@@ -385,7 +307,7 @@ func (p *peer) Start() error {
 
     // Fetch and then load all the active channels we have with this remote
     // peer from the database.
-    activeChans, err := p.server.chanDB.FetchOpenChannels(p.addr.IdentityKey)
+    activeChans, err := p.cfg.ChannelDB.FetchOpenChannels(p.cfg.Addr.IdentityKey)
     if err != nil {
         peerLog.Errorf("unable to fetch active chans "+
             "for peer %v: %v", p, err)
@@ -393,7 +315,7 @@ func (p *peer) Start() error {
     }
 
     if len(activeChans) == 0 {
-        p.server.prunePersistentPeerConnection(p.pubKeyBytes)
+        p.cfg.PrunePersistentPeerConnection(p.cfg.PubKeyBytes)
     }
 
     // Next, load all the active channels we have with this peer,
|
|||||||
|
|
||||||
// initGossipSync initializes either a gossip syncer or an initial routing
|
// initGossipSync initializes either a gossip syncer or an initial routing
|
||||||
// dump, depending on the negotiated synchronization method.
|
// dump, depending on the negotiated synchronization method.
|
||||||
func (p *peer) initGossipSync() {
|
func (p *Brontide) initGossipSync() {
|
||||||
switch {
|
|
||||||
|
|
||||||
// If the remote peer knows of the new gossip queries feature, then
|
// If the remote peer knows of the new gossip queries feature, then
|
||||||
// we'll create a new gossipSyncer in the AuthenticatedGossiper for it.
|
// we'll create a new gossipSyncer in the AuthenticatedGossiper for it.
|
||||||
case p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional):
|
if p.remoteFeatures.HasFeature(lnwire.GossipQueriesOptional) {
|
||||||
srvrLog.Infof("Negotiated chan series queries with %x",
|
peerLog.Infof("Negotiated chan series queries with %x",
|
||||||
p.pubKeyBytes[:])
|
p.cfg.PubKeyBytes[:])
|
||||||
|
|
||||||
// Register the this peer's for gossip syncer with the gossiper.
|
// Register the peer's gossip syncer with the gossiper.
|
||||||
// This is blocks synchronously to ensure the gossip syncer is
|
// This blocks synchronously to ensure the gossip syncer is
|
||||||
// registered with the gossiper before attempting to read
|
// registered with the gossiper before attempting to read
|
||||||
// messages from the remote peer.
|
// messages from the remote peer.
|
||||||
//
|
//
|
||||||
@@ -466,7 +387,7 @@ func (p *peer) initGossipSync() {
         // requires an improved version of the current network
         // bootstrapper to ensure we can find and connect to non-channel
         // peers.
-        p.server.authGossiper.InitSyncState(p)
+        p.cfg.AuthGossiper.InitSyncState(p)
     }
 }
 
@@ -476,7 +397,7 @@ func (p *peer) initGossipSync() {
 // exits.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) QuitSignal() <-chan struct{} {
+func (p *Brontide) QuitSignal() <-chan struct{} {
     return p.quit
 }
 
@@ -484,7 +405,7 @@ func (p *peer) QuitSignal() <-chan struct{} {
 // channels returned by the database. It returns a slice of channel reestablish
 // messages that should be sent to the peer immediately, in case we have borked
 // channels that haven't been closed yet.
-func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
+func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) (
     []lnwire.Message, error) {
 
     // Return a slice of messages to send to the peers in case the channel
@@ -493,7 +414,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
 
     for _, dbChan := range chans {
         lnChan, err := lnwallet.NewLightningChannel(
-            p.server.cc.signer, dbChan, p.server.sigPool,
+            p.cfg.Signer, dbChan, p.cfg.SigPool,
         )
         if err != nil {
             return nil, err
@@ -508,9 +429,8 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
 
         // Skip adding any permanently irreconcilable channels to the
         // htlcswitch.
-        switch {
-        case !dbChan.HasChanStatus(channeldb.ChanStatusDefault) &&
-            !dbChan.HasChanStatus(channeldb.ChanStatusRestored):
+        if !dbChan.HasChanStatus(channeldb.ChanStatusDefault) &&
+            !dbChan.HasChanStatus(channeldb.ChanStatusRestored) {
 
             peerLog.Warnf("ChannelPoint(%v) has status %v, won't "+
                 "start.", chanPoint, dbChan.ChanStatus())
@@ -533,15 +453,10 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
             continue
         }
 
-        _, currentHeight, err := p.server.cc.chainIO.GetBestBlock()
-        if err != nil {
-            return nil, err
-        }
-
         // Before we register this new link with the HTLC Switch, we'll
         // need to fetch its current link-layer forwarding policy from
         // the database.
-        graph := p.server.chanDB.ChannelGraph()
+        graph := p.cfg.ChannelDB.ChannelGraph()
         info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(chanPoint)
         if err != nil && err != channeldb.ErrEdgeNotFound {
             return nil, err
@@ -556,7 +471,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
         // particular channel.
         var selfPolicy *channeldb.ChannelEdgePolicy
         if info != nil && bytes.Equal(info.NodeKey1Bytes[:],
-            p.server.identityECDH.PubKey().SerializeCompressed()) {
+            p.cfg.ServerPubKey[:]) {
 
             selfPolicy = p1
         } else {
@@ -579,7 +494,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
             peerLog.Warnf("Unable to find our forwarding policy "+
                 "for channel %v, using default values",
                 chanPoint)
-            forwardingPolicy = &p.server.cc.routingPolicy
+            forwardingPolicy = &p.cfg.RoutingPolicy
         }
 
         peerLog.Tracef("Using link policy of: %v",
@@ -600,7 +515,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
         }
 
         // Subscribe to the set of on-chain events for this channel.
-        chainEvents, err := p.server.chainArb.SubscribeChannelEvents(
+        chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(
             *chanPoint,
         )
         if err != nil {
@@ -609,7 +524,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
 
         err = p.addLink(
             chanPoint, lnChan, forwardingPolicy, chainEvents,
-            currentHeight, true,
+            true,
         )
         if err != nil {
             return nil, fmt.Errorf("unable to add link %v to "+
@@ -624,12 +539,12 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) (
     return msgs, nil
 }
 
-// addLink creates and adds a new link from the specified channel.
-func (p *peer) addLink(chanPoint *wire.OutPoint,
+// addLink creates and adds a new ChannelLink from the specified channel.
+func (p *Brontide) addLink(chanPoint *wire.OutPoint,
     lnChan *lnwallet.LightningChannel,
     forwardingPolicy *htlcswitch.ForwardingPolicy,
     chainEvents *contractcourt.ChainEventSubscription,
-    currentHeight int32, syncStates bool) error {
+    syncStates bool) error {
 
     // onChannelFailure will be called by the link in case the channel
     // fails for some reason.
@@ -647,29 +562,29 @@ func (p *peer) addLink(chanPoint *wire.OutPoint,
         select {
         case p.linkFailures <- failure:
         case <-p.quit:
-        case <-p.server.quit:
+        case <-p.cfg.Quit:
         }
     }
 
+    updateContractSignals := func(signals *contractcourt.ContractSignals) error {
+        return p.cfg.ChainArb.UpdateContractSignals(*chanPoint, signals)
+    }
+
     linkCfg := htlcswitch.ChannelLinkConfig{
         Peer: p,
-        DecodeHopIterators: p.server.sphinx.DecodeHopIterators,
-        ExtractErrorEncrypter: p.server.sphinx.ExtractErrorEncrypter,
-        FetchLastChannelUpdate: p.server.fetchLastChanUpdate(),
+        DecodeHopIterators: p.cfg.Sphinx.DecodeHopIterators,
+        ExtractErrorEncrypter: p.cfg.Sphinx.ExtractErrorEncrypter,
+        FetchLastChannelUpdate: p.cfg.FetchLastChanUpdate,
         HodlMask: p.cfg.Hodl.Mask(),
-        Registry: p.server.invoices,
-        Switch: p.server.htlcSwitch,
-        Circuits: p.server.htlcSwitch.CircuitModifier(),
-        ForwardPackets: p.server.interceptableSwitch.ForwardPackets,
+        Registry: p.cfg.Invoices,
+        Switch: p.cfg.Switch,
+        Circuits: p.cfg.Switch.CircuitModifier(),
+        ForwardPackets: p.cfg.InterceptSwitch.ForwardPackets,
         FwrdingPolicy: *forwardingPolicy,
-        FeeEstimator: p.server.cc.feeEstimator,
-        PreimageCache: p.server.witnessBeacon,
+        FeeEstimator: p.cfg.FeeEstimator,
+        PreimageCache: p.cfg.WitnessBeacon,
         ChainEvents: chainEvents,
-        UpdateContractSignals: func(signals *contractcourt.ContractSignals) error {
-            return p.server.chainArb.UpdateContractSignals(
-                *chanPoint, signals,
-            )
-        },
+        UpdateContractSignals: updateContractSignals,
         OnChannelFailure: onChannelFailure,
         SyncStates: syncStates,
         BatchTicker: ticker.New(50 * time.Millisecond),
|
|||||||
UnsafeReplay: p.cfg.UnsafeReplay,
|
UnsafeReplay: p.cfg.UnsafeReplay,
|
||||||
MinFeeUpdateTimeout: htlcswitch.DefaultMinLinkFeeUpdateTimeout,
|
MinFeeUpdateTimeout: htlcswitch.DefaultMinLinkFeeUpdateTimeout,
|
||||||
MaxFeeUpdateTimeout: htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
|
MaxFeeUpdateTimeout: htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
|
||||||
OutgoingCltvRejectDelta: p.outgoingCltvRejectDelta,
|
OutgoingCltvRejectDelta: p.cfg.OutgoingCltvRejectDelta,
|
||||||
TowerClient: p.server.towerClient,
|
TowerClient: p.cfg.TowerClient,
|
||||||
MaxOutgoingCltvExpiry: p.cfg.MaxOutgoingCltvExpiry,
|
MaxOutgoingCltvExpiry: p.cfg.MaxOutgoingCltvExpiry,
|
||||||
MaxFeeAllocation: p.cfg.MaxChannelFeeAllocation,
|
MaxFeeAllocation: p.cfg.MaxChannelFeeAllocation,
|
||||||
NotifyActiveLink: p.server.channelNotifier.NotifyActiveLinkEvent,
|
NotifyActiveLink: p.cfg.ChannelNotifier.NotifyActiveLinkEvent,
|
||||||
NotifyActiveChannel: p.server.channelNotifier.NotifyActiveChannelEvent,
|
NotifyActiveChannel: p.cfg.ChannelNotifier.NotifyActiveChannelEvent,
|
||||||
NotifyInactiveChannel: p.server.channelNotifier.NotifyInactiveChannelEvent,
|
NotifyInactiveChannel: p.cfg.ChannelNotifier.NotifyInactiveChannelEvent,
|
||||||
HtlcNotifier: p.server.htlcNotifier,
|
HtlcNotifier: p.cfg.HtlcNotifier,
|
||||||
}
|
}
|
||||||
|
|
||||||
link := htlcswitch.NewChannelLink(linkCfg, lnChan)
|
link := htlcswitch.NewChannelLink(linkCfg, lnChan)
|
||||||
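
Routing every p.server.X access through an injected p.cfg.X is what lets the link config above be built without a live server, which is the core of this refactor. One practical payoff is testability; a test can stand up a Brontide against lightweight fakes, sketched hypothetically below (mockConfig is not part of this PR):

    func TestBrontideStart(t *testing.T) {
        // Hypothetical helper returning a Config whose fields are all
        // mocks/stubs; no full lnd server is required.
        p := peer.NewBrontide(mockConfig(t))
        if err := p.Start(); err != nil {
            t.Fatalf("unable to start peer: %v", err)
        }
        p.Disconnect(fmt.Errorf("test complete"))
    }
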
@@ -695,17 +610,17 @@ func (p *peer) addLink(chanPoint *wire.OutPoint,
     // links going by the same channel id. If one is found, we'll shut it
     // down to ensure that the mailboxes are only ever under the control of
     // one link.
-    p.server.htlcSwitch.RemoveLink(link.ChanID())
+    p.cfg.Switch.RemoveLink(link.ChanID())
 
     // With the channel link created, we'll now notify the htlc switch so
     // this channel can be used to dispatch local payments and also
     // passively forward payments.
-    return p.server.htlcSwitch.AddLink(link)
+    return p.cfg.Switch.AddLink(link)
 }
 
 // maybeSendNodeAnn sends our node announcement to the remote peer if at least
-// one confirmed advertised channel exists with them.
-func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) {
+// one confirmed public channel exists with them.
+func (p *Brontide) maybeSendNodeAnn(channels []*channeldb.OpenChannel) {
     hasConfirmedPublicChan := false
     for _, channel := range channels {
         if channel.IsPending {
@@ -722,27 +637,27 @@ func (p *peer) maybeSendNodeAnn(channels []*channeldb.OpenChannel) {
         return
     }
 
-    ourNodeAnn, err := p.server.genNodeAnnouncement(false)
+    ourNodeAnn, err := p.cfg.GenNodeAnnouncement(false)
     if err != nil {
-        srvrLog.Debugf("Unable to retrieve node announcement: %v", err)
+        peerLog.Debugf("Unable to retrieve node announcement: %v", err)
         return
     }
 
     if err := p.SendMessageLazy(false, &ourNodeAnn); err != nil {
-        srvrLog.Debugf("Unable to resend node announcement to %x: %v",
-            p.pubKeyBytes, err)
+        peerLog.Debugf("Unable to resend node announcement to %x: %v",
+            p.cfg.PubKeyBytes, err)
     }
 }
 
 // WaitForDisconnect waits until the peer has disconnected. A peer may be
-// disconnected if the local or remote side terminating the connection, or an
+// disconnected if the local or remote side terminates the connection, or an
 // irrecoverable protocol error has been encountered. This method will only
 // begin watching the peer's waitgroup after the ready channel or the peer's
 // quit channel are signaled. The ready channel should only be signaled if a
 // call to Start returns no error. Otherwise, if the peer fails to start,
 // calling Disconnect will signal the quit channel and the method will not
 // block, since no goroutines were spawned.
-func (p *peer) WaitForDisconnect(ready chan struct{}) {
+func (p *Brontide) WaitForDisconnect(ready chan struct{}) {
     select {
     case <-ready:
     case <-p.quit:
@@ -754,7 +669,7 @@ func (p *peer) WaitForDisconnect(ready chan struct{}) {
 // Disconnect terminates the connection with the remote peer. Additionally, a
 // signal is sent to the server and htlcSwitch indicating the resources
 // allocated to the peer can now be cleaned up.
-func (p *peer) Disconnect(reason error) {
+func (p *Brontide) Disconnect(reason error) {
     if !atomic.CompareAndSwapInt32(&p.disconnect, 0, 1) {
         return
     }
@@ -765,20 +680,20 @@ func (p *peer) Disconnect(reason error) {
     peerLog.Infof(err.Error())
 
     // Ensure that the TCP connection is properly closed before continuing.
-    p.conn.Close()
+    p.cfg.Conn.Close()
 
     close(p.quit)
 }
 
 // String returns the string representation of this peer.
-func (p *peer) String() string {
-    return fmt.Sprintf("%x@%s", p.pubKeyBytes, p.conn.RemoteAddr())
+func (p *Brontide) String() string {
+    return fmt.Sprintf("%x@%s", p.cfg.PubKeyBytes, p.cfg.Conn.RemoteAddr())
 }
 
 // readNextMessage reads, and returns the next message on the wire along with
 // any additional raw payload.
-func (p *peer) readNextMessage() (lnwire.Message, error) {
-    noiseConn, ok := p.conn.(*brontide.Conn)
+func (p *Brontide) readNextMessage() (lnwire.Message, error) {
+    noiseConn, ok := p.cfg.Conn.(*brontide.Conn)
     if !ok {
         return nil, fmt.Errorf("brontide.Conn required to read messages")
     }
@@ -798,7 +713,7 @@ func (p *peer) readNextMessage() (lnwire.Message, error) {
     // is message oriented and allows nodes to pad on additional data to
     // the message stream.
     var rawMsg []byte
-    err = p.readPool.Submit(func(buf *buffer.Read) error {
+    err = p.cfg.ReadPool.Submit(func(buf *buffer.Read) error {
         // Before reading the body of the message, set the read timeout
         // accordingly to ensure we don't block other readers using the
         // pool. We do so only after the task has been scheduled to
@@ -813,7 +728,6 @@ func (p *peer) readNextMessage() (lnwire.Message, error) {
         rawMsg, readErr = noiseConn.ReadNextBody(buf[:pktLen])
         return readErr
     })
-
     atomic.AddUint64(&p.bytesReceived, uint64(len(rawMsg)))
     if err != nil {
         return nil, err
@@ -840,7 +754,7 @@ func (p *peer) readNextMessage() (lnwire.Message, error) {
 type msgStream struct {
     streamShutdown int32 // To be used atomically.
 
-    peer *peer
+    peer *Brontide
 
     apply func(lnwire.Message)
 
@@ -863,7 +777,7 @@ type msgStream struct {
 // that should be buffered in the internal queue. Callers should set this to a
 // sane value that avoids blocking unnecessarily, but doesn't allow an
 // unbounded amount of memory to be allocated to buffer incoming messages.
-func newMsgStream(p *peer, startMsg, stopMsg string, bufSize uint32,
+func newMsgStream(p *Brontide, startMsg, stopMsg string, bufSize uint32,
     apply func(lnwire.Message)) *msgStream {
 
     stream := &msgStream{
@@ -994,7 +908,9 @@ func (ms *msgStream) AddMsg(msg lnwire.Message) {
 // waitUntilLinkActive waits until the target link is active and returns a
 // ChannelLink to pass messages to. It accomplishes this by subscribing to
 // an ActiveLinkEvent which is emitted by the link when it first starts up.
-func waitUntilLinkActive(p *peer, cid lnwire.ChannelID) htlcswitch.ChannelLink {
+func waitUntilLinkActive(p *Brontide,
+    cid lnwire.ChannelID) htlcswitch.ChannelLink {
+
     // Subscribe to receive channel events.
     //
     // NOTE: If the link is already active by SubscribeChannelEvents, then
@@ -1004,7 +920,7 @@ func waitUntilLinkActive(p *peer, cid lnwire.ChannelID) htlcswitch.ChannelLink {
     // we will get an ActiveLinkEvent notification and retrieve the link. If
     // the call to GetLink is before SubscribeChannelEvents, however, there
     // will be a race condition.
-    sub, err := p.server.channelNotifier.SubscribeChannelEvents()
+    sub, err := p.cfg.ChannelNotifier.SubscribeChannelEvents()
     if err != nil {
         // If we have a non-nil error, then the server is shutting down and we
         // can exit here and return nil. This means no message will be delivered
@@ -1015,7 +931,7 @@ func waitUntilLinkActive(p *peer, cid lnwire.ChannelID) htlcswitch.ChannelLink {
 
     // The link may already be active by this point, and we may have missed the
     // ActiveLinkEvent. Check if the link exists.
-    link, _ := p.server.htlcSwitch.GetLink(cid)
+    link, _ := p.cfg.Switch.GetLink(cid)
     if link != nil {
         return link
     }
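
The ordering here, subscribe first and only then call GetLink, is what closes the race described in the comments. The pattern in isolation, as a self-contained sketch rather than lnd API:

    // raceFreeLookup demonstrates subscribe-then-check: because the event
    // subscription is opened before the direct lookup, an activation can
    // never slip through unobserved between the lookup and the subscription.
    func raceFreeLookup(events <-chan struct{}, lookup func() bool,
        quit <-chan struct{}) bool {

        if lookup() { // already active: no event needed
            return true
        }
        for {
            select {
            case <-events: // activation event arrived: re-check
                if lookup() {
                    return true
                }
            case <-quit:
                return false
            }
        }
    }
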
@@ -1045,7 +961,7 @@ func waitUntilLinkActive(p *peer, cid lnwire.ChannelID) htlcswitch.ChannelLink {
             // The link shouldn't be nil as we received an
             // ActiveLinkEvent. If it is nil, we return nil and the
             // calling function should catch it.
-            link, _ = p.server.htlcSwitch.GetLink(cid)
+            link, _ = p.cfg.Switch.GetLink(cid)
             return link
 
         case <-p.quit:
@@ -1060,15 +976,11 @@ func waitUntilLinkActive(p *peer, cid lnwire.ChannelID) htlcswitch.ChannelLink {
 // dispatch a message to a channel before it is fully active. A reference to the
 // channel this stream forwards to is held in scope to prevent unnecessary
 // lookups.
-func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream {
+func newChanMsgStream(p *Brontide, cid lnwire.ChannelID) *msgStream {
     var chanLink htlcswitch.ChannelLink
 
-    return newMsgStream(p,
-        fmt.Sprintf("Update stream for ChannelID(%x) created", cid[:]),
-        fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]),
-        1000,
-        func(msg lnwire.Message) {
+    apply := func(msg lnwire.Message) {
         // This check is fine because if the link no longer exists, it will
         // be removed from the activeChannels map and subsequent messages
         // shouldn't reach the chan msg stream.
@@ -1092,21 +1004,30 @@ func newChanMsgStream(p *peer, cid lnwire.ChannelID) *msgStream {
         }
 
         chanLink.HandleChannelUpdate(msg)
-        },
+    }
+
+    return newMsgStream(p,
+        fmt.Sprintf("Update stream for ChannelID(%x) created", cid[:]),
+        fmt.Sprintf("Update stream for ChannelID(%x) exiting", cid[:]),
+        1000,
+        apply,
     )
 }
 
 // newDiscMsgStream is used to setup a msgStream between the peer and the
 // authenticated gossiper. This stream should be used to forward all remote
 // channel announcements.
-func newDiscMsgStream(p *peer) *msgStream {
-    return newMsgStream(p,
+func newDiscMsgStream(p *Brontide) *msgStream {
+    apply := func(msg lnwire.Message) {
+        p.cfg.AuthGossiper.ProcessRemoteAnnouncement(msg, p)
+    }
+
+    return newMsgStream(
+        p,
         "Update stream for gossiper created",
         "Update stream for gossiper exited",
         1000,
-        func(msg lnwire.Message) {
-            p.server.authGossiper.ProcessRemoteAnnouncement(msg, p)
-        },
+        apply,
     )
 }
 
@@ -1114,7 +1035,7 @@ func newDiscMsgStream(p *peer) *msgStream {
 // properly dispatching the handling of the message to the proper subsystem.
 //
 // NOTE: This method MUST be run as a goroutine.
-func (p *peer) readHandler() {
+func (p *Brontide) readHandler() {
     defer p.wg.Done()
 
     // We'll stop the timer after a new messages is received, and also
@@ -1209,15 +1130,15 @@ out:
             p.queueMsg(lnwire.NewPong(pongBytes), nil)
 
         case *lnwire.OpenChannel:
-            p.server.fundingMgr.processFundingOpen(msg, p)
+            p.cfg.ProcessFundingOpen(msg, p)
         case *lnwire.AcceptChannel:
-            p.server.fundingMgr.processFundingAccept(msg, p)
+            p.cfg.ProcessFundingAccept(msg, p)
         case *lnwire.FundingCreated:
-            p.server.fundingMgr.processFundingCreated(msg, p)
+            p.cfg.ProcessFundingCreated(msg, p)
         case *lnwire.FundingSigned:
-            p.server.fundingMgr.processFundingSigned(msg, p)
+            p.cfg.ProcessFundingSigned(msg, p)
         case *lnwire.FundingLocked:
-            p.server.fundingMgr.processFundingLocked(msg, p)
+            p.cfg.ProcessFundingLocked(msg, p)
 
         case *lnwire.Shutdown:
             select {
@@ -1310,7 +1231,7 @@ out:
 
 // isActiveChannel returns true if the provided channel id is active, otherwise
 // returns false.
-func (p *peer) isActiveChannel(chanID lnwire.ChannelID) bool {
+func (p *Brontide) isActiveChannel(chanID lnwire.ChannelID) bool {
     p.activeChanMtx.RLock()
     _, ok := p.activeChannels[chanID]
     p.activeChanMtx.RUnlock()
@@ -1319,9 +1240,9 @@ func (p *peer) isActiveChannel(chanID lnwire.ChannelID) bool {
 
 // storeError stores an error in our peer's buffer of recent errors with the
 // current timestamp. Errors are only stored if we have at least one active
-// channel with the peer to mitigate dos attack vectors where a peer costlessly
+// channel with the peer to mitigate a dos vector where a peer costlessly
 // connects to us and spams us with errors.
-func (p *peer) storeError(err error) {
+func (p *Brontide) storeError(err error) {
     var haveChannels bool
 
     p.activeChanMtx.RLock()
@@ -1343,8 +1264,8 @@ func (p *peer) storeError(err error) {
         return
     }
 
-    p.errorBuffer.Add(
-        &timestampedError{timestamp: time.Now(), error: err},
+    p.cfg.ErrorBuffer.Add(
+        &TimestampedError{Timestamp: time.Now(), Error: err},
     )
 }
 
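
Exporting TimestampedError, with exported Error and Timestamp fields, is what lets code outside the package (for instance the rpcserver when listing peers) interpret the buffer's contents. A sketch, assuming queue.CircularBuffer's List accessor:

    // Entries come back as interface{} values and are asserted back to the
    // now-exported type.
    for _, e := range errBuffer.List() {
        tsErr := e.(*peer.TimestampedError)
        fmt.Printf("%v: %v\n", tsErr.Timestamp, tsErr.Error)
    }
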
@@ -1354,8 +1275,8 @@ func (p *peer) storeError(err error) {
 // open with the peer.
 //
 // NOTE: This method should only be called from within the readHandler.
-func (p *peer) handleError(msg *lnwire.Error) bool {
-    key := p.addr.IdentityKey
+func (p *Brontide) handleError(msg *lnwire.Error) bool {
+    key := p.cfg.Addr.IdentityKey
 
     // Store the error we have received.
     p.storeError(msg)
@ -1372,8 +1293,8 @@ func (p *peer) handleError(msg *lnwire.Error) bool {
|
|||||||
|
|
||||||
// If the channel ID for the error message corresponds to a pending
|
// If the channel ID for the error message corresponds to a pending
|
||||||
// channel, then the funding manager will handle the error.
|
// channel, then the funding manager will handle the error.
|
||||||
case p.server.fundingMgr.IsPendingChannel(msg.ChanID, key):
|
case p.cfg.IsPendingChannel(msg.ChanID, key):
|
||||||
p.server.fundingMgr.processFundingError(msg, key)
|
p.cfg.ProcessFundingError(msg, key)
|
||||||
return false
|
return false
|
||||||
|
|
||||||
// If not we hand the error to the channel link for this channel.
|
// If not we hand the error to the channel link for this channel.
|
||||||
@ -1522,7 +1443,7 @@ func messageSummary(msg lnwire.Message) string {
|
|||||||
// less spammy log messages in trace mode by setting the 'Curve" parameter to
|
// less spammy log messages in trace mode by setting the 'Curve" parameter to
|
||||||
// nil. Doing this avoids printing out each of the field elements in the curve
|
// nil. Doing this avoids printing out each of the field elements in the curve
|
||||||
// parameters for secp256k1.
|
// parameters for secp256k1.
|
||||||
func (p *peer) logWireMessage(msg lnwire.Message, read bool) {
|
func (p *Brontide) logWireMessage(msg lnwire.Message, read bool) {
|
||||||
summaryPrefix := "Received"
|
summaryPrefix := "Received"
|
||||||
if !read {
|
if !read {
|
||||||
summaryPrefix = "Sending"
|
summaryPrefix = "Sending"
|
||||||
@ -1581,10 +1502,10 @@ func (p *peer) logWireMessage(msg lnwire.Message, read bool) {
|
|||||||
|
|
||||||
// writeMessage writes and flushes the target lnwire.Message to the remote peer.
|
// writeMessage writes and flushes the target lnwire.Message to the remote peer.
|
||||||
// If the passed message is nil, this method will only try to flush an existing
|
// If the passed message is nil, this method will only try to flush an existing
|
||||||
// message buffered on the connection. It is safe to recall this method with a
|
// message buffered on the connection. It is safe to call this method again
|
||||||
// nil message iff a timeout error is returned. This will continue to flush the
|
// with a nil message iff a timeout error is returned. This will continue to
|
||||||
// pending message to the wire.
|
// flush the pending message to the wire.
|
||||||
func (p *peer) writeMessage(msg lnwire.Message) error {
|
func (p *Brontide) writeMessage(msg lnwire.Message) error {
|
||||||
// Simply exit if we're shutting down.
|
// Simply exit if we're shutting down.
|
||||||
if atomic.LoadInt32(&p.disconnect) != 0 {
|
if atomic.LoadInt32(&p.disconnect) != 0 {
|
||||||
return lnpeer.ErrPeerExiting
|
return lnpeer.ErrPeerExiting
|
||||||
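The reworded comment above documents a subtle contract: after a timeout, writeMessage may be re-invoked with a nil message, which flushes the ciphertext already buffered on the connection instead of encoding the message a second time. A hedged sketch of how a caller might honor that contract; the write signature, the fake timeout, and isTimeout are all illustrative, not lnd's retry loop.

package main

import (
	"fmt"
	"net"
)

// writeWithRetry retries a timed-out write by passing nil, which by the
// contract above flushes the already-buffered message. The write func
// stands in for the peer's writeMessage.
func writeWithRetry(write func(msg []byte) error, msg []byte) error {
	err := write(msg)
	for isTimeout(err) {
		err = write(nil) // flush only, do not re-encode
	}
	return err
}

func isTimeout(err error) bool {
	nerr, ok := err.(net.Error)
	return ok && nerr.Timeout()
}

// fakeTimeout satisfies net.Error so we can simulate a write deadline.
type fakeTimeout struct{}

func (fakeTimeout) Error() string   { return "write timeout" }
func (fakeTimeout) Timeout() bool   { return true }
func (fakeTimeout) Temporary() bool { return true }

func main() {
	calls := 0
	write := func(msg []byte) error {
		calls++
		if calls == 1 {
			return fakeTimeout{} // first attempt times out
		}
		return nil // the nil-message flush succeeds
	}
	fmt.Println(writeWithRetry(write, []byte("init")), "after", calls, "calls")
}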
@@ -1595,7 +1516,7 @@ func (p *peer) writeMessage(msg lnwire.Message) error {
 		p.logWireMessage(msg, false)
 	}
 
-	noiseConn, ok := p.conn.(*brontide.Conn)
+	noiseConn, ok := p.cfg.Conn.(*brontide.Conn)
 	if !ok {
 		return fmt.Errorf("brontide.Conn required to write messages")
 	}
@@ -1631,7 +1552,7 @@ func (p *peer) writeMessage(msg lnwire.Message) error {
 
 	// Otherwise, this is a new message. We'll acquire a write buffer to
 	// serialize the message and buffer the ciphertext on the connection.
-	err := p.writePool.Submit(func(buf *bytes.Buffer) error {
+	err := p.cfg.WritePool.Submit(func(buf *bytes.Buffer) error {
 		// Using a buffer allocated by the write pool, encode the
 		// message directly into the buffer.
 		_, writeErr := lnwire.WriteMessage(buf, msg, 0)
@@ -1658,7 +1579,7 @@ func (p *peer) writeMessage(msg lnwire.Message) error {
 // drained.
 //
 // NOTE: This method MUST be run as a goroutine.
-func (p *peer) writeHandler() {
+func (p *Brontide) writeHandler() {
 	// We'll stop the timer after a new messages is sent, and also reset it
 	// after we process the next message.
 	idleTimer := time.AfterFunc(idleTimeout, func() {
@@ -1751,7 +1672,7 @@ out:
 // to be eventually sent out on the wire by the writeHandler.
 //
 // NOTE: This method MUST be run as a goroutine.
-func (p *peer) queueHandler() {
+func (p *Brontide) queueHandler() {
 	defer p.wg.Done()
 
 	// priorityMsgs holds an in order list of messages deemed high-priority
@@ -1819,7 +1740,7 @@ func (p *peer) queueHandler() {
 // connection is still active.
 //
 // NOTE: This method MUST be run as a goroutine.
-func (p *peer) pingHandler() {
+func (p *Brontide) pingHandler() {
 	defer p.wg.Done()
 
 	pingTicker := time.NewTicker(pingInterval)
@@ -1840,32 +1761,35 @@ out:
 }
 
 // PingTime returns the estimated ping time to the peer in microseconds.
-func (p *peer) PingTime() int64 {
+func (p *Brontide) PingTime() int64 {
 	return atomic.LoadInt64(&p.pingTime)
 }
 
 // queueMsg adds the lnwire.Message to the back of the high priority send queue.
 // If the errChan is non-nil, an error is sent back if the msg failed to queue
 // or failed to write, and nil otherwise.
-func (p *peer) queueMsg(msg lnwire.Message, errChan chan error) {
+func (p *Brontide) queueMsg(msg lnwire.Message, errChan chan error) {
 	p.queue(true, msg, errChan)
 }
 
 // queueMsgLazy adds the lnwire.Message to the back of the low priority send
 // queue. If the errChan is non-nil, an error is sent back if the msg failed to
 // queue or failed to write, and nil otherwise.
-func (p *peer) queueMsgLazy(msg lnwire.Message, errChan chan error) {
+func (p *Brontide) queueMsgLazy(msg lnwire.Message, errChan chan error) {
 	p.queue(false, msg, errChan)
 }
 
 // queue sends a given message to the queueHandler using the passed priority. If
 // the errChan is non-nil, an error is sent back if the msg failed to queue or
 // failed to write, and nil otherwise.
-func (p *peer) queue(priority bool, msg lnwire.Message, errChan chan error) {
+func (p *Brontide) queue(priority bool, msg lnwire.Message,
+	errChan chan error) {
+
 	select {
 	case p.outgoingQueue <- outgoingMsg{priority, msg, errChan}:
 	case <-p.quit:
-		peerLog.Tracef("Peer shutting down, could not enqueue msg.")
+		peerLog.Tracef("Peer shutting down, could not enqueue msg: %v.",
+			spew.Sdump(msg))
 		if errChan != nil {
 			errChan <- lnpeer.ErrPeerExiting
 		}
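queue, queueMsg, and queueMsgLazy funnel every outbound message through a single select that also watches the quit channel, so a shutdown can never strand a caller waiting on its error channel. A runnable reduction of that pattern; the types and names below are ours, not lnd's.

package main

import (
	"errors"
	"fmt"
)

var errPeerExiting = errors.New("peer exiting")

// outgoingMsg mirrors the shape of the struct queued above.
type outgoingMsg struct {
	priority bool
	payload  string
	errChan  chan error
}

type peer struct {
	outgoingQueue chan outgoingMsg
	quit          chan struct{}
}

// queue either hands the message to the queue handler or fails fast
// when the peer is shutting down, so callers blocked on errChan are
// never stranded.
func (p *peer) queue(priority bool, payload string, errChan chan error) {
	select {
	case p.outgoingQueue <- outgoingMsg{priority, payload, errChan}:
	case <-p.quit:
		if errChan != nil {
			errChan <- errPeerExiting
		}
	}
}

func main() {
	p := &peer{
		outgoingQueue: make(chan outgoingMsg),
		quit:          make(chan struct{}),
	}

	// Happy path: a queueHandler-like goroutine drains the channel.
	done := make(chan struct{})
	go func() {
		fmt.Println("handler got:", (<-p.outgoingQueue).payload)
		close(done)
	}()
	p.queue(true, "ping", nil)
	<-done

	// Shutdown path: nobody is reading and quit is closed, so the
	// caller gets an error instead of blocking forever.
	close(p.quit)
	errChan := make(chan error, 1)
	p.queue(false, "pong", errChan)
	fmt.Println(<-errChan)
}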
@@ -1874,7 +1798,7 @@ func (p *peer) queue(priority bool, msg lnwire.Message, errChan chan error) {
 
 // ChannelSnapshots returns a slice of channel snapshots detailing all
 // currently active channels maintained with the remote peer.
-func (p *peer) ChannelSnapshots() []*channeldb.ChannelSnapshot {
+func (p *Brontide) ChannelSnapshots() []*channeldb.ChannelSnapshot {
 	p.activeChanMtx.RLock()
 	defer p.activeChanMtx.RUnlock()
 
@@ -1900,8 +1824,8 @@ func (p *peer) ChannelSnapshots() []*channeldb.ChannelSnapshot {
 
 // genDeliveryScript returns a new script to be used to send our funds to in
 // the case of a cooperative channel close negotiation.
-func (p *peer) genDeliveryScript() ([]byte, error) {
-	deliveryAddr, err := p.server.cc.wallet.NewAddress(
+func (p *Brontide) genDeliveryScript() ([]byte, error) {
+	deliveryAddr, err := p.cfg.Wallet.NewAddress(
 		lnwallet.WitnessPubKey, false,
 	)
 	if err != nil {
@@ -1918,13 +1842,13 @@ func (p *peer) genDeliveryScript() ([]byte, error) {
 // channels maintained with the remote peer.
 //
 // NOTE: This method MUST be run as a goroutine.
-func (p *peer) channelManager() {
+func (p *Brontide) channelManager() {
 	defer p.wg.Done()
 
 	// reenableTimeout will fire once after the configured channel status
 	// interval has elapsed. This will trigger us to sign new channel
 	// updates and broadcast them with the "disabled" flag unset.
-	reenableTimeout := time.After(p.chanActiveTimeout)
+	reenableTimeout := time.After(p.cfg.ChanActiveTimeout)
 
 out:
 	for {
@@ -1976,7 +1900,7 @@ out:
 			// set of active channels, so we can look it up later
 			// easily according to its channel ID.
 			lnChan, err := lnwallet.NewLightningChannel(
-				p.server.cc.signer, newChan, p.server.sigPool,
+				p.cfg.Signer, newChan, p.cfg.SigPool,
 			)
 			if err != nil {
 				p.activeChanMtx.Unlock()
@@ -2001,16 +1925,7 @@ out:
 			// necessary items it needs to function.
 			//
 			// TODO(roasbeef): panic on below?
-			_, currentHeight, err := p.server.cc.chainIO.GetBestBlock()
-			if err != nil {
-				err := fmt.Errorf("unable to get best "+
-					"block: %v", err)
-				peerLog.Errorf(err.Error())
-
-				newChanReq.err <- err
-				continue
-			}
-			chainEvents, err := p.server.chainArb.SubscribeChannelEvents(
+			chainEvents, err := p.cfg.ChainArb.SubscribeChannelEvents(
 				*chanPoint,
 			)
 			if err != nil {
@@ -2029,7 +1944,7 @@ out:
 			// at initial channel creation. Note that the maximum HTLC value
 			// defaults to the cap on the total value of outstanding HTLCs.
 			fwdMinHtlc := lnChan.FwdMinHtlc()
-			defaultPolicy := p.server.cc.routingPolicy
+			defaultPolicy := p.cfg.RoutingPolicy
 			forwardingPolicy := &htlcswitch.ForwardingPolicy{
 				MinHTLCOut: fwdMinHtlc,
 				MaxHTLC:    newChan.LocalChanCfg.MaxPendingAmount,
@@ -2048,7 +1963,7 @@ out:
 			// Create the link and add it to the switch.
 			err = p.addLink(
 				chanPoint, lnChan, forwardingPolicy,
-				chainEvents, currentHeight, shouldReestablish,
+				chainEvents, shouldReestablish,
 			)
 			if err != nil {
 				err := fmt.Errorf("can't register new channel "+
@@ -2079,69 +1994,7 @@ out:
 		// message from the remote peer, we'll use this message to
 		// advance the chan closer state machine.
 		case closeMsg := <-p.chanCloseMsgs:
-			// We'll now fetch the matching closing state machine
-			// in order to continue, or finalize the channel
-			// closure process.
-			chanCloser, err := p.fetchActiveChanCloser(closeMsg.cid)
-			if err != nil {
-				// If the channel is not known to us, we'll
-				// simply ignore this message.
-				if err == ErrChannelNotFound {
-					continue
-				}
-
-				peerLog.Errorf("Unable to respond to remote "+
-					"close msg: %v", err)
-
-				errMsg := &lnwire.Error{
-					ChanID: closeMsg.cid,
-					Data:   lnwire.ErrorData(err.Error()),
-				}
-				p.queueMsg(errMsg, nil)
-				continue
-			}
-
-			// Next, we'll process the next message using the
-			// target state machine. We'll either continue
-			// negotiation, or halt.
-			msgs, closeFin, err := chanCloser.ProcessCloseMsg(
-				closeMsg.msg,
-			)
-			if err != nil {
-				err := fmt.Errorf("unable to process close "+
-					"msg: %v", err)
-				peerLog.Error(err)
-
-				// As the negotiations failed, we'll reset the
-				// channel state to ensure we act to on-chain
-				// events as normal.
-				chanCloser.Channel().ResetState()
-
-				if chanCloser.CloseRequest() != nil {
-					chanCloser.CloseRequest().Err <- err
-				}
-				delete(p.activeChanCloses, closeMsg.cid)
-				continue
-			}
-
-			// Queue any messages to the remote peer that need to
-			// be sent as a part of this latest round of
-			// negotiations.
-			for _, msg := range msgs {
-				p.queueMsg(msg, nil)
-			}
-
-			// If we haven't finished close negotiations, then
-			// we'll continue as we can't yet finalize the closure.
-			if !closeFin {
-				continue
-			}
-
-			// Otherwise, we've agreed on a closing fee! In this
-			// case, we'll wrap up the channel closure by notifying
-			// relevant sub-systems and launching a goroutine to
-			// wait for close tx conf.
-			p.finalizeChanClosure(chanCloser)
+			p.handleCloseMsg(closeMsg)
 
 		// The channel reannounce delay has elapsed, broadcast the
 		// reenabled channel updates to the network. This should only
@@ -2162,7 +2015,6 @@ out:
 			reenableTimeout = nil
 
 		case <-p.quit:
-
 			// As, we've been signalled to exit, we'll reset all
 			// our active channel back to their default state.
 			p.activeChanMtx.Lock()
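The `reenableTimeout = nil` line above relies on a standard Go idiom: a receive from a nil channel blocks forever, so nilling out the time.After channel permanently disables that select case after it fires once. A small runnable demonstration of the trick, with the durations chosen only for the example:

package main

import (
	"fmt"
	"time"
)

func main() {
	// time.After yields a channel that fires exactly once. After
	// handling it, setting the variable to nil disables the case for
	// good, which is the same technique channelManager uses for its
	// re-enable timeout.
	reenable := time.After(10 * time.Millisecond)
	tick := time.NewTicker(5 * time.Millisecond)
	defer tick.Stop()

	for i := 0; i < 5; i++ {
		select {
		case <-reenable:
			fmt.Println("re-enable fired once")
			reenable = nil // nil channel: this case never fires again
		case <-tick.C:
			fmt.Println("other work")
		}
	}
}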
@@ -2185,7 +2037,7 @@ out:
 // peer, and reenables each public, non-pending channel. This is done at the
 // gossip level by broadcasting a new ChannelUpdate with the disabled bit unset.
 // No message will be sent if the channel is already enabled.
-func (p *peer) reenableActiveChannels() {
+func (p *Brontide) reenableActiveChannels() {
 	// First, filter all known channels with this peer for ones that are
 	// both public and not pending.
 	var activePublicChans []wire.OutPoint
@@ -2221,9 +2073,9 @@ func (p *peer) reenableActiveChannels() {
 	// disabled bit to false and send out a new ChannelUpdate. If this
 	// channel is already active, the update won't be sent.
 	for _, chanPoint := range activePublicChans {
-		err := p.server.chanStatusMgr.RequestEnable(chanPoint)
+		err := p.cfg.ChanStatusMgr.RequestEnable(chanPoint)
 		if err != nil {
-			srvrLog.Errorf("Unable to enable channel %v: %v",
+			peerLog.Errorf("Unable to enable channel %v: %v",
 				chanPoint, err)
 		}
 	}
@@ -2233,7 +2085,7 @@ func (p *peer) reenableActiveChannels() {
 // for the target channel ID. If the channel isn't active an error is returned.
 // Otherwise, either an existing state machine will be returned, or a new one
 // will be created.
-func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (
+func (p *Brontide) fetchActiveChanCloser(chanID lnwire.ChannelID) (
 	*chancloser.ChanCloser, error) {
 
 	// First, we'll ensure that we actually know of the target channel. If
@@ -2278,14 +2130,14 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (
 		// In order to begin fee negotiations, we'll first compute our
 		// target ideal fee-per-kw. We'll set this to a lax value, as
 		// we weren't the ones that initiated the channel closure.
-		feePerKw, err := p.server.cc.feeEstimator.EstimateFeePerKW(6)
+		feePerKw, err := p.cfg.FeeEstimator.EstimateFeePerKW(6)
 		if err != nil {
 			peerLog.Errorf("unable to query fee estimator: %v", err)
 
 			return nil, fmt.Errorf("unable to estimate fee")
 		}
 
-		_, startingHeight, err := p.server.cc.chainIO.GetBestBlock()
+		_, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
 		if err != nil {
 			peerLog.Errorf("unable to obtain best block: %v", err)
 			return nil, fmt.Errorf("cannot obtain best block")
@@ -2294,11 +2146,11 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (
 		chanCloser = chancloser.NewChanCloser(
 			chancloser.ChanCloseCfg{
 				Channel: channel,
-				UnregisterChannel: p.server.htlcSwitch.RemoveLink,
-				BroadcastTx: p.server.cc.wallet.PublishTransaction,
-				DisableChannel: p.server.chanStatusMgr.RequestDisable,
+				UnregisterChannel: p.cfg.Switch.RemoveLink,
+				BroadcastTx: p.cfg.Wallet.PublishTransaction,
+				DisableChannel: p.cfg.ChanStatusMgr.RequestDisable,
 				Disconnect: func() error {
-					return p.server.DisconnectPeer(p.IdentityKey())
+					return p.cfg.DisconnectPeer(p.IdentityKey())
 				},
 				Quit: p.quit,
 			},
@@ -2347,7 +2199,7 @@ func chooseDeliveryScript(upfront,
 
 // handleLocalCloseReq kicks-off the workflow to execute a cooperative or
 // forced unilateral closure of the channel initiated by a local subsystem.
-func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
+func (p *Brontide) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 	chanID := lnwire.NewChanIDFromOutPoint(req.ChanPoint)
 
 	p.activeChanMtx.RLock()
@@ -2400,7 +2252,7 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 
 		// Next, we'll create a new channel closer state machine to
 		// handle the close negotiation.
-		_, startingHeight, err := p.server.cc.chainIO.GetBestBlock()
+		_, startingHeight, err := p.cfg.ChainIO.GetBestBlock()
 		if err != nil {
 			peerLog.Errorf(err.Error())
 			req.Err <- err
@@ -2410,11 +2262,11 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 		chanCloser := chancloser.NewChanCloser(
 			chancloser.ChanCloseCfg{
 				Channel: channel,
-				UnregisterChannel: p.server.htlcSwitch.RemoveLink,
-				BroadcastTx: p.server.cc.wallet.PublishTransaction,
-				DisableChannel: p.server.chanStatusMgr.RequestDisable,
+				UnregisterChannel: p.cfg.Switch.RemoveLink,
+				BroadcastTx: p.cfg.Wallet.PublishTransaction,
+				DisableChannel: p.cfg.ChanStatusMgr.RequestDisable,
 				Disconnect: func() error {
-					return p.server.DisconnectPeer(p.IdentityKey())
+					return p.cfg.DisconnectPeer(p.IdentityKey())
 				},
 				Quit: p.quit,
 			},
@@ -2453,10 +2305,10 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
 	}
 }
 
-// linkFailureReport is sent to the channelManager whenever a link that was
-// added to the switch reports a link failure, and is forced to exit. The report
-// houses the necessary information to cleanup the channel state, send back the
-// error message, and force close if necessary.
+// linkFailureReport is sent to the channelManager whenever a link reports a
+// link failure, and is forced to exit. The report houses the necessary
+// information to clean up the channel state, send back the error message, and
+// force close if necessary.
 type linkFailureReport struct {
 	chanPoint wire.OutPoint
 	chanID    lnwire.ChannelID
@@ -2465,10 +2317,10 @@ type linkFailureReport struct {
 }
 
 // handleLinkFailure processes a link failure report when a link in the switch
-// fails. It handles facilitates removal of all channel state within the peer,
+// fails. It facilitates the removal of all channel state within the peer,
 // force closing the channel depending on severity, and sending the error
 // message back to the remote party.
-func (p *peer) handleLinkFailure(failure linkFailureReport) {
+func (p *Brontide) handleLinkFailure(failure linkFailureReport) {
 	// We begin by wiping the link, which will remove it from the switch,
 	// such that it won't be attempted used for any more updates.
 	//
@@ -2484,7 +2336,7 @@ func (p *peer) handleLinkFailure(failure linkFailureReport) {
 		peerLog.Warnf("Force closing link(%v)",
 			failure.shortChanID)
 
-		closeTx, err := p.server.chainArb.ForceCloseContract(
+		closeTx, err := p.cfg.ChainArb.ForceCloseContract(
 			failure.chanPoint,
 		)
 		if err != nil {
@@ -2523,7 +2375,7 @@ func (p *peer) handleLinkFailure(failure linkFailureReport) {
 // machine should be passed in. Once the transaction has been sufficiently
 // confirmed, the channel will be marked as fully closed within the database,
 // and any clients will be notified of updates to the closing state.
-func (p *peer) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
+func (p *Brontide) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
 	closeReq := chanCloser.CloseRequest()
 
 	// First, we'll clear all indexes related to the channel in question.
@@ -2533,7 +2385,7 @@ func (p *peer) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
 	// Next, we'll launch a goroutine which will request to be notified by
 	// the ChainNotifier once the closure transaction obtains a single
 	// confirmation.
-	notifier := p.server.cc.chainNotifier
+	notifier := p.cfg.ChainNotifier
 
 	// If any error happens during waitForChanToClose, forward it to
 	// closeReq. If this channel closure is not locally initiated, closeReq
@@ -2556,18 +2408,18 @@ func (p *peer) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
 	// If this is a locally requested shutdown, update the caller with a
 	// new event detailing the current pending state of this request.
 	if closeReq != nil {
-		closeReq.Updates <- &pendingUpdate{
+		closeReq.Updates <- &PendingUpdate{
 			Txid: closingTxid[:],
 		}
 	}
 
-	go waitForChanToClose(chanCloser.NegotiationHeight(), notifier, errChan,
+	go WaitForChanToClose(chanCloser.NegotiationHeight(), notifier, errChan,
 		chanPoint, &closingTxid, closingTx.TxOut[0].PkScript, func() {
 
 			// Respond to the local subsystem which requested the
 			// channel closure.
 			if closeReq != nil {
-				closeReq.Updates <- &channelCloseUpdate{
+				closeReq.Updates <- &ChannelCloseUpdate{
 					ClosingTxid: closingTxid[:],
 					Success:     true,
 				}
@@ -2575,12 +2427,12 @@ func (p *peer) finalizeChanClosure(chanCloser *chancloser.ChanCloser) {
 		})
 }
 
-// waitForChanToClose uses the passed notifier to wait until the channel has
+// WaitForChanToClose uses the passed notifier to wait until the channel has
 // been detected as closed on chain and then concludes by executing the
 // following actions: the channel point will be sent over the settleChan, and
 // finally the callback will be executed. If any error is encountered within
 // the function, then it will be sent over the errChan.
-func waitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
+func WaitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
 	errChan chan error, chanPoint *wire.OutPoint,
 	closingTxID *chainhash.Hash, closeScript []byte, cb func()) {
 
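WaitForChanToClose is now exported, presumably so code outside the peer package can reuse the wait-then-callback flow. A stripped-down sketch of that shape, with a hand-rolled notifier interface standing in for chainntnfs.ChainNotifier; the real interface takes pkscripts, heights, and confirmation counts, all omitted here, and every name below is illustrative.

package main

import "fmt"

// confEvent stands in for a confirmation subscription.
type confEvent struct{ confirmed chan struct{} }

type notifier interface {
	registerConf(txid string) *confEvent
}

type fakeNotifier struct{}

func (fakeNotifier) registerConf(txid string) *confEvent {
	ev := &confEvent{confirmed: make(chan struct{})}
	close(ev.confirmed) // pretend the closing tx confirmed instantly
	return ev
}

// waitForChanToClose mirrors the exported helper's shape: block for one
// confirmation of the closing transaction, then run the callback.
func waitForChanToClose(n notifier, closingTxid string, cb func()) {
	ev := n.registerConf(closingTxid)
	<-ev.confirmed
	cb()
}

func main() {
	waitForChanToClose(fakeNotifier{}, "txid-placeholder", func() {
		fmt.Println("channel fully closed on chain")
	})
}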
@@ -2617,8 +2469,8 @@ func waitForChanToClose(bestHeight uint32, notifier chainntnfs.ChainNotifier,
 }
 
 // WipeChannel removes the passed channel point from all indexes associated with
-// the peer, and the switch.
-func (p *peer) WipeChannel(chanPoint *wire.OutPoint) {
+// the peer and the switch.
+func (p *Brontide) WipeChannel(chanPoint *wire.OutPoint) {
 	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
 
 	p.activeChanMtx.Lock()
@@ -2627,12 +2479,12 @@ func (p *peer) WipeChannel(chanPoint *wire.OutPoint) {
 
 	// Instruct the HtlcSwitch to close this link as the channel is no
 	// longer active.
-	p.server.htlcSwitch.RemoveLink(chanID)
+	p.cfg.Switch.RemoveLink(chanID)
 }
 
 // handleInitMsg handles the incoming init message which contains global and
-// local features vectors. If feature vectors are incompatible then disconnect.
-func (p *peer) handleInitMsg(msg *lnwire.Init) error {
+// local feature vectors. If feature vectors are incompatible then disconnect.
+func (p *Brontide) handleInitMsg(msg *lnwire.Init) error {
 	// First, merge any features from the legacy global features field into
 	// those presented in the local features fields.
 	err := msg.Features.Merge(msg.GlobalFeatures)
@@ -2641,7 +2493,7 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error {
 			err)
 	}
 
-	// Then, finalize the remote feature vector providing the flatteneed
+	// Then, finalize the remote feature vector providing the flattened
 	// feature bit namespace.
 	p.remoteFeatures = lnwire.NewFeatureVector(
 		msg.Features, lnwire.Features,
@@ -2654,8 +2506,8 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error {
 		return fmt.Errorf("invalid remote features: %v", err)
 	}
 
-	// Ensure the remote party's feature vector contains all transistive
-	// dependencies. We know ours are are correct since they are validated
+	// Ensure the remote party's feature vector contains all transitive
+	// dependencies. We know ours are correct since they are validated
 	// during the feature manager's instantiation.
 	err = feature.ValidateDeps(p.remoteFeatures)
 	if err != nil {
@@ -2664,8 +2516,7 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error {
 
 	// Now that we know we understand their requirements, we'll check to
 	// see if they don't support anything that we deem to be mandatory.
-	switch {
-	case !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired):
+	if !p.remoteFeatures.HasFeature(lnwire.DataLossProtectRequired) {
 		return fmt.Errorf("data loss protection required")
 	}
 
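The switch-to-if change above keeps the behavior: after merging legacy global bits and validating transitive dependencies, the handshake fails if the remote side lacks a bit we treat as mandatory. The same check in miniature; the bit number and the map-backed vector are made up for the sketch, as lnwire defines the real feature machinery.

package main

import "fmt"

const dataLossProtectRequired = 0 // made-up bit number

type featureVector map[int]bool

func (fv featureVector) hasFeature(bit int) bool { return fv[bit] }

// checkMandatory mirrors the if statement above: a peer that cannot do
// data loss protection is rejected during the init handshake.
func checkMandatory(remote featureVector) error {
	if !remote.hasFeature(dataLossProtectRequired) {
		return fmt.Errorf("data loss protection required")
	}
	return nil
}

func main() {
	fmt.Println(checkMandatory(featureVector{}))
	fmt.Println(checkMandatory(featureVector{dataLossProtectRequired: true}))
}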
@@ -2677,8 +2528,8 @@ func (p *peer) handleInitMsg(msg *lnwire.Init) error {
 // behavior off the set of negotiated feature bits.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) LocalFeatures() *lnwire.FeatureVector {
-	return p.features
+func (p *Brontide) LocalFeatures() *lnwire.FeatureVector {
+	return p.cfg.Features
 }
 
 // RemoteFeatures returns the set of global features that has been advertised by
@@ -2686,16 +2537,16 @@ func (p *peer) LocalFeatures() *lnwire.FeatureVector {
 // their behavior off the set of negotiated feature bits.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) RemoteFeatures() *lnwire.FeatureVector {
+func (p *Brontide) RemoteFeatures() *lnwire.FeatureVector {
 	return p.remoteFeatures
 }
 
-// sendInitMsg sends init message to remote peer which contains our currently
-// supported local and global features.
-func (p *peer) sendInitMsg() error {
+// sendInitMsg sends the Init message to the remote peer. This message contains our
+// currently supported local and global features.
+func (p *Brontide) sendInitMsg() error {
 	msg := lnwire.NewInitMessage(
-		p.legacyFeatures.RawFeatureVector,
-		p.features.RawFeatureVector,
+		p.cfg.LegacyFeatures.RawFeatureVector,
+		p.cfg.Features.RawFeatureVector,
 	)
 
 	return p.writeMessage(msg)
@@ -2703,7 +2554,7 @@ func (p *peer) sendInitMsg() error {
 
 // resendChanSyncMsg will attempt to find a channel sync message for the closed
 // channel and resend it to our peer.
-func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error {
+func (p *Brontide) resendChanSyncMsg(cid lnwire.ChannelID) error {
 	// If we already re-sent the mssage for this channel, we won't do it
 	// again.
 	if _, ok := p.resentChanSyncMsg[cid]; ok {
@@ -2711,7 +2562,7 @@ func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error {
 	}
 
 	// Check if we have any channel sync messages stored for this channel.
-	c, err := p.server.chanDB.FetchClosedChannelForID(cid)
+	c, err := p.cfg.ChannelDB.FetchClosedChannelForID(cid)
 	if err != nil {
 		return fmt.Errorf("unable to fetch channel sync messages for "+
 			"peer %v: %v", p, err)
@@ -2745,23 +2596,23 @@ func (p *peer) resendChanSyncMsg(cid lnwire.ChannelID) error {
 	return nil
 }
 
-// SendMessage sends a variadic number of high-priority message to remote peer.
-// The first argument denotes if the method should block until the messages have
-// been sent to the remote peer or an error is returned, otherwise it returns
-// immediately after queuing.
+// SendMessage sends a variadic number of high-priority messages to the remote
+// peer. The first argument denotes if the method should block until the
+// messages have been sent to the remote peer or an error is returned,
+// otherwise it returns immediately after queuing.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) SendMessage(sync bool, msgs ...lnwire.Message) error {
+func (p *Brontide) SendMessage(sync bool, msgs ...lnwire.Message) error {
 	return p.sendMessage(sync, true, msgs...)
 }
 
-// SendMessageLazy sends a variadic number of low-priority message to remote
-// peer. The first argument denotes if the method should block until the
-// messages have been sent to the remote peer or an error is returned, otherwise
-// it returns immediately after queueing.
+// SendMessageLazy sends a variadic number of low-priority messages to the
+// remote peer. The first argument denotes if the method should block until
+// the messages have been sent to the remote peer or an error is returned,
+// otherwise it returns immediately after queueing.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) SendMessageLazy(sync bool, msgs ...lnwire.Message) error {
+func (p *Brontide) SendMessageLazy(sync bool, msgs ...lnwire.Message) error {
 	return p.sendMessage(sync, false, msgs...)
 }
 
@@ -2769,7 +2620,7 @@ func (p *peer) SendMessageLazy(sync bool, msgs ...lnwire.Message) error {
 // to the remote peer. If sync is true, this method will block until the
 // messages have been sent to the remote peer or an error is returned, otherwise
 // it returns immediately after queueing.
-func (p *peer) sendMessage(sync, priority bool, msgs ...lnwire.Message) error {
+func (p *Brontide) sendMessage(sync, priority bool, msgs ...lnwire.Message) error {
 	// Add all incoming messages to the outgoing queue. A list of error
 	// chans is populated for each message if the caller requested a sync
 	// send.
@@ -2801,7 +2652,7 @@ func (p *peer) sendMessage(sync, priority bool, msgs ...lnwire.Message) error {
 			return err
 		case <-p.quit:
 			return lnpeer.ErrPeerExiting
-		case <-p.server.quit:
+		case <-p.cfg.Quit:
 			return lnpeer.ErrPeerExiting
 		}
 	}
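sendMessage's sync mode works by giving each queued message its own buffered error channel and draining them in order, and the new `<-p.cfg.Quit` case means a server-wide shutdown also unblocks the caller. The per-message error-channel core, reduced to a runnable sketch with a stand-in send function:

package main

import "fmt"

// sendSync queues every message with its own buffered error channel and
// then waits on each in order, which is the essence of sendMessage's
// sync mode. The send func stands in for queueMsg/queueMsgLazy.
func sendSync(msgs []string, send func(msg string, errChan chan error)) error {
	errChans := make([]chan error, 0, len(msgs))
	for _, m := range msgs {
		errChan := make(chan error, 1)
		send(m, errChan)
		errChans = append(errChans, errChan)
	}

	// Wait for a result per message; the first failure aborts.
	for _, errChan := range errChans {
		if err := <-errChan; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := sendSync([]string{"a", "b"}, func(m string, errChan chan error) {
		errChan <- nil // pretend the wire write succeeded
	})
	fmt.Println("sync send error:", err)
}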
@@ -2812,29 +2663,29 @@ func (p *peer) sendMessage(sync, priority bool, msgs ...lnwire.Message) error {
 // PubKey returns the pubkey of the peer in compressed serialized format.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) PubKey() [33]byte {
-	return p.pubKeyBytes
+func (p *Brontide) PubKey() [33]byte {
+	return p.cfg.PubKeyBytes
 }
 
 // IdentityKey returns the public key of the remote peer.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) IdentityKey() *btcec.PublicKey {
-	return p.addr.IdentityKey
+func (p *Brontide) IdentityKey() *btcec.PublicKey {
+	return p.cfg.Addr.IdentityKey
 }
 
 // Address returns the network address of the remote peer.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) Address() net.Addr {
-	return p.addr.Address
+func (p *Brontide) Address() net.Addr {
+	return p.cfg.Addr.Address
 }
 
 // AddNewChannel adds a new channel to the peer. The channel should fail to be
 // added if the cancel channel is closed.
 //
 // NOTE: Part of the lnpeer.Peer interface.
-func (p *peer) AddNewChannel(channel *channeldb.OpenChannel,
+func (p *Brontide) AddNewChannel(channel *channeldb.OpenChannel,
 	cancel <-chan struct{}) error {
 
 	errChan := make(chan error, 1)
@@ -2863,16 +2714,126 @@ func (p *peer) AddNewChannel(channel *channeldb.OpenChannel,
 
 // StartTime returns the time at which the connection was established if the
 // peer started successfully, and zero otherwise.
-func (p *peer) StartTime() time.Time {
+func (p *Brontide) StartTime() time.Time {
 	return p.startTime
 }
 
-// LinkUpdater is an interface implemented by most messages in BOLT 2 that are
-// allowed to update the channel state.
-type LinkUpdater interface {
-	// TargetChanID returns the channel id of the link for which this
-	// message is intended.
-	TargetChanID() lnwire.ChannelID
+// handleCloseMsg is called when a new cooperative channel closure related
+// message is received from the remote peer. We'll use this message to advance
+// the chan closer state machine.
+func (p *Brontide) handleCloseMsg(msg *closeMsg) {
+	// We'll now fetch the matching closing state machine in order to continue,
+	// or finalize the channel closure process.
+	chanCloser, err := p.fetchActiveChanCloser(msg.cid)
+	if err != nil {
+		// If the channel is not known to us, we'll simply ignore this message.
+		if err == ErrChannelNotFound {
+			return
+		}
+
+		peerLog.Errorf("Unable to respond to remote close msg: %v", err)
+
+		errMsg := &lnwire.Error{
+			ChanID: msg.cid,
+			Data:   lnwire.ErrorData(err.Error()),
+		}
+		p.queueMsg(errMsg, nil)
+		return
+	}
+
+	// Next, we'll process the next message using the target state machine.
+	// We'll either continue negotiation, or halt.
+	msgs, closeFin, err := chanCloser.ProcessCloseMsg(
+		msg.msg,
+	)
+	if err != nil {
+		err := fmt.Errorf("unable to process close msg: %v", err)
+		peerLog.Error(err)
+
+		// As the negotiations failed, we'll reset the channel state machine to
+		// ensure we act to on-chain events as normal.
+		chanCloser.Channel().ResetState()
+
+		if chanCloser.CloseRequest() != nil {
+			chanCloser.CloseRequest().Err <- err
+		}
+		delete(p.activeChanCloses, msg.cid)
+		return
+	}
+
+	// Queue any messages to the remote peer that need to be sent as a part of
+	// this latest round of negotiations.
+	for _, msg := range msgs {
+		p.queueMsg(msg, nil)
+	}
+
+	// If we haven't finished close negotiations, then we'll continue as we
+	// can't yet finalize the closure.
+	if !closeFin {
+		return
+	}
+
+	// Otherwise, we've agreed on a closing fee! In this case, we'll wrap up
+	// the channel closure by notifying relevant sub-systems and launching a
+	// goroutine to wait for close tx conf.
+	p.finalizeChanClosure(chanCloser)
 }
 
-// TODO(roasbeef): make all start/stop mutexes a CAS
+// HandleLocalCloseChanReqs accepts a *htlcswitch.ChanClose and passes it onto
+// the channelManager goroutine, which will shut down the link and possibly
+// close the channel.
+func (p *Brontide) HandleLocalCloseChanReqs(req *htlcswitch.ChanClose) {
+	select {
+	case p.localCloseChanReqs <- req:
+		peerLog.Infof("Local close channel request delivered to peer: %v",
+			p.PubKey())
+	case <-p.quit:
+		peerLog.Infof("Unable to deliver local close channel request to peer "+
+			"%x", p.PubKey())
+	}
+}
+
+// NetAddress returns the network of the remote peer as an lnwire.NetAddress.
+func (p *Brontide) NetAddress() *lnwire.NetAddress {
+	return p.cfg.Addr
+}
+
+// Inbound is a getter for the Brontide's Inbound boolean in cfg.
+func (p *Brontide) Inbound() bool {
+	return p.cfg.Inbound
+}
+
+// ConnReq is a getter for the Brontide's connReq in cfg.
+func (p *Brontide) ConnReq() *connmgr.ConnReq {
+	return p.cfg.ConnReq
+}
+
+// ErrorBuffer is a getter for the Brontide's errorBuffer in cfg.
+func (p *Brontide) ErrorBuffer() *queue.CircularBuffer {
+	return p.cfg.ErrorBuffer
+}
+
+// SetAddress sets the remote peer's address given an address.
+func (p *Brontide) SetAddress(address net.Addr) {
+	p.cfg.Addr.Address = address
+}
+
+// ActiveSignal returns the peer's active signal.
+func (p *Brontide) ActiveSignal() chan struct{} {
+	return p.activeSignal
+}
+
+// Conn returns a pointer to the peer's connection struct.
+func (p *Brontide) Conn() net.Conn {
+	return p.cfg.Conn
+}
+
+// BytesReceived returns the number of bytes received from the peer.
+func (p *Brontide) BytesReceived() uint64 {
+	return atomic.LoadUint64(&p.bytesReceived)
+}
+
+// BytesSent returns the number of bytes sent to the peer.
+func (p *Brontide) BytesSent() uint64 {
+	return atomic.LoadUint64(&p.bytesSent)
+}
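The largest hunk above is, at its core, an extract-method refactor: the close-message handling moves out of channelManager's select loop into handleCloseMsg, so every early-exit `continue` becomes a plain `return`. The shape of that transformation, reduced to a runnable toy; the types here are ours, not lnd's.

package main

import "fmt"

type closeMsg struct{ cid int }

type peer struct{ chanCloseMsgs chan *closeMsg }

// handleCloseMsg is the extracted body: early exits that used to be
// `continue` statements inside the select loop are now plain returns.
func (p *peer) handleCloseMsg(msg *closeMsg) {
	if msg.cid == 0 {
		return // unknown channel, formerly `continue`
	}
	fmt.Println("advancing closer for channel", msg.cid)
}

// channelManager keeps only the dispatch, which is what the new select
// case reduces to.
func (p *peer) channelManager() {
	for msg := range p.chanCloseMsgs {
		p.handleCloseMsg(msg)
	}
}

func main() {
	p := &peer{chanCloseMsgs: make(chan *closeMsg, 2)}
	p.chanCloseMsgs <- &closeMsg{cid: 0}
	p.chanCloseMsgs <- &closeMsg{cid: 42}
	close(p.chanCloseMsgs)
	p.channelManager()
}

Extracting the handler also lets the tests below exercise the close flow by feeding chanCloseMsgs directly, without spinning up the full select loop's other cases.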
@@ -1,19 +1,17 @@
-// +build !rpctest
+package peer
 
-package lnd
-
 import (
 	"bytes"
 	"testing"
 	"time"
 
+	"github.com/btcsuite/btcd/chaincfg"
 	"github.com/btcsuite/btcd/txscript"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
 	"github.com/lightningnetwork/lnd/chainntnfs"
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/htlcswitch"
-	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
 	"github.com/lightningnetwork/lnd/lnwallet/chancloser"
 	"github.com/lightningnetwork/lnd/lnwire"
 )
@@ -35,12 +33,12 @@ var (
 func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) {
 	t.Parallel()
 
-	notifier := &mockNotfier{
+	notifier := &mockNotifier{
 		confChannel: make(chan *chainntnfs.TxConfirmation),
 	}
 	broadcastTxChan := make(chan *wire.MsgTx)
 
-	responder, responderChan, initiatorChan, cleanUp, err := createTestPeer(
+	alicePeer, bobChan, cleanUp, err := createTestPeer(
 		notifier, broadcastTxChan, noUpdate,
 	)
 	if err != nil {
@@ -48,19 +46,19 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) {
 	}
 	defer cleanUp()
 
-	chanID := lnwire.NewChanIDFromOutPoint(responderChan.ChannelPoint())
+	chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())
 
 	// We send a shutdown request to Alice. She will now be the responding
 	// node in this shutdown procedure. We first expect Alice to answer
 	// this shutdown request with a Shutdown message.
-	responder.chanCloseMsgs <- &closeMsg{
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: lnwire.NewShutdown(chanID, dummyDeliveryScript),
 	}
 
 	var msg lnwire.Message
 	select {
-	case outMsg := <-responder.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive shutdown message")
@@ -73,49 +71,61 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) {
 
 	respDeliveryScript := shutdownMsg.Address
 
-	// Alice will thereafter send a ClosingSigned message, indicating her
-	// proposed closing transaction fee.
+	// Alice will then send a ClosingSigned message, indicating her proposed
+	// closing transaction fee. Alice sends the ClosingSigned message as she is
+	// the initiator of the channel.
 	select {
-	case outMsg := <-responder.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive ClosingSigned message")
 	}
 
-	responderClosingSigned, ok := msg.(*lnwire.ClosingSigned)
+	respClosingSigned, ok := msg.(*lnwire.ClosingSigned)
 	if !ok {
 		t.Fatalf("expected ClosingSigned message, got %T", msg)
 	}
 
 	// We accept the fee, and send a ClosingSigned with the same fee back,
 	// so she knows we agreed.
-	peerFee := responderClosingSigned.FeeSatoshis
-	initiatorSig, _, _, err := initiatorChan.CreateCloseProposal(
-		peerFee, dummyDeliveryScript, respDeliveryScript,
+	aliceFee := respClosingSigned.FeeSatoshis
+	bobSig, _, _, err := bobChan.CreateCloseProposal(
+		aliceFee, dummyDeliveryScript, respDeliveryScript,
 	)
 	if err != nil {
 		t.Fatalf("error creating close proposal: %v", err)
 	}
 
-	parsedSig, err := lnwire.NewSigFromSignature(initiatorSig)
+	parsedSig, err := lnwire.NewSigFromSignature(bobSig)
 	if err != nil {
 		t.Fatalf("error parsing signature: %v", err)
 	}
-	closingSigned := lnwire.NewClosingSigned(chanID, peerFee, parsedSig)
-	responder.chanCloseMsgs <- &closeMsg{
+	closingSigned := lnwire.NewClosingSigned(chanID, aliceFee, parsedSig)
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: closingSigned,
 	}
 
-	// The responder will now see that we agreed on the fee, and broadcast
-	// the closing transaction.
+	// Alice should now see that we agreed on the fee, and should broadcast the
+	// closing transaction.
 	select {
 	case <-broadcastTxChan:
 	case <-time.After(timeout):
 		t.Fatalf("closing tx not broadcast")
 	}
 
-	// And the initiator should be waiting for a confirmation notification.
+	// Need to pull the remaining message off of Alice's outgoing queue.
+	select {
+	case outMsg := <-alicePeer.outgoingQueue:
+		msg = outMsg.msg
+	case <-time.After(timeout):
+		t.Fatalf("did not receive ClosingSigned message")
+	}
+	if _, ok := msg.(*lnwire.ClosingSigned); !ok {
+		t.Fatalf("expected ClosingSigned message, got %T", msg)
+	}
+
+	// Alice should be waiting in a goroutine for a confirmation.
 	notifier.confChannel <- &chainntnfs.TxConfirmation{}
 }
 
@ -124,12 +134,12 @@ func TestPeerChannelClosureAcceptFeeResponder(t *testing.T) {
|
|||||||
func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
|
func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
notifier := &mockNotfier{
|
notifier := &mockNotifier{
|
||||||
confChannel: make(chan *chainntnfs.TxConfirmation),
|
confChannel: make(chan *chainntnfs.TxConfirmation),
|
||||||
}
|
}
|
||||||
broadcastTxChan := make(chan *wire.MsgTx)
|
broadcastTxChan := make(chan *wire.MsgTx)
|
||||||
|
|
||||||
initiator, initiatorChan, responderChan, cleanUp, err := createTestPeer(
|
alicePeer, bobChan, cleanUp, err := createTestPeer(
|
||||||
notifier, broadcastTxChan, noUpdate,
|
notifier, broadcastTxChan, noUpdate,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -137,22 +147,22 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
|
|||||||
}
|
}
|
||||||
defer cleanUp()
|
defer cleanUp()
|
||||||
|
|
||||||
// We make the initiator send a shutdown request.
|
// We make Alice send a shutdown request.
|
||||||
updateChan := make(chan interface{}, 1)
|
updateChan := make(chan interface{}, 1)
|
||||||
errChan := make(chan error, 1)
|
errChan := make(chan error, 1)
|
||||||
closeCommand := &htlcswitch.ChanClose{
|
closeCommand := &htlcswitch.ChanClose{
|
||||||
CloseType: htlcswitch.CloseRegular,
|
CloseType: htlcswitch.CloseRegular,
|
||||||
ChanPoint: initiatorChan.ChannelPoint(),
|
ChanPoint: bobChan.ChannelPoint(),
|
||||||
Updates: updateChan,
|
Updates: updateChan,
|
||||||
TargetFeePerKw: 12500,
|
TargetFeePerKw: 12500,
|
||||||
Err: errChan,
|
Err: errChan,
|
||||||
}
|
}
|
||||||
initiator.localCloseChanReqs <- closeCommand
|
alicePeer.localCloseChanReqs <- closeCommand
|
||||||
|
|
||||||
// We should now be getting the shutdown request.
|
// We can now pull a Shutdown message off of Alice's outgoingQueue.
|
||||||
var msg lnwire.Message
|
var msg lnwire.Message
|
||||||
select {
|
select {
|
||||||
case outMsg := <-initiator.outgoingQueue:
|
case outMsg := <-alicePeer.outgoingQueue:
|
||||||
msg = outMsg.msg
|
msg = outMsg.msg
|
||||||
case <-time.After(timeout):
|
case <-time.After(timeout):
|
||||||
t.Fatalf("did not receive shutdown request")
|
t.Fatalf("did not receive shutdown request")
|
||||||
@ -163,68 +173,78 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
|
|||||||
t.Fatalf("expected Shutdown message, got %T", msg)
|
t.Fatalf("expected Shutdown message, got %T", msg)
|
||||||
}
|
}
|
||||||
|
|
||||||
initiatorDeliveryScript := shutdownMsg.Address
|
aliceDeliveryScript := shutdownMsg.Address
|
||||||
|
|
||||||
// We'll answer the shutdown message with our own Shutdown, and then a
|
// Bob will respond with his own Shutdown message.
|
||||||
// ClosingSigned message.
|
|
||||||
chanID := shutdownMsg.ChannelID
|
chanID := shutdownMsg.ChannelID
|
||||||
initiator.chanCloseMsgs <- &closeMsg{
|
alicePeer.chanCloseMsgs <- &closeMsg{
|
||||||
cid: chanID,
|
cid: chanID,
|
||||||
msg: lnwire.NewShutdown(chanID,
|
msg: lnwire.NewShutdown(chanID,
|
||||||
dummyDeliveryScript),
|
dummyDeliveryScript),
|
||||||
}
|
}
|
||||||
|
|
||||||
estimator := chainfee.NewStaticEstimator(12500, 0)
|
// Alice will reply with a ClosingSigned here.
|
||||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
select {
|
||||||
if err != nil {
|
case outMsg := <-alicePeer.outgoingQueue:
|
||||||
t.Fatalf("unable to query fee estimator: %v", err)
|
msg = outMsg.msg
|
||||||
|
case <-time.After(timeout):
|
||||||
|
t.Fatalf("did not receive closing signed message")
|
||||||
}
|
}
|
||||||
fee := responderChan.CalcFee(feePerKw)
|
closingSignedMsg, ok := msg.(*lnwire.ClosingSigned)
|
||||||
closeSig, _, _, err := responderChan.CreateCloseProposal(fee,
|
if !ok {
|
||||||
dummyDeliveryScript, initiatorDeliveryScript)
|
t.Fatalf("expected to receive closing signed message, got %T", msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bob should reply with the exact same fee in his next ClosingSigned
|
||||||
|
// message.
|
||||||
|
bobFee := closingSignedMsg.FeeSatoshis
|
||||||
|
bobSig, _, _, err := bobChan.CreateCloseProposal(
|
||||||
|
bobFee, dummyDeliveryScript, aliceDeliveryScript,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unable to create close proposal: %v", err)
|
t.Fatalf("unable to create close proposal: %v", err)
|
||||||
}
|
}
|
||||||
parsedSig, err := lnwire.NewSigFromSignature(closeSig)
|
parsedSig, err := lnwire.NewSigFromSignature(bobSig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unable to parse signature: %v", err)
|
t.Fatalf("unable to parse signature: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
closingSigned := lnwire.NewClosingSigned(shutdownMsg.ChannelID,
|
closingSigned := lnwire.NewClosingSigned(shutdownMsg.ChannelID,
|
||||||
fee, parsedSig)
|
bobFee, parsedSig)
|
||||||
initiator.chanCloseMsgs <- &closeMsg{
|
alicePeer.chanCloseMsgs <- &closeMsg{
|
||||||
cid: chanID,
|
cid: chanID,
|
||||||
msg: closingSigned,
|
msg: closingSigned,
|
||||||
}
|
}
|
||||||
|
|
||||||
// And we expect the initiator to accept the fee, and broadcast the
|
// Alice should accept Bob's fee, broadcast the cooperative close tx, and
|
||||||
// closing transaction.
|
// send a ClosingSigned message back to Bob.
|
||||||
select {
|
|
||||||
case outMsg := <-initiator.outgoingQueue:
|
|
||||||
msg = outMsg.msg
|
|
||||||
case <-time.After(timeout):
|
|
||||||
t.Fatalf("did not receive closing signed message")
|
|
||||||
}
|
|
||||||
|
|
||||||
closingSignedMsg, ok := msg.(*lnwire.ClosingSigned)
|
// Alice should now broadcast the closing transaction.
|
||||||
if !ok {
|
|
||||||
t.Fatalf("expected ClosingSigned message, got %T", msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
if closingSignedMsg.FeeSatoshis != fee {
|
|
||||||
t.Fatalf("expected ClosingSigned fee to be %v, instead got %v",
|
|
||||||
fee, closingSignedMsg.FeeSatoshis)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The initiator will now see that we agreed on the fee, and broadcast
|
|
||||||
// the closing transaction.
|
|
||||||
select {
|
select {
|
||||||
case <-broadcastTxChan:
|
case <-broadcastTxChan:
|
||||||
case <-time.After(timeout):
|
case <-time.After(timeout):
|
||||||
t.Fatalf("closing tx not broadcast")
|
t.Fatalf("closing tx not broadcast")
|
||||||
}
|
}
|
||||||
|
|
||||||
// And the initiator should be waiting for a confirmation notification.
|
// Alice should respond with the ClosingSigned they both agreed upon.
|
||||||
|
select {
|
||||||
|
case outMsg := <-alicePeer.outgoingQueue:
|
||||||
|
msg = outMsg.msg
|
||||||
|
case <-time.After(timeout):
|
||||||
|
t.Fatalf("did not receive closing signed message")
|
||||||
|
}
|
||||||
|
|
||||||
|
closingSignedMsg, ok = msg.(*lnwire.ClosingSigned)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected ClosingSigned message, got %T", msg)
|
||||||
|
}
|
||||||
|
|
||||||
|
if closingSignedMsg.FeeSatoshis != bobFee {
|
||||||
|
t.Fatalf("expected ClosingSigned fee to be %v, instead got %v",
|
||||||
|
bobFee, closingSignedMsg.FeeSatoshis)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Alice should be waiting on a single confirmation for the coop close tx.
|
||||||
notifier.confChannel <- &chainntnfs.TxConfirmation{}
|
notifier.confChannel <- &chainntnfs.TxConfirmation{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -234,12 +254,12 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
 func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
 	t.Parallel()

-	notifier := &mockNotfier{
+	notifier := &mockNotifier{
 		confChannel: make(chan *chainntnfs.TxConfirmation),
 	}
 	broadcastTxChan := make(chan *wire.MsgTx)

-	responder, responderChan, initiatorChan, cleanUp, err := createTestPeer(
+	alicePeer, bobChan, cleanUp, err := createTestPeer(
 		notifier, broadcastTxChan, noUpdate,
 	)
 	if err != nil {
@ -247,12 +267,12 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
 	}
 	defer cleanUp()

-	chanID := lnwire.NewChanIDFromOutPoint(responderChan.ChannelPoint())
+	chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())

-	// We send a shutdown request to Alice. She will now be the responding
-	// node in this shutdown procedure. We first expect Alice to answer
-	// this shutdown request with a Shutdown message.
-	responder.chanCloseMsgs <- &closeMsg{
+	// Bob sends a shutdown request to Alice. She will now be the responding
+	// node in this shutdown procedure. We first expect Alice to answer this
+	// Shutdown request with a Shutdown message.
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: lnwire.NewShutdown(chanID,
 			dummyDeliveryScript),
@ -260,7 +280,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {

 	var msg lnwire.Message
 	select {
-	case outMsg := <-responder.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive shutdown message")
@ -271,140 +291,152 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
 		t.Fatalf("expected Shutdown message, got %T", msg)
 	}

-	respDeliveryScript := shutdownMsg.Address
+	aliceDeliveryScript := shutdownMsg.Address

-	// Alice will thereafter send a ClosingSigned message, indicating her
-	// proposed closing transaction fee.
+	// As Alice is the channel initiator, she will send her ClosingSigned
+	// message.
 	select {
-	case outMsg := <-responder.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive closing signed message")
 	}

-	responderClosingSigned, ok := msg.(*lnwire.ClosingSigned)
+	aliceClosingSigned, ok := msg.(*lnwire.ClosingSigned)
 	if !ok {
 		t.Fatalf("expected ClosingSigned message, got %T", msg)
 	}

-	// We don't agree with the fee, and will send back one that's 2.5x.
-	preferredRespFee := responderClosingSigned.FeeSatoshis
+	// Bob doesn't agree with the fee and will send one back that's 2.5x.
+	preferredRespFee := aliceClosingSigned.FeeSatoshis
 	increasedFee := btcutil.Amount(float64(preferredRespFee) * 2.5)
-	initiatorSig, _, _, err := initiatorChan.CreateCloseProposal(
-		increasedFee, dummyDeliveryScript, respDeliveryScript,
+	bobSig, _, _, err := bobChan.CreateCloseProposal(
+		increasedFee, dummyDeliveryScript, aliceDeliveryScript,
 	)
 	if err != nil {
 		t.Fatalf("error creating close proposal: %v", err)
 	}

-	parsedSig, err := lnwire.NewSigFromSignature(initiatorSig)
+	parsedSig, err := lnwire.NewSigFromSignature(bobSig)
 	if err != nil {
 		t.Fatalf("error parsing signature: %v", err)
 	}
 	closingSigned := lnwire.NewClosingSigned(chanID, increasedFee, parsedSig)
-	responder.chanCloseMsgs <- &closeMsg{
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: closingSigned,
 	}

-	// The responder will see the new fee we propose, but with current
-	// settings it won't accept it immediately as it differs too much by
-	// its ideal fee. We should get a new proposal back, which should have
-	// the average fee rate proposed.
+	// Alice will now see the new fee we propose, but with current settings it
+	// won't accept it immediately as it differs too much by its ideal fee. We
+	// should get a new proposal back, which should have the average fee rate
+	// proposed.
 	select {
-	case outMsg := <-responder.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive closing signed message")
 	}

-	responderClosingSigned, ok = msg.(*lnwire.ClosingSigned)
+	aliceClosingSigned, ok = msg.(*lnwire.ClosingSigned)
 	if !ok {
 		t.Fatalf("expected ClosingSigned message, got %T", msg)
 	}

-	// The fee sent by the responder should be less than the fee we just
-	// sent as it should attempt to compromise.
-	peerFee := responderClosingSigned.FeeSatoshis
-	if peerFee > increasedFee {
+	// The fee sent by Alice should be less than the fee Bob just sent as Alice
+	// should attempt to compromise.
+	aliceFee := aliceClosingSigned.FeeSatoshis
+	if aliceFee > increasedFee {
 		t.Fatalf("new fee should be less than our fee: new=%v, "+
-			"prior=%v", peerFee, increasedFee)
+			"prior=%v", aliceFee, increasedFee)
 	}
-	lastFeeResponder := peerFee
+	lastFeeResponder := aliceFee

 	// We try negotiating a 2.1x fee, which should also be rejected.
 	increasedFee = btcutil.Amount(float64(preferredRespFee) * 2.1)
-	initiatorSig, _, _, err = initiatorChan.CreateCloseProposal(
-		increasedFee, dummyDeliveryScript, respDeliveryScript,
+	bobSig, _, _, err = bobChan.CreateCloseProposal(
+		increasedFee, dummyDeliveryScript, aliceDeliveryScript,
 	)
 	if err != nil {
 		t.Fatalf("error creating close proposal: %v", err)
 	}

-	parsedSig, err = lnwire.NewSigFromSignature(initiatorSig)
+	parsedSig, err = lnwire.NewSigFromSignature(bobSig)
 	if err != nil {
 		t.Fatalf("error parsing signature: %v", err)
 	}
 	closingSigned = lnwire.NewClosingSigned(chanID, increasedFee, parsedSig)
-	responder.chanCloseMsgs <- &closeMsg{
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: closingSigned,
 	}

-	// It still won't be accepted, and we should get a new proposal, the
-	// average of what we proposed, and what they proposed last time.
+	// Bob's latest proposal still won't be accepted and Alice should send over
+	// a new ClosingSigned message. It should be the average of what Bob and
+	// Alice each proposed last time.
 	select {
-	case outMsg := <-responder.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive closing signed message")
 	}

-	responderClosingSigned, ok = msg.(*lnwire.ClosingSigned)
+	aliceClosingSigned, ok = msg.(*lnwire.ClosingSigned)
 	if !ok {
 		t.Fatalf("expected ClosingSigned message, got %T", msg)
 	}

-	// The peer should inch towards our fee, in order to compromise.
-	// Additionally, this fee should be less than the fee we sent prior.
-	peerFee = responderClosingSigned.FeeSatoshis
-	if peerFee < lastFeeResponder {
+	// Alice should inch towards Bob's fee, in order to compromise.
+	// Additionally, this fee should be less than the fee Bob sent before.
+	aliceFee = aliceClosingSigned.FeeSatoshis
+	if aliceFee < lastFeeResponder {
 		t.Fatalf("new fee should be greater than prior: new=%v, "+
-			"prior=%v", peerFee, lastFeeResponder)
+			"prior=%v", aliceFee, lastFeeResponder)
 	}
-	if peerFee > increasedFee {
-		t.Fatalf("new fee should be less than our fee: new=%v, "+
-			"prior=%v", peerFee, increasedFee)
+	if aliceFee > increasedFee {
+		t.Fatalf("new fee should be less than Bob's fee: new=%v, "+
+			"prior=%v", aliceFee, increasedFee)
 	}

-	// Finally, we'll accept the fee by echoing back the same fee that they
-	// sent to us.
-	initiatorSig, _, _, err = initiatorChan.CreateCloseProposal(
-		peerFee, dummyDeliveryScript, respDeliveryScript,
+	// Finally, Bob will accept the fee by echoing back the same fee that Alice
+	// just sent over.
+	bobSig, _, _, err = bobChan.CreateCloseProposal(
+		aliceFee, dummyDeliveryScript, aliceDeliveryScript,
 	)
 	if err != nil {
 		t.Fatalf("error creating close proposal: %v", err)
 	}

-	parsedSig, err = lnwire.NewSigFromSignature(initiatorSig)
+	parsedSig, err = lnwire.NewSigFromSignature(bobSig)
 	if err != nil {
 		t.Fatalf("error parsing signature: %v", err)
 	}
-	closingSigned = lnwire.NewClosingSigned(chanID, peerFee, parsedSig)
-	responder.chanCloseMsgs <- &closeMsg{
+	closingSigned = lnwire.NewClosingSigned(chanID, aliceFee, parsedSig)
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: closingSigned,
 	}

-	// The responder will now see that we agreed on the fee, and broadcast
-	// the closing transaction.
+	// Alice will now see that Bob agreed on the fee, and broadcast the coop
+	// close transaction.
 	select {
 	case <-broadcastTxChan:
 	case <-time.After(timeout):
 		t.Fatalf("closing tx not broadcast")
 	}

-	// And the responder should be waiting for a confirmation notification.
+	// Alice should respond with the ClosingSigned they both agreed upon.
+	select {
+	case outMsg := <-alicePeer.outgoingQueue:
+		msg = outMsg.msg
+	case <-time.After(timeout):
+		t.Fatalf("did not receive closing signed message")
+	}
+	if _, ok := msg.(*lnwire.ClosingSigned); !ok {
+		t.Fatalf("expected to receive closing signed message, got %T", msg)
+	}
+
+	// Alice should be waiting on a single confirmation for the coop close tx.
 	notifier.confChannel <- &chainntnfs.TxConfirmation{}
 }

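Aside: the negotiation tests above and below all assert the same invariant, namely that every counterproposal Alice emits lands strictly between the two most recent offers, so the two sides converge. The following is a toy Go model of that convergence, illustrating only the behavior the tests check, not lnd's actual chancloser code:

package main

import "fmt"

// compromise mirrors the averaging step the tests assert: each new proposal
// is the midpoint of the last two offers, so it always lies between them.
func compromise(ourLast, theirLast int64) int64 {
	return (ourLast + theirLast) / 2
}

func main() {
	alice, bob := int64(5000), int64(12500) // fees in satoshis
	for alice != bob {
		alice = compromise(alice, bob) // Alice inches toward Bob...
		bob = compromise(bob, alice)   // ...and Bob counters or echoes.
	}
	fmt.Println("agreed fee:", alice) // matching fees end the negotiation
}
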
@ -414,12 +446,12 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
 func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
 	t.Parallel()

-	notifier := &mockNotfier{
+	notifier := &mockNotifier{
 		confChannel: make(chan *chainntnfs.TxConfirmation),
 	}
 	broadcastTxChan := make(chan *wire.MsgTx)

-	initiator, initiatorChan, responderChan, cleanUp, err := createTestPeer(
+	alicePeer, bobChan, cleanUp, err := createTestPeer(
 		notifier, broadcastTxChan, noUpdate,
 	)
 	if err != nil {
@ -432,18 +464,18 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
 	errChan := make(chan error, 1)
 	closeCommand := &htlcswitch.ChanClose{
 		CloseType:      htlcswitch.CloseRegular,
-		ChanPoint:      initiatorChan.ChannelPoint(),
+		ChanPoint:      bobChan.ChannelPoint(),
 		Updates:        updateChan,
 		TargetFeePerKw: 12500,
 		Err:            errChan,
 	}

-	initiator.localCloseChanReqs <- closeCommand
+	alicePeer.localCloseChanReqs <- closeCommand

-	// We should now be getting the shutdown request.
+	// Alice should now send a Shutdown request to Bob.
 	var msg lnwire.Message
 	select {
-	case outMsg := <-initiator.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive shutdown request")
@ -454,47 +486,20 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
 		t.Fatalf("expected Shutdown message, got %T", msg)
 	}

-	initiatorDeliveryScript := shutdownMsg.Address
+	aliceDeliveryScript := shutdownMsg.Address

-	// We'll answer the shutdown message with our own Shutdown, and then a
-	// ClosingSigned message.
-	chanID := lnwire.NewChanIDFromOutPoint(initiatorChan.ChannelPoint())
+	// Bob will answer the Shutdown message with his own Shutdown.
+	chanID := lnwire.NewChanIDFromOutPoint(bobChan.ChannelPoint())
 	respShutdown := lnwire.NewShutdown(chanID, dummyDeliveryScript)
-	initiator.chanCloseMsgs <- &closeMsg{
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: respShutdown,
 	}

-	estimator := chainfee.NewStaticEstimator(12500, 0)
-	initiatorIdealFeeRate, err := estimator.EstimateFeePerKW(1)
-	if err != nil {
-		t.Fatalf("unable to query fee estimator: %v", err)
-	}
-	initiatorIdealFee := responderChan.CalcFee(initiatorIdealFeeRate)
-	increasedFee := btcutil.Amount(float64(initiatorIdealFee) * 2.5)
-	closeSig, _, _, err := responderChan.CreateCloseProposal(
-		increasedFee, dummyDeliveryScript, initiatorDeliveryScript,
-	)
-	if err != nil {
-		t.Fatalf("unable to create close proposal: %v", err)
-	}
-	parsedSig, err := lnwire.NewSigFromSignature(closeSig)
-	if err != nil {
-		t.Fatalf("unable to parse signature: %v", err)
-	}
-
-	closingSigned := lnwire.NewClosingSigned(
-		shutdownMsg.ChannelID, increasedFee, parsedSig,
-	)
-	initiator.chanCloseMsgs <- &closeMsg{
-		cid: chanID,
-		msg: closingSigned,
-	}
-
-	// We should get two closing signed messages, the first will be the
-	// ideal fee sent by the initiator in response to our shutdown request.
+	// Alice should now respond with a ClosingSigned message with her ideal
+	// fee rate.
 	select {
-	case outMsg := <-initiator.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive closing signed")
@ -503,16 +508,35 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
 	if !ok {
 		t.Fatalf("expected ClosingSigned message, got %T", msg)
 	}
-	if closingSignedMsg.FeeSatoshis != initiatorIdealFee {
-		t.Fatalf("expected ClosingSigned fee to be %v, instead got %v",
-			initiatorIdealFee, closingSignedMsg.FeeSatoshis)
-	}
-	lastFeeSent := closingSignedMsg.FeeSatoshis

-	// The second message should be the compromise fee sent in response to
-	// them receiving our fee proposal.
+	idealFeeRate := closingSignedMsg.FeeSatoshis
+	lastReceivedFee := idealFeeRate
+
+	increasedFee := btcutil.Amount(float64(idealFeeRate) * 2.1)
+	lastSentFee := increasedFee
+
+	bobSig, _, _, err := bobChan.CreateCloseProposal(
+		increasedFee, dummyDeliveryScript, aliceDeliveryScript,
+	)
+	if err != nil {
+		t.Fatalf("error creating close proposal: %v", err)
+	}
+
+	parsedSig, err := lnwire.NewSigFromSignature(bobSig)
+	if err != nil {
+		t.Fatalf("unable to parse signature: %v", err)
+	}
+
+	closingSigned := lnwire.NewClosingSigned(chanID, increasedFee, parsedSig)
+	alicePeer.chanCloseMsgs <- &closeMsg{
+		cid: chanID,
+		msg: closingSigned,
+	}
+
+	// It still won't be accepted, and we should get a new proposal, the
+	// average of what we proposed, and what they proposed last time.
 	select {
-	case outMsg := <-initiator.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive closing signed")
@ -522,80 +546,79 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
 		t.Fatalf("expected ClosingSigned message, got %T", msg)
 	}

-	// The peer should inch towards our fee, in order to compromise.
-	// Additionally, this fee should be less than the fee we sent prior.
-	peerFee := closingSignedMsg.FeeSatoshis
-	if peerFee < lastFeeSent {
-		t.Fatalf("new fee should be greater than prior: new=%v, "+
-			"prior=%v", peerFee, lastFeeSent)
+	aliceFee := closingSignedMsg.FeeSatoshis
+	if aliceFee < lastReceivedFee {
+		t.Fatalf("new fee should be greater than prior: new=%v, old=%v",
+			aliceFee, lastReceivedFee)
 	}
-	if peerFee > increasedFee {
-		t.Fatalf("new fee should be less than our fee: new=%v, "+
-			"prior=%v", peerFee, increasedFee)
+	if aliceFee > lastSentFee {
+		t.Fatalf("new fee should be less than our fee: new=%v, old=%v",
+			aliceFee, lastSentFee)
 	}
-	lastFeeSent = closingSignedMsg.FeeSatoshis

-	// We try negotiating a 2.1x fee, which should also be rejected.
-	increasedFee = btcutil.Amount(float64(initiatorIdealFee) * 2.1)
-	responderSig, _, _, err := responderChan.CreateCloseProposal(
-		increasedFee, dummyDeliveryScript, initiatorDeliveryScript,
+	lastReceivedFee = aliceFee
+
+	// We'll try negotiating a 1.5x fee, which should also be rejected.
+	increasedFee = btcutil.Amount(float64(idealFeeRate) * 1.5)
+	lastSentFee = increasedFee
+
+	bobSig, _, _, err = bobChan.CreateCloseProposal(
+		increasedFee, dummyDeliveryScript, aliceDeliveryScript,
 	)
 	if err != nil {
 		t.Fatalf("error creating close proposal: %v", err)
 	}

-	parsedSig, err = lnwire.NewSigFromSignature(responderSig)
+	parsedSig, err = lnwire.NewSigFromSignature(bobSig)
 	if err != nil {
 		t.Fatalf("error parsing signature: %v", err)
 	}

 	closingSigned = lnwire.NewClosingSigned(chanID, increasedFee, parsedSig)
-	initiator.chanCloseMsgs <- &closeMsg{
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: closingSigned,
 	}

-	// It still won't be accepted, and we should get a new proposal, the
-	// average of what we proposed, and what they proposed last time.
+	// Alice won't accept Bob's new proposal, and Bob should receive a new
+	// proposal which is the average of what Bob proposed and Alice proposed
+	// last time.
 	select {
-	case outMsg := <-initiator.outgoingQueue:
+	case outMsg := <-alicePeer.outgoingQueue:
 		msg = outMsg.msg
 	case <-time.After(timeout):
 		t.Fatalf("did not receive closing signed")
 	}
-	initiatorClosingSigned, ok := msg.(*lnwire.ClosingSigned)
+	closingSignedMsg, ok = msg.(*lnwire.ClosingSigned)
 	if !ok {
 		t.Fatalf("expected ClosingSigned message, got %T", msg)
 	}

-	// Once again, the fee sent by the initiator should be greater than the
-	// last fee they sent, but less than the last fee we sent.
-	peerFee = initiatorClosingSigned.FeeSatoshis
-	if peerFee < lastFeeSent {
-		t.Fatalf("new fee should be greater than prior: new=%v, "+
-			"prior=%v", peerFee, lastFeeSent)
+	aliceFee = closingSignedMsg.FeeSatoshis
+	if aliceFee < lastReceivedFee {
+		t.Fatalf("new fee should be greater than prior: new=%v, old=%v",
+			aliceFee, lastReceivedFee)
 	}
-	if peerFee > increasedFee {
-		t.Fatalf("new fee should be less than our fee: new=%v, "+
-			"prior=%v", peerFee, increasedFee)
+	if aliceFee > lastSentFee {
+		t.Fatalf("new fee should be less than Bob's fee: new=%v, old=%v",
+			aliceFee, lastSentFee)
 	}

-	// At this point, we'll accept their fee by sending back a CloseSigned
-	// message with an identical fee.
-	responderSig, _, _, err = responderChan.CreateCloseProposal(
-		peerFee, dummyDeliveryScript, initiatorDeliveryScript,
+	// Bob will now accept their fee by sending back a ClosingSigned message
+	// with an identical fee.
+	bobSig, _, _, err = bobChan.CreateCloseProposal(
+		aliceFee, dummyDeliveryScript, aliceDeliveryScript,
 	)
 	if err != nil {
 		t.Fatalf("error creating close proposal: %v", err)
 	}

-	parsedSig, err = lnwire.NewSigFromSignature(responderSig)
+	parsedSig, err = lnwire.NewSigFromSignature(bobSig)
 	if err != nil {
 		t.Fatalf("error parsing signature: %v", err)
 	}
-	closingSigned = lnwire.NewClosingSigned(chanID, peerFee, parsedSig)
-	initiator.chanCloseMsgs <- &closeMsg{
+	closingSigned = lnwire.NewClosingSigned(chanID, aliceFee, parsedSig)
+	alicePeer.chanCloseMsgs <- &closeMsg{
 		cid: chanID,
 		msg: closingSigned,
 	}
@ -606,6 +629,20 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
 	case <-time.After(timeout):
 		t.Fatalf("closing tx not broadcast")
 	}

+	// Alice should respond with the ClosingSigned they both agreed upon.
+	select {
+	case outMsg := <-alicePeer.outgoingQueue:
+		msg = outMsg.msg
+	case <-time.After(timeout):
+		t.Fatalf("did not receive closing signed message")
+	}
+	if _, ok := msg.(*lnwire.ClosingSigned); !ok {
+		t.Fatalf("expected to receive closing signed message, got %T", msg)
+	}
+
+	// Alice should be waiting on a single confirmation for the coop close tx.
+	notifier.confChannel <- &chainntnfs.TxConfirmation{}
 }

 // TestChooseDeliveryScript tests that chooseDeliveryScript correctly errors
@ -742,13 +779,13 @@ func TestCustomShutdownScript(t *testing.T) {
 		test := test

 		t.Run(test.name, func(t *testing.T) {
-			notifier := &mockNotfier{
+			notifier := &mockNotifier{
 				confChannel: make(chan *chainntnfs.TxConfirmation),
 			}
 			broadcastTxChan := make(chan *wire.MsgTx)

 			// Open a channel.
-			initiator, initiatorChan, _, cleanUp, err := createTestPeer(
+			alicePeer, bobChan, cleanUp, err := createTestPeer(
 				notifier, broadcastTxChan, test.update,
 			)
 			if err != nil {
@ -760,7 +797,7 @@ func TestCustomShutdownScript(t *testing.T) {
 			// a specified delivery address.
 			updateChan := make(chan interface{}, 1)
 			errChan := make(chan error, 1)
-			chanPoint := initiatorChan.ChannelPoint()
+			chanPoint := bobChan.ChannelPoint()
 			closeCommand := htlcswitch.ChanClose{
 				CloseType: htlcswitch.CloseRegular,
 				ChanPoint: chanPoint,
@ -772,11 +809,11 @@ func TestCustomShutdownScript(t *testing.T) {

 			// Send the close command for the correct channel and check that a
 			// shutdown message is sent.
-			initiator.localCloseChanReqs <- &closeCommand
+			alicePeer.localCloseChanReqs <- &closeCommand

 			var msg lnwire.Message
 			select {
-			case outMsg := <-initiator.outgoingQueue:
+			case outMsg := <-alicePeer.outgoingQueue:
 				msg = outMsg.msg
 			case <-time.After(timeout):
 				t.Fatalf("did not receive shutdown message")
@ -820,7 +857,7 @@ func genScript(t *testing.T, address string) lnwire.DeliveryAddress {
 	// Generate an address which can be used for testing.
 	deliveryAddr, err := btcutil.DecodeAddress(
 		address,
-		activeNetParams.Params,
+		&chaincfg.TestNet3Params,
 	)
 	if err != nil {
 		t.Fatalf("invalid delivery address: %v", err)
peer/config.go (new file, 228 lines)
@ -0,0 +1,228 @@
+package peer
+
+import (
+	"net"
+	"time"
+
+	"github.com/btcsuite/btcd/btcec"
+	"github.com/btcsuite/btcd/connmgr"
+
+	"github.com/lightningnetwork/lnd/chainntnfs"
+	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/lightningnetwork/lnd/channelnotifier"
+	"github.com/lightningnetwork/lnd/contractcourt"
+	"github.com/lightningnetwork/lnd/discovery"
+	"github.com/lightningnetwork/lnd/htlcswitch"
+	"github.com/lightningnetwork/lnd/htlcswitch/hodl"
+	"github.com/lightningnetwork/lnd/htlcswitch/hop"
+	"github.com/lightningnetwork/lnd/input"
+	"github.com/lightningnetwork/lnd/invoices"
+	"github.com/lightningnetwork/lnd/lnpeer"
+	"github.com/lightningnetwork/lnd/lnwallet"
+	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
+	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/lightningnetwork/lnd/netann"
+	"github.com/lightningnetwork/lnd/pool"
+	"github.com/lightningnetwork/lnd/queue"
+	"github.com/lightningnetwork/lnd/watchtower/wtclient"
+)
+
+// Config defines configuration fields that are necessary for a peer object
+// to function.
+type Config struct {
+	// Conn is the underlying network connection for this peer.
+	Conn net.Conn
+
+	// ConnReq stores information related to the persistent connection request
+	// for this peer.
+	ConnReq *connmgr.ConnReq
+
+	// PubKeyBytes is the serialized, compressed public key of this peer.
+	PubKeyBytes [33]byte
+
+	// Addr is the network address of the peer.
+	Addr *lnwire.NetAddress
+
+	// Inbound indicates whether or not the peer is an inbound peer.
+	Inbound bool
+
+	// Features is the set of features that we advertise to the remote party.
+	Features *lnwire.FeatureVector
+
+	// LegacyFeatures is the set of features that we advertise to the remote
+	// peer for backwards compatibility. Nodes that have not implemented
+	// flat features will still be able to read our feature bits from the
+	// legacy global field, but we will also advertise everything in the
+	// default features field.
+	LegacyFeatures *lnwire.FeatureVector
+
+	// OutgoingCltvRejectDelta defines the number of blocks before expiry of
+	// an htlc where we don't offer it anymore.
+	OutgoingCltvRejectDelta uint32
+
+	// ChanActiveTimeout specifies the duration the peer will wait to request
+	// a channel reenable, beginning from the time the peer was started.
+	ChanActiveTimeout time.Duration
+
+	// ErrorBuffer stores a set of errors related to a peer. It contains error
+	// messages that our peer has recently sent us over the wire and records of
+	// unknown messages that were sent to us so that we can have a full track
+	// record of the communication errors we have had with our peer. If we
+	// choose to disconnect from a peer, it also stores the reason we had for
+	// disconnecting.
+	ErrorBuffer *queue.CircularBuffer
+
+	// WritePool is the task pool that manages reuse of write buffers. Write
+	// tasks are submitted to the pool in order to conserve the total number of
+	// write buffers allocated at any one time, and decouple write buffer
+	// allocation from the peer life cycle.
+	WritePool *pool.Write
+
+	// ReadPool is the task pool that manages reuse of read buffers.
+	ReadPool *pool.Read
+
+	// Switch is a pointer to the htlcswitch. It is used to setup, get, and
+	// tear-down ChannelLinks.
+	Switch *htlcswitch.Switch
+
+	// InterceptSwitch is a pointer to the InterceptableSwitch, a wrapper around
+	// the regular Switch. We only export it here to pass ForwardPackets to the
+	// ChannelLinkConfig.
+	InterceptSwitch *htlcswitch.InterceptableSwitch
+
+	// ChannelDB is used to fetch opened channels, closed channels, and the
+	// channel graph.
+	ChannelDB *channeldb.DB
+
+	// ChainArb is used to subscribe to channel events, update contract signals,
+	// and force close channels.
+	ChainArb *contractcourt.ChainArbitrator
+
+	// AuthGossiper is needed so that the Brontide impl can register with the
+	// gossiper and process remote channel announcements.
+	AuthGossiper *discovery.AuthenticatedGossiper
+
+	// ChanStatusMgr is used to set or un-set the disabled bit in channel
+	// updates.
+	ChanStatusMgr *netann.ChanStatusManager
+
+	// ChainIO is used to retrieve the best block.
+	ChainIO lnwallet.BlockChainIO
+
+	// FeeEstimator is used to compute our target ideal fee-per-kw when
+	// initializing the coop close process.
+	FeeEstimator chainfee.Estimator
+
+	// Signer is used when creating *lnwallet.LightningChannel instances.
+	Signer input.Signer
+
+	// SigPool is used when creating *lnwallet.LightningChannel instances.
+	SigPool *lnwallet.SigPool
+
+	// Wallet is used to publish transactions and generate delivery scripts
+	// during the coop close process.
+	Wallet *lnwallet.LightningWallet
+
+	// ChainNotifier is used to receive confirmations of a coop close
+	// transaction.
+	ChainNotifier chainntnfs.ChainNotifier
+
+	// RoutingPolicy is used to set the forwarding policy for links created by
+	// the Brontide.
+	RoutingPolicy htlcswitch.ForwardingPolicy
+
+	// Sphinx is used when setting up ChannelLinks so they can decode sphinx
+	// onion blobs.
+	Sphinx *hop.OnionProcessor
+
+	// WitnessBeacon is used when setting up ChannelLinks so they can add any
+	// preimages that they learn.
+	WitnessBeacon contractcourt.WitnessBeacon
+
+	// Invoices is passed to the ChannelLink on creation and handles all
+	// invoice-related logic.
+	Invoices *invoices.InvoiceRegistry
+
+	// ChannelNotifier is used by the link to notify other sub-systems about
+	// channel-related events and by the Brontide to subscribe to
+	// ActiveLinkEvents.
+	ChannelNotifier *channelnotifier.ChannelNotifier
+
+	// HtlcNotifier is used when creating a ChannelLink.
+	HtlcNotifier *htlcswitch.HtlcNotifier
+
+	// TowerClient is used when creating a ChannelLink.
+	TowerClient wtclient.Client
+
+	// DisconnectPeer is used to disconnect this peer if the cooperative close
+	// process fails.
+	DisconnectPeer func(*btcec.PublicKey) error
+
+	// GenNodeAnnouncement is used to send our node announcement to the remote
+	// on startup.
+	GenNodeAnnouncement func(bool,
+		...netann.NodeAnnModifier) (lnwire.NodeAnnouncement, error)
+
+	// PrunePersistentPeerConnection is used to remove all internal state
+	// related to this peer in the server.
+	PrunePersistentPeerConnection func([33]byte)
+
+	// FetchLastChanUpdate fetches our latest channel update for a target
+	// channel.
+	FetchLastChanUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate,
+		error)
+
+	// ProcessFundingOpen is used to hand off an OpenChannel message to the
+	// funding manager.
+	ProcessFundingOpen func(*lnwire.OpenChannel, lnpeer.Peer)
+
+	// ProcessFundingAccept is used to hand off an AcceptChannel message to the
+	// funding manager.
+	ProcessFundingAccept func(*lnwire.AcceptChannel, lnpeer.Peer)
+
+	// ProcessFundingCreated is used to hand off a FundingCreated message to
+	// the funding manager.
+	ProcessFundingCreated func(*lnwire.FundingCreated, lnpeer.Peer)
+
+	// ProcessFundingSigned is used to hand off a FundingSigned message to the
+	// funding manager.
+	ProcessFundingSigned func(*lnwire.FundingSigned, lnpeer.Peer)
+
+	// ProcessFundingLocked is used to hand off a FundingLocked message to the
+	// funding manager.
+	ProcessFundingLocked func(*lnwire.FundingLocked, lnpeer.Peer)
+
+	// ProcessFundingError is used to hand off an Error message to the funding
+	// manager.
+	ProcessFundingError func(*lnwire.Error, *btcec.PublicKey)
+
+	// IsPendingChannel is used to determine whether to send an Error message
+	// to the funding manager or not.
+	IsPendingChannel func([32]byte, *btcec.PublicKey) bool
+
+	// Hodl is used when creating ChannelLinks to specify HodlFlags as
+	// breakpoints in dev builds.
+	Hodl *hodl.Config
+
+	// UnsafeReplay is used when creating ChannelLinks to specify whether or
+	// not to replay adds on its commitment tx.
+	UnsafeReplay bool
+
+	// MaxOutgoingCltvExpiry is used when creating ChannelLinks and is the max
+	// number of blocks that funds could be locked up for when forwarding
+	// payments.
+	MaxOutgoingCltvExpiry uint32
+
+	// MaxChannelFeeAllocation is used when creating ChannelLinks and is the
+	// maximum percentage of total funds that can be allocated to a channel's
+	// commitment fee. This only applies for the initiator of the channel.
+	MaxChannelFeeAllocation float64
+
+	// ServerPubKey is the serialized, compressed public key of our lnd node.
+	// It is used to determine which policy (channel edge) to pass to the
+	// ChannelLink.
+	ServerPubKey [33]byte
+
+	// Quit is the server's quit channel. If this is closed, we halt operation.
+	Quit chan struct{}
+}
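Config is pure dependency injection: it carries every subsystem handle and callback the peer needs, so the package holds no reference back to the lnd server. A deliberately partial sketch of filling it in; every right-hand side below is a placeholder, and a real caller must also supply the switch, wallet, notifiers, and funding callbacks declared above:

package main

import (
	"net"
	"time"

	"github.com/lightningnetwork/lnd/peer"
)

func main() {
	// A plain TCP dial stands in for the already-negotiated brontide
	// connection that a real caller would hand over.
	conn, err := net.Dial("tcp", "127.0.0.1:9735")
	if err != nil {
		panic(err)
	}

	cfg := peer.Config{
		Conn:              conn,
		Inbound:           false,
		ChanActiveTimeout: time.Minute,
		Quit:              make(chan struct{}),
	}
	_ = cfg // handed to the peer constructor in real usage
}
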
peer/interfaces.go (new file, 11 lines)
@ -0,0 +1,11 @@
+package peer
+
+import "github.com/lightningnetwork/lnd/lnwire"
+
+// LinkUpdater is an interface implemented by most messages in BOLT 2 that are
+// allowed to update the channel state.
+type LinkUpdater interface {
+	// TargetChanID returns the channel id of the link for which this message
+	// is intended.
+	TargetChanID() lnwire.ChannelID
+}
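As the lnwire changes in this PR note, the BOLT 2 update messages (CommitSig, RevokeAndAck, UpdateAddHTLC, UpdateFailHTLC, and friends) now document TargetChanID as part of peer.LinkUpdater, which lets a read loop demultiplex them to per-channel queues with one type assertion. A minimal sketch of that pattern; the queue map is a stand-in for the peer's real per-link state:

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/peer"
)

// dispatch routes any channel-updating message to its link's queue.
func dispatch(msg lnwire.Message,
	queues map[lnwire.ChannelID]chan lnwire.Message) {

	if upd, ok := msg.(peer.LinkUpdater); ok {
		if q, ok := queues[upd.TargetChanID()]; ok {
			q <- msg
			return
		}
	}
	fmt.Printf("no active link for %T\n", msg)
}

func main() {
	var chanID lnwire.ChannelID
	queues := map[lnwire.ChannelID]chan lnwire.Message{
		chanID: make(chan lnwire.Message, 1),
	}
	dispatch(&lnwire.CommitSig{ChanID: chanID}, queues)
	fmt.Println("queued messages:", len(queues[chanID]))
}
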
peer/log.go (new file, 40 lines)
@ -0,0 +1,40 @@
+package peer
+
+import (
+	"github.com/btcsuite/btclog"
+	"github.com/lightningnetwork/lnd/build"
+)
+
+// peerLog is a logger that is initialized with the btclog.Disabled logger.
+var peerLog btclog.Logger
+
+// The default amount of logging is none.
+func init() {
+	UseLogger(build.NewSubLogger("PEER", nil))
+}
+
+// DisableLog disables all logging output.
+func DisableLog() {
+	UseLogger(btclog.Disabled)
+}
+
+// UseLogger uses a specified Logger to output package logging info.
+func UseLogger(logger btclog.Logger) {
+	peerLog = logger
+}
+
+// logClosure is used to provide a closure over expensive logging operations
+// so they aren't performed when the logging level doesn't warrant it.
+type logClosure func() string
+
+// String invokes the underlying function and returns the result.
+func (c logClosure) String() string {
+	return c()
+}
+
+// newLogClosure returns a new closure over a function that returns a string
+// which itself provides a Stringer interface so that it can be used with the
+// logging system.
+func newLogClosure(c func() string) logClosure {
+	return logClosure(c)
+}
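logClosure works because leveled loggers format their arguments lazily: the closure satisfies fmt.Stringer, so the expensive function only runs if the record is actually rendered. A self-contained analog of the idiom:

package main

import "fmt"

// lazyString mirrors peer's logClosure: a function that is only invoked when
// a formatter actually renders the value.
type lazyString func() string

func (c lazyString) String() string { return c() }

func expensiveDump() string {
	// Imagine a verbose dump of a large wire message here.
	return "...big dump..."
}

func main() {
	// A leveled logger that drops this record would never call String(),
	// so expensiveDump would never run. fmt always renders, so it runs here.
	fmt.Printf("msg: %v\n", lazyString(expensiveDump))
}
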
peer/test_utils.go (new file, 695 lines)
@ -0,0 +1,695 @@
+package peer
+
+import (
+	"bytes"
+	crand "crypto/rand"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"os"
+	"time"
+
+	"github.com/btcsuite/btcd/btcec"
+	"github.com/btcsuite/btcd/chaincfg"
+	"github.com/btcsuite/btcd/chaincfg/chainhash"
+	"github.com/btcsuite/btcd/txscript"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/btcsuite/btcutil"
+	"github.com/btcsuite/btcwallet/wallet/txauthor"
+	"github.com/btcsuite/btcwallet/wtxmgr"
+	"github.com/lightningnetwork/lnd/chainntnfs"
+	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/lightningnetwork/lnd/htlcswitch"
+	"github.com/lightningnetwork/lnd/input"
+	"github.com/lightningnetwork/lnd/keychain"
+	"github.com/lightningnetwork/lnd/lnwallet"
+	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
+	"github.com/lightningnetwork/lnd/lnwire"
+	"github.com/lightningnetwork/lnd/netann"
+	"github.com/lightningnetwork/lnd/queue"
+	"github.com/lightningnetwork/lnd/shachain"
+	"github.com/lightningnetwork/lnd/ticker"
+)
+
+const (
+	broadcastHeight = 100
+)
+
+var (
+	alicesPrivKey = []byte{
+		0x2b, 0xd8, 0x06, 0xc9, 0x7f, 0x0e, 0x00, 0xaf,
+		0x1a, 0x1f, 0xc3, 0x32, 0x8f, 0xa7, 0x63, 0xa9,
+		0x26, 0x97, 0x23, 0xc8, 0xdb, 0x8f, 0xac, 0x4f,
+		0x93, 0xaf, 0x71, 0xdb, 0x18, 0x6d, 0x6e, 0x90,
+	}
+
+	bobsPrivKey = []byte{
+		0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
+		0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
+		0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
+		0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
+	}
+
+	// Use a hard-coded HD seed.
+	testHdSeed = [32]byte{
+		0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
+		0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
+		0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
+		0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
+	}
+
+	// Just use some arbitrary bytes as delivery script.
+	dummyDeliveryScript = alicesPrivKey
+
+	// testTx is used as the default funding txn for single-funder channels.
+	testTx = &wire.MsgTx{
+		Version: 1,
+		TxIn: []*wire.TxIn{
+			{
+				PreviousOutPoint: wire.OutPoint{
+					Hash:  chainhash.Hash{},
+					Index: 0xffffffff,
+				},
+				SignatureScript: []byte{0x04, 0x31, 0xdc, 0x00, 0x1b, 0x01, 0x62},
+				Sequence:        0xffffffff,
+			},
+		},
+		TxOut: []*wire.TxOut{
+			{
+				Value: 5000000000,
+				PkScript: []byte{
+					0x41, // OP_DATA_65
+					0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
+					0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
+					0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
+					0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
+					0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
+					0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
+					0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
+					0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
+					0xa6, // 65-byte signature
+					0xac, // OP_CHECKSIG
+				},
+			},
+		},
+		LockTime: 5,
+	}
+)
+
+// noUpdate is a function which can be used as a parameter in createTestPeer to
+// call the setup code with no custom values on the channels set up.
+var noUpdate = func(a, b *channeldb.OpenChannel) {}
+
+type mockSigner struct {
+	key *btcec.PrivateKey
+}
+
+func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx,
+	signDesc *input.SignDescriptor) (input.Signature, error) {
+	amt := signDesc.Output.Value
+	witnessScript := signDesc.WitnessScript
+	privKey := m.key
+
+	if !privKey.PubKey().IsEqual(signDesc.KeyDesc.PubKey) {
+		return nil, fmt.Errorf("incorrect key passed")
+	}
+
+	switch {
+	case signDesc.SingleTweak != nil:
+		privKey = input.TweakPrivKey(privKey,
+			signDesc.SingleTweak)
+	case signDesc.DoubleTweak != nil:
+		privKey = input.DeriveRevocationPrivKey(privKey,
+			signDesc.DoubleTweak)
+	}
+
+	sig, err := txscript.RawTxInWitnessSignature(tx, signDesc.SigHashes,
+		signDesc.InputIndex, amt, witnessScript, signDesc.HashType,
+		privKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256())
+}
+
+func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx,
+	signDesc *input.SignDescriptor) (*input.Script, error) {
+
+	// TODO(roasbeef): expose tweaked signer from lnwallet so don't need to
+	// duplicate this code?
+
+	privKey := m.key
+
+	switch {
+	case signDesc.SingleTweak != nil:
+		privKey = input.TweakPrivKey(privKey,
+			signDesc.SingleTweak)
+	case signDesc.DoubleTweak != nil:
+		privKey = input.DeriveRevocationPrivKey(privKey,
+			signDesc.DoubleTweak)
+	}
+
+	witnessScript, err := txscript.WitnessSignature(tx, signDesc.SigHashes,
+		signDesc.InputIndex, signDesc.Output.Value, signDesc.Output.PkScript,
+		signDesc.HashType, privKey, true)
+	if err != nil {
+		return nil, err
+	}
+
+	return &input.Script{
+		Witness: witnessScript,
+	}, nil
+}
+
+var _ input.Signer = (*mockSigner)(nil)
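The trailing `var _ input.Signer = (*mockSigner)(nil)` (and the matching assertions for the other mocks below) is the standard Go compile-time interface check: it allocates nothing at runtime and simply fails the build if the mock drifts out of sync with the interface. A self-contained illustration:

package main

import "fmt"

type signer interface {
	Sign(msg []byte) []byte
}

type mockSigner struct{}

func (mockSigner) Sign(msg []byte) []byte { return msg }

// Compile-time check: if mockSigner ever stops implementing signer, this
// declaration is the line the compiler points at.
var _ signer = (*mockSigner)(nil)

func main() {
	fmt.Printf("%x\n", mockSigner{}.Sign([]byte{0x01}))
}
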
|
|
||||||
|
type mockChainIO struct {
	bestHeight int32
}

func (m *mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) {
	return nil, m.bestHeight, nil
}

func (*mockChainIO) GetUtxo(op *wire.OutPoint, _ []byte,
	heightHint uint32, _ <-chan struct{}) (*wire.TxOut, error) {
	return nil, nil
}

func (*mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
	return nil, nil
}

func (*mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
	return nil, nil
}

var _ lnwallet.BlockChainIO = (*mockChainIO)(nil)

type mockWalletController struct {
	rootKey       *btcec.PrivateKey
	publishedTxns chan *wire.MsgTx
}

func (*mockWalletController) FetchInputInfo(prevOut *wire.OutPoint) (
	*lnwallet.Utxo, error) {

	return nil, nil
}

func (*mockWalletController) ConfirmedBalance(confs int32) (btcutil.Amount,
	error) {

	return 0, nil
}

func (m *mockWalletController) NewAddress(addrType lnwallet.AddressType,
	change bool) (btcutil.Address, error) {

	addr, _ := btcutil.NewAddressPubKey(
		m.rootKey.PubKey().SerializeCompressed(), &chaincfg.MainNetParams,
	)
	return addr, nil
}

func (*mockWalletController) LastUnusedAddress(addrType lnwallet.AddressType) (
	btcutil.Address, error) {

	return nil, nil
}

func (*mockWalletController) IsOurAddress(a btcutil.Address) bool {
	return false
}

func (*mockWalletController) SendOutputs(outputs []*wire.TxOut,
	feeRate chainfee.SatPerKWeight, label string) (*wire.MsgTx, error) {

	return nil, nil
}

func (*mockWalletController) CreateSimpleTx(outputs []*wire.TxOut,
	feeRate chainfee.SatPerKWeight, dryRun bool) (*txauthor.AuthoredTx, error) {

	return nil, nil
}

func (*mockWalletController) ListUnspentWitness(minconfirms,
	maxconfirms int32) ([]*lnwallet.Utxo, error) {

	return nil, nil
}

func (*mockWalletController) ListTransactionDetails(startHeight,
	endHeight int32) ([]*lnwallet.TransactionDetail, error) {

	return nil, nil
}

func (*mockWalletController) LockOutpoint(o wire.OutPoint) {}

func (*mockWalletController) UnlockOutpoint(o wire.OutPoint) {}

func (m *mockWalletController) PublishTransaction(tx *wire.MsgTx,
	label string) error {
	m.publishedTxns <- tx
	return nil
}

func (*mockWalletController) LabelTransaction(hash chainhash.Hash,
	label string, overwrite bool) error {

	return nil
}

func (*mockWalletController) SubscribeTransactions() (
	lnwallet.TransactionSubscription, error) {

	return nil, nil
}

func (*mockWalletController) IsSynced() (bool, int64, error) {
	return false, 0, nil
}

func (*mockWalletController) Start() error {
	return nil
}

func (*mockWalletController) Stop() error {
	return nil
}

func (*mockWalletController) BackEnd() string {
	return ""
}

func (*mockWalletController) LeaseOutput(wtxmgr.LockID,
	wire.OutPoint) (time.Time, error) {

	return time.Now(), nil
}

func (*mockWalletController) ReleaseOutput(wtxmgr.LockID, wire.OutPoint) error {
	return nil
}

func (*mockWalletController) GetRecoveryInfo() (bool, float64, error) {
	return false, 0, nil
}

var _ lnwallet.WalletController = (*mockWalletController)(nil)

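// A hedged usage note: PublishTransaction above forwards every broadcast onto
// the publishedTxns channel, so a test can observe what the code under test
// published. For example, with a buffered channel so the send cannot block:
//
//	publTx := make(chan *wire.MsgTx, 1)
//	wallet := &mockWalletController{publishedTxns: publTx}
//	_ = wallet.PublishTransaction(wire.NewMsgTx(2), "")
//	broadcast := <-publTx // the transaction that was "broadcast"
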
type mockNotifier struct {
	confChannel chan *chainntnfs.TxConfirmation
}

func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	_ []byte, numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent,
	error) {

	return &chainntnfs.ConfirmationEvent{
		Confirmed: m.confChannel,
	}, nil
}

func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte,
	heightHint uint32) (*chainntnfs.SpendEvent, error) {

	return &chainntnfs.SpendEvent{
		Spend:  make(chan *chainntnfs.SpendDetail),
		Cancel: func() {},
	}, nil
}

func (m *mockNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	return &chainntnfs.BlockEpochEvent{
		Epochs: make(chan *chainntnfs.BlockEpoch),
		Cancel: func() {},
	}, nil
}

func (m *mockNotifier) Start() error {
	return nil
}

func (m *mockNotifier) Stop() error {
	return nil
}

func (m *mockNotifier) Started() bool {
	return true
}

var _ chainntnfs.ChainNotifier = (*mockNotifier)(nil)

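// A hedged usage note: every RegisterConfirmationsNtfn call on the
// mockNotifier above hands back the same confChannel as its Confirmed
// channel, so a test can simulate a confirmation on demand (the send blocks
// until the code under test reads it, since the channel is unbuffered):
//
//	notifier := &mockNotifier{
//		confChannel: make(chan *chainntnfs.TxConfirmation),
//	}
//	// Later, pretend the watched transaction confirmed:
//	notifier.confChannel <- &chainntnfs.TxConfirmation{}
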
// createTestPeer creates a channel between two nodes, and returns a peer for
// one of the nodes, together with the channel seen from both nodes. It takes
// an updateChan function which can be used to modify the default values on
// the channel states for each peer.
func createTestPeer(notifier chainntnfs.ChainNotifier,
	publTx chan *wire.MsgTx, updateChan func(a, b *channeldb.OpenChannel)) (
	*Brontide, *lnwallet.LightningChannel, func(), error) {

	aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(
		btcec.S256(), alicesPrivKey,
	)
	aliceKeySigner := &keychain.PrivKeyDigestSigner{PrivKey: aliceKeyPriv}
	bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(
		btcec.S256(), bobsPrivKey,
	)

	channelCapacity := btcutil.Amount(10 * 1e8)
	channelBal := channelCapacity / 2
	aliceDustLimit := btcutil.Amount(200)
	bobDustLimit := btcutil.Amount(1300)
	csvTimeoutAlice := uint32(5)
	csvTimeoutBob := uint32(4)

	prevOut := &wire.OutPoint{
		Hash:  chainhash.Hash(testHdSeed),
		Index: 0,
	}
	fundingTxIn := wire.NewTxIn(prevOut, nil, nil)

	aliceCfg := channeldb.ChannelConfig{
		ChannelConstraints: channeldb.ChannelConstraints{
			DustLimit:        aliceDustLimit,
			MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
			ChanReserve:      btcutil.Amount(rand.Int63()),
			MinHTLC:          lnwire.MilliSatoshi(rand.Int63()),
			MaxAcceptedHtlcs: uint16(rand.Int31()),
			CsvDelay:         uint16(csvTimeoutAlice),
		},
		MultiSigKey: keychain.KeyDescriptor{
			PubKey: aliceKeyPub,
		},
		RevocationBasePoint: keychain.KeyDescriptor{
			PubKey: aliceKeyPub,
		},
		PaymentBasePoint: keychain.KeyDescriptor{
			PubKey: aliceKeyPub,
		},
		DelayBasePoint: keychain.KeyDescriptor{
			PubKey: aliceKeyPub,
		},
		HtlcBasePoint: keychain.KeyDescriptor{
			PubKey: aliceKeyPub,
		},
	}
	bobCfg := channeldb.ChannelConfig{
		ChannelConstraints: channeldb.ChannelConstraints{
			DustLimit:        bobDustLimit,
			MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
			ChanReserve:      btcutil.Amount(rand.Int63()),
			MinHTLC:          lnwire.MilliSatoshi(rand.Int63()),
			MaxAcceptedHtlcs: uint16(rand.Int31()),
			CsvDelay:         uint16(csvTimeoutBob),
		},
		MultiSigKey: keychain.KeyDescriptor{
			PubKey: bobKeyPub,
		},
		RevocationBasePoint: keychain.KeyDescriptor{
			PubKey: bobKeyPub,
		},
		PaymentBasePoint: keychain.KeyDescriptor{
			PubKey: bobKeyPub,
		},
		DelayBasePoint: keychain.KeyDescriptor{
			PubKey: bobKeyPub,
		},
		HtlcBasePoint: keychain.KeyDescriptor{
			PubKey: bobKeyPub,
		},
	}

	bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
	if err != nil {
		return nil, nil, nil, err
	}
	bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
	bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
	if err != nil {
		return nil, nil, nil, err
	}
	bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])

	aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
	if err != nil {
		return nil, nil, nil, err
	}
	alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
	aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
	if err != nil {
		return nil, nil, nil, err
	}
	aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])

	aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns(
		channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint,
		bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit,
	)
	if err != nil {
		return nil, nil, nil, err
	}

	alicePath, err := ioutil.TempDir("", "alicedb")
	if err != nil {
		return nil, nil, nil, err
	}

	dbAlice, err := channeldb.Open(alicePath)
	if err != nil {
		return nil, nil, nil, err
	}

	bobPath, err := ioutil.TempDir("", "bobdb")
	if err != nil {
		return nil, nil, nil, err
	}

	dbBob, err := channeldb.Open(bobPath)
	if err != nil {
		return nil, nil, nil, err
	}

	estimator := chainfee.NewStaticEstimator(12500, 0)
	feePerKw, err := estimator.EstimateFeePerKW(1)
	if err != nil {
		return nil, nil, nil, err
	}

	// TODO(roasbeef): need to factor in commit fee?
	aliceCommit := channeldb.ChannelCommitment{
		CommitHeight:  0,
		LocalBalance:  lnwire.NewMSatFromSatoshis(channelBal),
		RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
		FeePerKw:      btcutil.Amount(feePerKw),
		CommitFee:     feePerKw.FeeForWeight(input.CommitWeight),
		CommitTx:      aliceCommitTx,
		CommitSig:     bytes.Repeat([]byte{1}, 71),
	}
	bobCommit := channeldb.ChannelCommitment{
		CommitHeight:  0,
		LocalBalance:  lnwire.NewMSatFromSatoshis(channelBal),
		RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
		FeePerKw:      btcutil.Amount(feePerKw),
		CommitFee:     feePerKw.FeeForWeight(input.CommitWeight),
		CommitTx:      bobCommitTx,
		CommitSig:     bytes.Repeat([]byte{1}, 71),
	}

	var chanIDBytes [8]byte
	if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil {
		return nil, nil, nil, err
	}

	shortChanID := lnwire.NewShortChanIDFromInt(
		binary.BigEndian.Uint64(chanIDBytes[:]),
	)

	aliceChannelState := &channeldb.OpenChannel{
		LocalChanCfg:            aliceCfg,
		RemoteChanCfg:           bobCfg,
		IdentityPub:             aliceKeyPub,
		FundingOutpoint:         *prevOut,
		ShortChannelID:          shortChanID,
		ChanType:                channeldb.SingleFunderTweaklessBit,
		IsInitiator:             true,
		Capacity:                channelCapacity,
		RemoteCurrentRevocation: bobCommitPoint,
		RevocationProducer:      alicePreimageProducer,
		RevocationStore:         shachain.NewRevocationStore(),
		LocalCommitment:         aliceCommit,
		RemoteCommitment:        aliceCommit,
		Db:                      dbAlice,
		Packager:                channeldb.NewChannelPackager(shortChanID),
		FundingTxn:              testTx,
	}
	bobChannelState := &channeldb.OpenChannel{
		LocalChanCfg:            bobCfg,
		RemoteChanCfg:           aliceCfg,
		IdentityPub:             bobKeyPub,
		FundingOutpoint:         *prevOut,
		ChanType:                channeldb.SingleFunderTweaklessBit,
		IsInitiator:             false,
		Capacity:                channelCapacity,
		RemoteCurrentRevocation: aliceCommitPoint,
		RevocationProducer:      bobPreimageProducer,
		RevocationStore:         shachain.NewRevocationStore(),
		LocalCommitment:         bobCommit,
		RemoteCommitment:        bobCommit,
		Db:                      dbBob,
		Packager:                channeldb.NewChannelPackager(shortChanID),
	}

	// Set custom values on the channel states.
	updateChan(aliceChannelState, bobChannelState)

	aliceAddr := &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18555,
	}

	if err := aliceChannelState.SyncPending(aliceAddr, 0); err != nil {
		return nil, nil, nil, err
	}

	bobAddr := &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18556,
	}

	if err := bobChannelState.SyncPending(bobAddr, 0); err != nil {
		return nil, nil, nil, err
	}

	cleanUpFunc := func() {
		os.RemoveAll(bobPath)
		os.RemoveAll(alicePath)
	}

	aliceSigner := &mockSigner{aliceKeyPriv}
	bobSigner := &mockSigner{bobKeyPriv}

	alicePool := lnwallet.NewSigPool(1, aliceSigner)
	channelAlice, err := lnwallet.NewLightningChannel(
		aliceSigner, aliceChannelState, alicePool,
	)
	if err != nil {
		return nil, nil, nil, err
	}
	_ = alicePool.Start()

	bobPool := lnwallet.NewSigPool(1, bobSigner)
	channelBob, err := lnwallet.NewLightningChannel(
		bobSigner, bobChannelState, bobPool,
	)
	if err != nil {
		return nil, nil, nil, err
	}
	_ = bobPool.Start()

	chainIO := &mockChainIO{
		bestHeight: broadcastHeight,
	}
	wallet := &lnwallet.LightningWallet{
		WalletController: &mockWalletController{
			rootKey:       aliceKeyPriv,
			publishedTxns: publTx,
		},
	}

	_, currentHeight, err := chainIO.GetBestBlock()
	if err != nil {
		return nil, nil, nil, err
	}

	htlcSwitch, err := htlcswitch.New(htlcswitch.Config{
		DB:             dbAlice,
		SwitchPackager: channeldb.NewSwitchPackager(),
		Notifier:       notifier,
		FwdEventTicker: ticker.New(
			htlcswitch.DefaultFwdEventInterval),
		LogEventTicker: ticker.New(
			htlcswitch.DefaultLogInterval),
		AckEventTicker: ticker.New(
			htlcswitch.DefaultAckInterval),
	}, uint32(currentHeight))
	if err != nil {
		return nil, nil, nil, err
	}
	if err = htlcSwitch.Start(); err != nil {
		return nil, nil, nil, err
	}

	nodeSignerAlice := netann.NewNodeSigner(aliceKeySigner)

	const chanActiveTimeout = time.Minute

	chanStatusMgr, err := netann.NewChanStatusManager(&netann.ChanStatusConfig{
		ChanStatusSampleInterval: 30 * time.Second,
		ChanEnableTimeout:        chanActiveTimeout,
		ChanDisableTimeout:       2 * time.Minute,
		DB:                       dbAlice,
		Graph:                    dbAlice.ChannelGraph(),
		MessageSigner:            nodeSignerAlice,
		OurPubKey:                aliceKeyPub,
		IsChannelActive:          htlcSwitch.HasActiveLink,
		ApplyChannelUpdate:       func(*lnwire.ChannelUpdate) error { return nil },
	})
	if err != nil {
		return nil, nil, nil, err
	}
	if err = chanStatusMgr.Start(); err != nil {
		return nil, nil, nil, err
	}

	errBuffer, err := queue.NewCircularBuffer(ErrorBufferSize)
	if err != nil {
		return nil, nil, nil, err
	}

	var pubKey [33]byte
	copy(pubKey[:], aliceKeyPub.SerializeCompressed())

	cfgAddr := &lnwire.NetAddress{
		IdentityKey: aliceKeyPub,
		Address:     aliceAddr,
		ChainNet:    wire.SimNet,
	}

	cfg := &Config{
		Addr:        cfgAddr,
		PubKeyBytes: pubKey,
		ErrorBuffer: errBuffer,
		ChainIO:     chainIO,
		Switch:      htlcSwitch,

		ChanActiveTimeout: chanActiveTimeout,
		InterceptSwitch:   htlcswitch.NewInterceptableSwitch(htlcSwitch),

		ChannelDB:      dbAlice,
		FeeEstimator:   estimator,
		Wallet:         wallet,
		ChainNotifier:  notifier,
		ChanStatusMgr:  chanStatusMgr,
		DisconnectPeer: func(b *btcec.PublicKey) error { return nil },
	}

	alicePeer := NewBrontide(*cfg)

	chanID := lnwire.NewChanIDFromOutPoint(channelAlice.ChannelPoint())
	alicePeer.activeChannels[chanID] = channelAlice

	alicePeer.wg.Add(1)
	go alicePeer.channelManager()

	return alicePeer, channelBob, cleanUpFunc, nil
}
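
// A hedged example of how a test in this package might drive the helper
// above; the test name and the final assertions are illustrative only:
//
//	func TestPeerExample(t *testing.T) {
//		notifier := &mockNotifier{
//			confChannel: make(chan *chainntnfs.TxConfirmation),
//		}
//		broadcastTxChan := make(chan *wire.MsgTx)
//
//		alicePeer, bobChan, cleanUp, err := createTestPeer(
//			notifier, broadcastTxChan, noUpdate,
//		)
//		if err != nil {
//			t.Fatalf("unable to create test channels: %v", err)
//		}
//		defer cleanUp()
//
//		// alicePeer is a *Brontide with one active channel; bobChan is
//		// the same channel from the remote node's point of view.
//		_ = alicePeer
//		_ = bobChan
//	}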
37 rpcserver.go

@ -57,6 +57,7 @@ import (
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/lightningnetwork/lnd/macaroons"
 	"github.com/lightningnetwork/lnd/monitoring"
+	"github.com/lightningnetwork/lnd/peer"
 	"github.com/lightningnetwork/lnd/peernotifier"
 	"github.com/lightningnetwork/lnd/record"
 	"github.com/lightningnetwork/lnd/routing"
@ -2105,17 +2106,17 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
 		// With the transaction broadcast, we send our first update to
 		// the client.
 		updateChan = make(chan interface{}, 2)
-		updateChan <- &pendingUpdate{
+		updateChan <- &peer.PendingUpdate{
 			Txid: closingTxid[:],
 		}
 
 		errChan = make(chan error, 1)
 		notifier := r.server.cc.chainNotifier
-		go waitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint,
+		go peer.WaitForChanToClose(uint32(bestHeight), notifier, errChan, chanPoint,
 			&closingTxid, closingTx.TxOut[0].PkScript, func() {
 				// Respond to the local subsystem which
 				// requested the channel closure.
-				updateChan <- &channelCloseUpdate{
+				updateChan <- &peer.ChannelCloseUpdate{
 					ClosingTxid: closingTxid[:],
 					Success:     true,
 				}
@ -2228,7 +2229,7 @@ out:
 			// then we can break out of our dispatch loop as we no
 			// longer need to process any further updates.
 			switch closeUpdate := closingUpdate.(type) {
-			case *channelCloseUpdate:
+			case *peer.ChannelCloseUpdate:
 				h, _ := chainhash.NewHash(closeUpdate.ClosingTxid)
 				rpcsLog.Infof("[closechannel] close completed: "+
 					"txid(%v)", h)
@ -2246,7 +2247,7 @@ func createRPCCloseUpdate(update interface{}) (
 	*lnrpc.CloseStatusUpdate, error) {
 
 	switch u := update.(type) {
-	case *channelCloseUpdate:
+	case *peer.ChannelCloseUpdate:
 		return &lnrpc.CloseStatusUpdate{
 			Update: &lnrpc.CloseStatusUpdate_ChanClose{
 				ChanClose: &lnrpc.ChannelCloseUpdate{
@ -2254,7 +2255,7 @@ func createRPCCloseUpdate(update interface{}) (
 			},
 		},
 	}, nil
-	case *pendingUpdate:
+	case *peer.PendingUpdate:
 		return &lnrpc.CloseStatusUpdate{
 			Update: &lnrpc.CloseStatusUpdate_ClosePending{
 				ClosePending: &lnrpc.PendingUpdate{
@ -2571,12 +2572,12 @@ func (r *rpcServer) ListPeers(ctx context.Context,
 			serverPeer.RemoteFeatures(),
 		)
 
-		peer := &lnrpc.Peer{
+		rpcPeer := &lnrpc.Peer{
 			PubKey:    hex.EncodeToString(nodePub[:]),
-			Address:   serverPeer.conn.RemoteAddr().String(),
+			Address:   serverPeer.Conn().RemoteAddr().String(),
-			Inbound:   serverPeer.inbound,
+			Inbound:   serverPeer.Inbound(),
-			BytesRecv: atomic.LoadUint64(&serverPeer.bytesReceived),
+			BytesRecv: serverPeer.BytesReceived(),
-			BytesSent: atomic.LoadUint64(&serverPeer.bytesSent),
+			BytesSent: serverPeer.BytesSent(),
 			SatSent:   satSent,
 			SatRecv:   satRecv,
 			PingTime:  serverPeer.PingTime(),
@ -2591,27 +2592,27 @@ func (r *rpcServer) ListPeers(ctx context.Context,
 		// it is non-nil. If we want all the stored errors, simply
 		// add the full list to our set of errors.
 		if in.LatestError {
-			latestErr := serverPeer.errorBuffer.Latest()
+			latestErr := serverPeer.ErrorBuffer().Latest()
 			if latestErr != nil {
 				peerErrors = []interface{}{latestErr}
 			}
 		} else {
-			peerErrors = serverPeer.errorBuffer.List()
+			peerErrors = serverPeer.ErrorBuffer().List()
 		}
 
 		// Add the relevant peer errors to our response.
 		for _, error := range peerErrors {
-			tsError := error.(*timestampedError)
+			tsError := error.(*peer.TimestampedError)
 
 			rpcErr := &lnrpc.TimestampedError{
-				Timestamp: uint64(tsError.timestamp.Unix()),
+				Timestamp: uint64(tsError.Timestamp.Unix()),
-				Error:     tsError.error.Error(),
+				Error:     tsError.Error.Error(),
 			}
 
-			peer.Errors = append(peer.Errors, rpcErr)
+			rpcPeer.Errors = append(rpcPeer.Errors, rpcErr)
 		}
 
-		resp.Peers = append(resp.Peers, peer)
+		resp.Peers = append(resp.Peers, rpcPeer)
 	}
 
 	rpcsLog.Debugf("[listpeers] yielded %v peers", serverPeers)
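
The CloseChannel changes above replace lnd's unexported update structs with exported types from the new peer package. A minimal sketch (mine, not part of the diff), using only the two type names and fields visible in these hunks, of how a consumer of the update channel dispatches on them:

	switch u := closingUpdate.(type) {
	case *peer.PendingUpdate:
		// Close transaction broadcast; u.Txid identifies it.
	case *peer.ChannelCloseUpdate:
		// Close transaction confirmed; u.ClosingTxid and u.Success
		// report the outcome.
	}
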
167 server.go

@ -52,6 +52,7 @@ import (
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/lightningnetwork/lnd/nat"
 	"github.com/lightningnetwork/lnd/netann"
+	"github.com/lightningnetwork/lnd/peer"
 	"github.com/lightningnetwork/lnd/peernotifier"
 	"github.com/lightningnetwork/lnd/pool"
 	"github.com/lightningnetwork/lnd/queue"
@ -113,7 +114,7 @@ var (
 // errPeerAlreadyConnected is an error returned by the server when we're
 // commanded to connect to a peer, but they're already connected.
 type errPeerAlreadyConnected struct {
-	peer *peer
+	peer *peer.Brontide
 }
 
 // Error returns the human readable version of this error type.
@ -167,10 +168,10 @@ type server struct {
 	lastDetectedIP net.IP
 
 	mu         sync.RWMutex
-	peersByPub map[string]*peer
+	peersByPub map[string]*peer.Brontide
 
-	inboundPeers  map[string]*peer
+	inboundPeers  map[string]*peer.Brontide
-	outboundPeers map[string]*peer
+	outboundPeers map[string]*peer.Brontide
 
 	peerConnectedListeners    map[string][]chan<- lnpeer.Peer
 	peerDisconnectedListeners map[string][]chan<- struct{}
@ -190,7 +191,7 @@ type server struct {
 	// a disconnect. Adding a peer to this map causes the peer termination
 	// watcher to short circuit in the event that peers are purposefully
 	// disconnected.
-	ignorePeerTermination map[*peer]struct{}
+	ignorePeerTermination map[*peer.Brontide]struct{}
 
 	// scheduledPeerConnection maps a pubkey string to a callback that
 	// should be executed in the peerTerminationWatcher the prior peer with
@ -452,12 +453,12 @@ func newServer(cfg *Config, listenAddrs []net.Addr, chanDB *channeldb.DB,
 		persistentConnReqs:      make(map[string][]*connmgr.ConnReq),
 		persistentRetryCancels:  make(map[string]chan struct{}),
 		peerErrors:              make(map[string]*queue.CircularBuffer),
-		ignorePeerTermination:   make(map[*peer]struct{}),
+		ignorePeerTermination:   make(map[*peer.Brontide]struct{}),
 		scheduledPeerConnection: make(map[string]func()),
 
-		peersByPub:    make(map[string]*peer),
+		peersByPub:    make(map[string]*peer.Brontide),
-		inboundPeers:  make(map[string]*peer),
+		inboundPeers:  make(map[string]*peer.Brontide),
-		outboundPeers: make(map[string]*peer),
+		outboundPeers: make(map[string]*peer.Brontide),
 		peerConnectedListeners:    make(map[string][]chan<- lnpeer.Peer),
 		peerDisconnectedListeners: make(map[string][]chan<- struct{}),
 
@ -491,15 +492,7 @@ func newServer(cfg *Config, listenAddrs []net.Addr, chanDB *channeldb.DB,
 				return
 			}
 
-			select {
-			case peer.localCloseChanReqs <- request:
-				srvrLog.Infof("Local close channel request "+
-					"delivered to peer: %x", pubKey[:])
-			case <-peer.quit:
-				srvrLog.Errorf("Unable to deliver local close "+
-					"channel request to peer %x, err: %v",
-					pubKey[:], err)
-			}
+			peer.HandleLocalCloseChanReqs(request)
 		},
 		FwdingLog:      chanDB.ForwardingLog(),
 		SwitchPackager: channeldb.NewSwitchPackager(),
@ -1481,7 +1474,13 @@ func (s *server) Stop() error {
 		// Disconnect from each active peers to ensure that
 		// peerTerminationWatchers signal completion to each peer.
 		for _, peer := range s.Peers() {
-			s.DisconnectPeer(peer.addr.IdentityKey)
+			err := s.DisconnectPeer(peer.IdentityKey())
+			if err != nil {
+				srvrLog.Warnf("could not disconnect peer: %v"+
+					"received error: %v", peer.IdentityKey(),
+					err,
+				)
+			}
 		}
 
 		// Now that all connections have been torn down, stop the tower
@ -1820,7 +1819,7 @@ func (s *server) peerBootstrapper(numTargetPeers uint32,
 			s.mu.RLock()
 			ignoreList := make(map[autopilot.NodeID]struct{})
 			for _, peer := range s.peersByPub {
-				nID := autopilot.NewNodeID(peer.addr.IdentityKey)
+				nID := autopilot.NewNodeID(peer.IdentityKey())
 				ignoreList[nID] = struct{}{}
 			}
 			s.mu.RUnlock()
@ -2310,12 +2309,12 @@ func (s *server) BroadcastMessage(skips map[route.Vertex]struct{},
 	// peersByPub throughout this process to ensure we deliver messages to
 	// exact set of peers present at the time of invocation.
 	s.mu.RLock()
-	peers := make([]*peer, 0, len(s.peersByPub))
+	peers := make([]*peer.Brontide, 0, len(s.peersByPub))
 	for _, sPeer := range s.peersByPub {
 		if skips != nil {
-			if _, ok := skips[sPeer.pubKeyBytes]; ok {
+			if _, ok := skips[sPeer.PubKey()]; ok {
 				srvrLog.Tracef("Skipping %x in broadcast",
-					sPeer.pubKeyBytes[:])
+					sPeer.PubKey())
 				continue
 			}
 		}
@ -2413,7 +2412,7 @@ func (s *server) NotifyWhenOffline(peerPubKey [33]byte) <-chan struct{} {
 // daemon's local representation of the remote peer.
 //
 // NOTE: This function is safe for concurrent access.
-func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer, error) {
+func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer.Brontide, error) {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 
@ -2427,7 +2426,7 @@ func (s *server) FindPeer(peerKey *btcec.PublicKey) (*peer, error) {
 // public key.
 //
 // NOTE: This function is safe for concurrent access.
-func (s *server) FindPeerByPubStr(pubStr string) (*peer, error) {
+func (s *server) FindPeerByPubStr(pubStr string) (*peer.Brontide, error) {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 
@ -2436,7 +2435,7 @@ func (s *server) FindPeerByPubStr(pubStr string) (*peer, error) {
 
 // findPeerByPubStr is an internal method that retrieves the specified peer from
 // the server's internal state using.
-func (s *server) findPeerByPubStr(pubStr string) (*peer, error) {
+func (s *server) findPeerByPubStr(pubStr string) (*peer.Brontide, error) {
 	peer, ok := s.peersByPub[pubStr]
 	if !ok {
 		return nil, ErrPeerNotConnected
@ -2565,7 +2564,7 @@ func (s *server) InboundPeerConnected(conn net.Conn) {
 		// we'll close out the new connection s.t there's only a single
 		// connection between us.
 		localPub := s.identityECDH.PubKey()
-		if !connectedPeer.inbound &&
+		if !connectedPeer.Inbound() &&
 			!shouldDropLocalConnection(localPub, nodePub) {
 
 			srvrLog.Warnf("Received inbound connection from "+
@ -2676,7 +2675,7 @@ func (s *server) OutboundPeerConnected(connReq *connmgr.ConnReq, conn net.Conn)
 		// we'll close out the new connection s.t there's only a single
 		// connection between us.
 		localPub := s.identityECDH.PubKey()
-		if connectedPeer.inbound &&
+		if connectedPeer.Inbound() &&
 			shouldDropLocalConnection(localPub, nodePub) {
 
 			srvrLog.Warnf("Established outbound connection to "+
@ -2786,7 +2785,7 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
 	errBuffer, ok := s.peerErrors[pkStr]
 	if !ok {
 		var err error
-		errBuffer, err = queue.NewCircularBuffer(errorBufferSize)
+		errBuffer, err = queue.NewCircularBuffer(peer.ErrorBufferSize)
 		if err != nil {
 			srvrLog.Errorf("unable to create peer %v", err)
 			return
@ -2799,16 +2798,63 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
 	// offered that would trigger channel closure. In case of outgoing
 	// htlcs, an extra block is added to prevent the channel from being
 	// closed when the htlc is outstanding and a new block comes in.
-	p, err := newPeer(
-		s.cfg, conn, connReq, s, peerAddr, inbound, initFeatures,
-		legacyFeatures, s.cfg.ChanEnableTimeout,
-		lncfg.DefaultOutgoingCltvRejectDelta, errBuffer,
-	)
-	if err != nil {
-		srvrLog.Errorf("unable to create peer %v", err)
-		return
+	pCfg := peer.Config{
+		Conn:                    conn,
+		ConnReq:                 connReq,
+		Addr:                    peerAddr,
+		Inbound:                 inbound,
+		Features:                initFeatures,
+		LegacyFeatures:          legacyFeatures,
+		OutgoingCltvRejectDelta: lncfg.DefaultOutgoingCltvRejectDelta,
+		ChanActiveTimeout:       s.cfg.ChanEnableTimeout,
+		ErrorBuffer:             errBuffer,
+		WritePool:               s.writePool,
+		ReadPool:                s.readPool,
+		Switch:                  s.htlcSwitch,
+		InterceptSwitch:         s.interceptableSwitch,
+		ChannelDB:               s.chanDB,
+		ChainArb:                s.chainArb,
+		AuthGossiper:            s.authGossiper,
+		ChanStatusMgr:           s.chanStatusMgr,
+		ChainIO:                 s.cc.chainIO,
+		FeeEstimator:            s.cc.feeEstimator,
+		Signer:                  s.cc.wallet.Cfg.Signer,
+		SigPool:                 s.sigPool,
+		Wallet:                  s.cc.wallet,
+		ChainNotifier:           s.cc.chainNotifier,
+		RoutingPolicy:           s.cc.routingPolicy,
+		Sphinx:                  s.sphinx,
+		WitnessBeacon:           s.witnessBeacon,
+		Invoices:                s.invoices,
+		ChannelNotifier:         s.channelNotifier,
+		HtlcNotifier:            s.htlcNotifier,
+		TowerClient:             s.towerClient,
+		DisconnectPeer:          s.DisconnectPeer,
+		GenNodeAnnouncement:     s.genNodeAnnouncement,
+
+		PrunePersistentPeerConnection: s.prunePersistentPeerConnection,
+
+		FetchLastChanUpdate:   s.fetchLastChanUpdate(),
+		ProcessFundingOpen:    s.fundingMgr.processFundingOpen,
+		ProcessFundingAccept:  s.fundingMgr.processFundingAccept,
+		ProcessFundingCreated: s.fundingMgr.processFundingCreated,
+		ProcessFundingSigned:  s.fundingMgr.processFundingSigned,
+		ProcessFundingLocked:  s.fundingMgr.processFundingLocked,
+		ProcessFundingError:   s.fundingMgr.processFundingError,
+		IsPendingChannel:      s.fundingMgr.IsPendingChannel,
+
+		Hodl:                    s.cfg.Hodl,
+		UnsafeReplay:            s.cfg.UnsafeReplay,
+		MaxOutgoingCltvExpiry:   s.cfg.MaxOutgoingCltvExpiry,
+		MaxChannelFeeAllocation: s.cfg.MaxChannelFeeAllocation,
+		Quit:                    s.quit,
 	}
+
+	copy(pCfg.PubKeyBytes[:], peerAddr.IdentityKey.SerializeCompressed())
+	copy(pCfg.ServerPubKey[:], s.identityECDH.PubKey().SerializeCompressed())
+
+	p := peer.NewBrontide(pCfg)
 
 	// TODO(roasbeef): update IP address for link-node
 	// * also mark last-seen, do it one single transaction?
@ -2828,7 +2874,7 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
 
 // addPeer adds the passed peer to the server's global state of all active
 // peers.
-func (s *server) addPeer(p *peer) {
+func (s *server) addPeer(p *peer.Brontide) {
 	if p == nil {
 		return
 	}
@ -2844,12 +2890,12 @@ func (s *server) addPeer(p *peer) {
 	// TODO(roasbeef): pipe all requests through to the
 	// queryHandler/peerManager
 
-	pubSer := p.addr.IdentityKey.SerializeCompressed()
+	pubSer := p.IdentityKey().SerializeCompressed()
 	pubStr := string(pubSer)
 
 	s.peersByPub[pubStr] = p
 
-	if p.inbound {
+	if p.Inbound() {
 		s.inboundPeers[pubStr] = p
 	} else {
 		s.outboundPeers[pubStr] = p
@ -2872,7 +2918,7 @@ func (s *server) addPeer(p *peer) {
 // be signaled of the new peer once the method returns.
 //
 // NOTE: This MUST be launched as a goroutine.
-func (s *server) peerInitializer(p *peer) {
+func (s *server) peerInitializer(p *peer.Brontide) {
 	defer s.wg.Done()
 
 	// Avoid initializing peers while the server is exiting.
@ -2905,7 +2951,7 @@ func (s *server) peerInitializer(p *peer) {
 	// was successful, and to begin watching the peer's wait group.
 	close(ready)
 
-	pubStr := string(p.addr.IdentityKey.SerializeCompressed())
+	pubStr := string(p.IdentityKey().SerializeCompressed())
 
 	s.mu.Lock()
 	defer s.mu.Unlock()
@ -2933,7 +2979,7 @@ func (s *server) peerInitializer(p *peer) {
 // successfully, otherwise the peer should be disconnected instead.
 //
 // NOTE: This MUST be launched as a goroutine.
-func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) {
+func (s *server) peerTerminationWatcher(p *peer.Brontide, ready chan struct{}) {
 	defer s.wg.Done()
 
 	p.WaitForDisconnect(ready)
@ -2952,7 +2998,7 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) {
 	// available for use.
 	s.fundingMgr.CancelPeerReservations(p.PubKey())
 
-	pubKey := p.addr.IdentityKey
+	pubKey := p.IdentityKey()
 
 	// We'll also inform the gossiper that this peer is no longer active,
 	// so we don't need to maintain sync state for it any longer.
@ -2963,13 +3009,13 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) {
 	// with this interface should be closed.
 	//
 	// TODO(roasbeef): instead add a PurgeInterfaceLinks function?
-	links, err := p.server.htlcSwitch.GetLinksByInterface(p.pubKeyBytes)
+	links, err := s.htlcSwitch.GetLinksByInterface(p.PubKey())
 	if err != nil && err != htlcswitch.ErrNoLinksFound {
 		srvrLog.Errorf("Unable to get channel links for %v: %v", p, err)
 	}
 
 	for _, link := range links {
-		p.server.htlcSwitch.RemoveLink(link.ChanID())
+		s.htlcSwitch.RemoveLink(link.ChanID())
 	}
 
 	s.mu.Lock()
@ -3022,12 +3068,12 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) {
 	// within the peer's address for reconnection purposes.
 	//
 	// TODO(roasbeef): use them all?
-	if p.inbound {
+	if p.Inbound() {
 		advertisedAddr, err := s.fetchNodeAdvertisedAddr(pubKey)
 		switch {
 		// We found an advertised address, so use it.
 		case err == nil:
-			p.addr.Address = advertisedAddr
+			p.SetAddress(advertisedAddr)
 
 		// The peer doesn't have an advertised address.
 		case err == errNoAdvertisedAddr:
@ -3060,7 +3106,7 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) {
 	// Otherwise, we'll launch a new connection request in order to
 	// attempt to maintain a persistent connection with this peer.
 	connReq := &connmgr.ConnReq{
-		Addr:      p.addr,
+		Addr:      p.NetAddress(),
 		Permanent: true,
 	}
 	s.persistentConnReqs[pubStr] = append(
@ -3103,7 +3149,7 @@ func (s *server) peerTerminationWatcher(p *peer, ready chan struct{}) {
 
 // removePeer removes the passed peer from the server's state of all active
 // peers.
-func (s *server) removePeer(p *peer) {
+func (s *server) removePeer(p *peer.Brontide) {
 	if p == nil {
 		return
 	}
@ -3115,8 +3161,8 @@ func (s *server) removePeer(p *peer) {
 	p.Disconnect(fmt.Errorf("server: disconnecting peer %v", p))
 
 	// If this peer had an active persistent connection request, remove it.
-	if p.connReq != nil {
-		s.connMgr.Remove(p.connReq.ID())
+	if p.ConnReq() != nil {
+		s.connMgr.Remove(p.ConnReq().ID())
 	}
 
 	// Ignore deleting peers if we're shutting down.
@ -3124,12 +3170,13 @@ func (s *server) removePeer(p *peer) {
 		return
 	}
 
-	pubSer := p.addr.IdentityKey.SerializeCompressed()
+	pKey := p.PubKey()
+	pubSer := pKey[:]
 	pubStr := string(pubSer)
 
 	delete(s.peersByPub, pubStr)
 
-	if p.inbound {
+	if p.Inbound() {
 		delete(s.inboundPeers, pubStr)
 	} else {
 		delete(s.outboundPeers, pubStr)
@ -3137,8 +3184,8 @@ func (s *server) removePeer(p *peer) {
 
 	// Copy the peer's error buffer across to the server if it has any items
 	// in it so that we can restore peer errors across connections.
-	if p.errorBuffer.Total() > 0 {
-		s.peerErrors[pubStr] = p.errorBuffer
+	if p.ErrorBuffer().Total() > 0 {
+		s.peerErrors[pubStr] = p.ErrorBuffer()
 	}
 
 	// Inform the peer notifier of a peer offline event so that it can be
@ -3358,8 +3405,8 @@ func (s *server) OpenChannel(
 	// We'll wait until the peer is active before beginning the channel
 	// opening process.
 	select {
-	case <-peer.activeSignal:
+	case <-peer.ActiveSignal():
-	case <-peer.quit:
+	case <-peer.QuitSignal():
 		req.err <- fmt.Errorf("peer %x disconnected", pubKeyBytes)
 		return req.updates, req.err
 	case <-s.quit:
@ -3391,11 +3438,11 @@ func (s *server) OpenChannel(
 // Peers returns a slice of all active peers.
 //
 // NOTE: This function is safe for concurrent access.
-func (s *server) Peers() []*peer {
+func (s *server) Peers() []*peer.Brontide {
 	s.mu.RLock()
 	defer s.mu.RUnlock()
 
-	peers := make([]*peer, 0, len(s.peersByPub))
+	peers := make([]*peer.Brontide, 0, len(s.peersByPub))
 	for _, peer := range s.peersByPub {
 		peers = append(peers, peer)
 	}
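
A side note on the two copy calls in the peerConnected hunk above: PubKeyBytes and ServerPubKey are fixed-size [33]byte arrays, while SerializeCompressed returns a []byte slice, so the bytes must be copied rather than assigned. A minimal sketch of the pattern (remoteKey here is a hypothetical *btcec.PublicKey):

	var pub [33]byte // compile-time fixed size
	copy(pub[:], remoteKey.SerializeCompressed())
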
398
test_utils.go
398
test_utils.go
@ -1,34 +1,8 @@
|
|||||||
package lnd
|
package lnd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
crand "crypto/rand"
|
|
||||||
"encoding/binary"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/rand"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/btcsuite/btcd/btcec"
|
|
||||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||||
"github.com/btcsuite/btcd/wire"
|
"github.com/btcsuite/btcd/wire"
|
||||||
"github.com/btcsuite/btcutil"
|
|
||||||
"github.com/lightningnetwork/lnd/chainntnfs"
|
|
||||||
"github.com/lightningnetwork/lnd/channeldb"
|
|
||||||
"github.com/lightningnetwork/lnd/clock"
|
|
||||||
"github.com/lightningnetwork/lnd/contractcourt"
|
|
||||||
"github.com/lightningnetwork/lnd/htlcswitch"
|
|
||||||
"github.com/lightningnetwork/lnd/input"
|
|
||||||
"github.com/lightningnetwork/lnd/keychain"
|
|
||||||
"github.com/lightningnetwork/lnd/lnwallet"
|
|
||||||
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
|
|
||||||
"github.com/lightningnetwork/lnd/lnwallet/chancloser"
|
|
||||||
"github.com/lightningnetwork/lnd/lnwire"
|
|
||||||
"github.com/lightningnetwork/lnd/netann"
|
|
||||||
"github.com/lightningnetwork/lnd/shachain"
|
|
||||||
"github.com/lightningnetwork/lnd/ticker"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -54,9 +28,6 @@ var (
|
|||||||
0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
|
0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Just use some arbitrary bytes as delivery script.
|
|
||||||
dummyDeliveryScript = alicesPrivKey[:]
|
|
||||||
|
|
||||||
// testTx is used as the default funding txn for single-funder channels.
|
// testTx is used as the default funding txn for single-funder channels.
|
||||||
testTx = &wire.MsgTx{
|
testTx = &wire.MsgTx{
|
||||||
Version: 1,
|
Version: 1,
|
||||||
@ -91,372 +62,3 @@ var (
|
|||||||
LockTime: 5,
|
LockTime: 5,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
// noUpdate is a function which can be used as a parameter in createTestPeer to
|
|
||||||
// call the setup code with no custom values on the channels set up.
|
|
||||||
var noUpdate = func(a, b *channeldb.OpenChannel) {}
|
|
||||||
|
|
||||||
// createTestPeer creates a channel between two nodes, and returns a peer for
|
|
||||||
// one of the nodes, together with the channel seen from both nodes. It takes
|
|
||||||
// an updateChan function which can be used to modify the default values on
|
|
||||||
// the channel states for each peer.
|
|
||||||
func createTestPeer(notifier chainntnfs.ChainNotifier, publTx chan *wire.MsgTx,
|
|
||||||
updateChan func(a, b *channeldb.OpenChannel)) (*peer, *lnwallet.LightningChannel,
|
|
||||||
*lnwallet.LightningChannel, func(), error) {
|
|
||||||
|
|
||||||
aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(
|
|
||||||
btcec.S256(), alicesPrivKey,
|
|
||||||
)
|
|
||||||
aliceKeySigner := &keychain.PrivKeyDigestSigner{PrivKey: aliceKeyPriv}
|
|
||||||
bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(
|
|
||||||
btcec.S256(), bobsPrivKey,
|
|
||||||
)
|
|
||||||
|
|
||||||
channelCapacity := btcutil.Amount(10 * 1e8)
|
|
||||||
channelBal := channelCapacity / 2
|
|
||||||
aliceDustLimit := btcutil.Amount(200)
|
|
||||||
bobDustLimit := btcutil.Amount(1300)
|
|
||||||
csvTimeoutAlice := uint32(5)
|
|
||||||
csvTimeoutBob := uint32(4)
|
|
||||||
|
|
||||||
prevOut := &wire.OutPoint{
|
|
||||||
Hash: chainhash.Hash(testHdSeed),
|
|
||||||
Index: 0,
|
|
||||||
}
|
|
||||||
fundingTxIn := wire.NewTxIn(prevOut, nil, nil)
|
|
||||||
|
|
||||||
aliceCfg := channeldb.ChannelConfig{
|
|
||||||
ChannelConstraints: channeldb.ChannelConstraints{
|
|
||||||
DustLimit: aliceDustLimit,
|
|
||||||
MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
|
|
||||||
ChanReserve: btcutil.Amount(rand.Int63()),
|
|
||||||
MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
|
|
||||||
MaxAcceptedHtlcs: uint16(rand.Int31()),
|
|
||||||
CsvDelay: uint16(csvTimeoutAlice),
|
|
||||||
},
|
|
||||||
MultiSigKey: keychain.KeyDescriptor{
|
|
||||||
PubKey: aliceKeyPub,
|
|
||||||
},
|
|
||||||
RevocationBasePoint: keychain.KeyDescriptor{
|
|
||||||
PubKey: aliceKeyPub,
|
|
||||||
},
|
|
||||||
PaymentBasePoint: keychain.KeyDescriptor{
|
|
||||||
PubKey: aliceKeyPub,
|
|
||||||
},
|
|
||||||
DelayBasePoint: keychain.KeyDescriptor{
|
|
||||||
PubKey: aliceKeyPub,
|
|
||||||
},
|
|
||||||
HtlcBasePoint: keychain.KeyDescriptor{
|
|
||||||
PubKey: aliceKeyPub,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
bobCfg := channeldb.ChannelConfig{
|
|
||||||
ChannelConstraints: channeldb.ChannelConstraints{
|
|
||||||
DustLimit: bobDustLimit,
|
|
||||||
MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
|
|
||||||
ChanReserve: btcutil.Amount(rand.Int63()),
|
|
||||||
MinHTLC: lnwire.MilliSatoshi(rand.Int63()),
|
|
||||||
MaxAcceptedHtlcs: uint16(rand.Int31()),
|
|
||||||
CsvDelay: uint16(csvTimeoutBob),
|
|
||||||
},
|
|
||||||
MultiSigKey: keychain.KeyDescriptor{
|
|
||||||
PubKey: bobKeyPub,
|
|
||||||
},
|
|
||||||
RevocationBasePoint: keychain.KeyDescriptor{
|
|
||||||
PubKey: bobKeyPub,
|
|
||||||
},
|
|
||||||
PaymentBasePoint: keychain.KeyDescriptor{
|
|
||||||
PubKey: bobKeyPub,
|
|
||||||
},
|
|
||||||
DelayBasePoint: keychain.KeyDescriptor{
|
|
||||||
PubKey: bobKeyPub,
|
|
||||||
},
|
|
||||||
HtlcBasePoint: keychain.KeyDescriptor{
|
|
||||||
PubKey: bobKeyPub,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
|
|
||||||
bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])
|
|
||||||
|
|
||||||
aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
|
|
||||||
aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])
|
|
||||||
|
|
||||||
aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns(
|
|
||||||
channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint,
|
|
||||||
bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit,
|
|
||||||
)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
alicePath, err := ioutil.TempDir("", "alicedb")
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
dbAlice, err := channeldb.Open(alicePath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
bobPath, err := ioutil.TempDir("", "bobdb")
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
dbBob, err := channeldb.Open(bobPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
estimator := chainfee.NewStaticEstimator(12500, 0)
|
|
||||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
	// TODO(roasbeef): need to factor in commit fee?
	aliceCommit := channeldb.ChannelCommitment{
		CommitHeight:  0,
		LocalBalance:  lnwire.NewMSatFromSatoshis(channelBal),
		RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
		FeePerKw:      btcutil.Amount(feePerKw),
		CommitFee:     feePerKw.FeeForWeight(input.CommitWeight),
		CommitTx:      aliceCommitTx,
		CommitSig:     bytes.Repeat([]byte{1}, 71),
	}
	bobCommit := channeldb.ChannelCommitment{
		CommitHeight:  0,
		LocalBalance:  lnwire.NewMSatFromSatoshis(channelBal),
		RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
		FeePerKw:      btcutil.Amount(feePerKw),
		CommitFee:     feePerKw.FeeForWeight(input.CommitWeight),
		CommitTx:      bobCommitTx,
		CommitSig:     bytes.Repeat([]byte{1}, 71),
	}

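	// Generate a random short channel ID to be shared by both channel
	// states.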
	var chanIDBytes [8]byte
	if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil {
		return nil, nil, nil, nil, err
	}

	shortChanID := lnwire.NewShortChanIDFromInt(
		binary.BigEndian.Uint64(chanIDBytes[:]),
	)

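	// Assemble the persistent OpenChannel state for each party. Note that
	// each side stores the other's configuration as its remote config.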
	aliceChannelState := &channeldb.OpenChannel{
		LocalChanCfg:            aliceCfg,
		RemoteChanCfg:           bobCfg,
		IdentityPub:             aliceKeyPub,
		FundingOutpoint:         *prevOut,
		ShortChannelID:          shortChanID,
		ChanType:                channeldb.SingleFunderTweaklessBit,
		IsInitiator:             true,
		Capacity:                channelCapacity,
		RemoteCurrentRevocation: bobCommitPoint,
		RevocationProducer:      alicePreimageProducer,
		RevocationStore:         shachain.NewRevocationStore(),
		LocalCommitment:         aliceCommit,
		RemoteCommitment:        aliceCommit,
		Db:                      dbAlice,
		Packager:                channeldb.NewChannelPackager(shortChanID),
		FundingTxn:              testTx,
	}
	bobChannelState := &channeldb.OpenChannel{
		LocalChanCfg:            bobCfg,
		RemoteChanCfg:           aliceCfg,
		IdentityPub:             bobKeyPub,
		FundingOutpoint:         *prevOut,
		ChanType:                channeldb.SingleFunderTweaklessBit,
		IsInitiator:             false,
		Capacity:                channelCapacity,
		RemoteCurrentRevocation: aliceCommitPoint,
		RevocationProducer:      bobPreimageProducer,
		RevocationStore:         shachain.NewRevocationStore(),
		LocalCommitment:         bobCommit,
		RemoteCommitment:        bobCommit,
		Db:                      dbBob,
		Packager:                channeldb.NewChannelPackager(shortChanID),
	}

	// Set custom values on the channel states.
	updateChan(aliceChannelState, bobChannelState)

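	// Persist both channel states as pending channels at broadcast
	// height zero.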
	aliceAddr := &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18555,
	}

	if err := aliceChannelState.SyncPending(aliceAddr, 0); err != nil {
		return nil, nil, nil, nil, err
	}

	bobAddr := &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18556,
	}

	if err := bobChannelState.SyncPending(bobAddr, 0); err != nil {
		return nil, nil, nil, nil, err
	}

	cleanUpFunc := func() {
		os.RemoveAll(bobPath)
		os.RemoveAll(alicePath)
	}

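	// Create each party's signer and signature pool, then bring up the
	// in-memory channel state machines.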
	aliceSigner := &mockSigner{aliceKeyPriv}
	bobSigner := &mockSigner{bobKeyPriv}

	alicePool := lnwallet.NewSigPool(1, aliceSigner)
	channelAlice, err := lnwallet.NewLightningChannel(
		aliceSigner, aliceChannelState, alicePool,
	)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	alicePool.Start()

	bobPool := lnwallet.NewSigPool(1, bobSigner)
	channelBob, err := lnwallet.NewLightningChannel(
		bobSigner, bobChannelState, bobPool,
	)
	if err != nil {
		return nil, nil, nil, nil, err
	}
	bobPool.Start()

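	// Stub out the chain backend and wallet that back Alice's chain
	// control.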
	chainIO := &mockChainIO{
		bestHeight: fundingBroadcastHeight,
	}
	wallet := &lnwallet.LightningWallet{
		WalletController: &mockWalletController{
			rootKey:               aliceKeyPriv,
			publishedTransactions: publTx,
		},
	}
	cc := &chainControl{
		feeEstimator:  estimator,
		chainIO:       chainIO,
		chainNotifier: notifier,
		wallet:        wallet,
	}

	breachArbiter := &breachArbiter{}

	chainArb := contractcourt.NewChainArbitrator(
		contractcourt.ChainArbitratorConfig{
			Notifier: notifier,
			ChainIO:  chainIO,
			IsForwardedHTLC: func(chanID lnwire.ShortChannelID,
				htlcIndex uint64) bool {

				return true
			},
			Clock: clock.NewDefaultClock(),
		}, dbAlice,
	)
	chainArb.WatchNewChannel(aliceChannelState)

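	// Assemble a bare-bones server around the mocked subsystems for the
	// peer under test.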
	s := &server{
		chanDB:        dbAlice,
		cc:            cc,
		breachArbiter: breachArbiter,
		chainArb:      chainArb,
	}

	_, currentHeight, err := s.cc.chainIO.GetBestBlock()
	if err != nil {
		return nil, nil, nil, nil, err
	}

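	// Create and start an htlcswitch backed by Alice's database, anchored
	// at the current best height.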
	htlcSwitch, err := htlcswitch.New(htlcswitch.Config{
		DB:             dbAlice,
		SwitchPackager: channeldb.NewSwitchPackager(),
		Notifier:       notifier,
		FwdEventTicker: ticker.New(
			htlcswitch.DefaultFwdEventInterval),
		LogEventTicker: ticker.New(
			htlcswitch.DefaultLogInterval),
		AckEventTicker: ticker.New(
			htlcswitch.DefaultAckInterval),
	}, uint32(currentHeight))
	if err != nil {
		return nil, nil, nil, nil, err
	}
	if err = htlcSwitch.Start(); err != nil {
		return nil, nil, nil, nil, err
	}
	s.htlcSwitch = htlcSwitch

	nodeSignerAlice := netann.NewNodeSigner(aliceKeySigner)

	const chanActiveTimeout = time.Minute

	chanStatusMgr, err := netann.NewChanStatusManager(&netann.ChanStatusConfig{
		ChanStatusSampleInterval: 30 * time.Second,
		ChanEnableTimeout:        chanActiveTimeout,
		ChanDisableTimeout:       2 * time.Minute,
		DB:                       dbAlice,
		Graph:                    dbAlice.ChannelGraph(),
		MessageSigner:            nodeSignerAlice,
		OurPubKey:                aliceKeyPub,
		IsChannelActive:          s.htlcSwitch.HasActiveLink,
		ApplyChannelUpdate:       func(*lnwire.ChannelUpdate) error { return nil },
	})
	if err != nil {
		return nil, nil, nil, nil, err
	}
	if err = chanStatusMgr.Start(); err != nil {
		return nil, nil, nil, nil, err
	}
	s.chanStatusMgr = chanStatusMgr

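	// Finally, construct Alice's peer instance directly, with its queues
	// and channel maps initialized.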
	alicePeer := &peer{
		addr: &lnwire.NetAddress{
			IdentityKey: aliceKeyPub,
			Address:     aliceAddr,
		},

		server:        s,
		sendQueue:     make(chan outgoingMsg, 1),
		outgoingQueue: make(chan outgoingMsg, outgoingQueueLen),

		activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
		newChannels:    make(chan *newChannelMsg, 1),

		activeChanCloses:   make(map[lnwire.ChannelID]*chancloser.ChanCloser),
		localCloseChanReqs: make(chan *htlcswitch.ChanClose),
		chanCloseMsgs:      make(chan *closeMsg),

		chanActiveTimeout: chanActiveTimeout,

		queueQuit: make(chan struct{}),
		quit:      make(chan struct{}),
	}

	chanID := lnwire.NewChanIDFromOutPoint(channelAlice.ChannelPoint())
	alicePeer.activeChannels[chanID] = channelAlice

	alicePeer.wg.Add(1)
	go alicePeer.channelManager()

	return alicePeer, channelAlice, channelBob, cleanUpFunc, nil
}