package htlcswitch

import (
	"bytes"
	"container/list"
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/lightningnetwork/lnd/clock"
	"github.com/lightningnetwork/lnd/lnwire"
)

var (
	// ErrMailBoxShuttingDown is returned when the mailbox is interrupted by
	// a shutdown request.
	ErrMailBoxShuttingDown = errors.New("mailbox is shutting down")

	// ErrPacketAlreadyExists signals that an attempt to add a packet failed
	// because it already exists in the mailbox.
	ErrPacketAlreadyExists = errors.New("mailbox already has packet")
)

// MailBox is an interface which represents a concurrent-safe, in-order
// delivery queue for messages from the network and also from the main switch.
// It serves as a buffer between incoming messages, and messages to be handled
// by the link. Each of the mutating methods within this interface should be
// implemented in a non-blocking manner.
type MailBox interface {
	// AddMessage appends a new message to the end of the message queue.
	AddMessage(msg lnwire.Message) error

	// AddPacket appends a new packet to the end of the packet queue.
	AddPacket(pkt *htlcPacket) error

	// HasPacket queries the packets for a circuit key. This is used to
	// drop packets bound for the switch that already have a queued
	// response.
	HasPacket(CircuitKey) bool

	// AckPacket removes a packet from the mailbox's in-memory replay
	// buffer. This will prevent a packet from being delivered after a link
	// restarts if the switch has remained online. The returned boolean
	// indicates whether or not a packet with the passed incoming circuit
	// key was removed.
	AckPacket(CircuitKey) bool

	// FailAdd fails an UpdateAddHTLC that exists within the mailbox,
	// removing it from the in-memory replay buffer. This will prevent the
	// packet from being delivered after the link restarts if the switch has
	// remained online. The generated LinkError will show an
	// OutgoingFailureDownstreamHtlcAdd FailureDetail.
	FailAdd(pkt *htlcPacket)

	// MessageOutBox returns a channel that any new messages ready for
	// delivery will be sent on.
	MessageOutBox() chan lnwire.Message

	// PacketOutBox returns a channel that any new packets ready for
	// delivery will be sent on.
	PacketOutBox() chan *htlcPacket

	// ResetMessages clears any pending wire messages from the inbox.
	ResetMessages() error

	// ResetPackets resets the packet head to point at the first element in
	// the list.
	ResetPackets() error

	// Start starts the mailbox and any goroutines it needs to operate
	// properly.
	Start()

	// Stop signals the mailbox and its goroutines for a graceful shutdown.
	Stop()
}

type mailBoxConfig struct {
	// shortChanID is the short channel id of the channel this mailbox
	// belongs to.
	shortChanID lnwire.ShortChannelID

	// fetchUpdate retrieves the most recent channel update for the channel
	// this mailbox belongs to.
	fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)

	// forwardPackets sends a variadic number of htlcPackets to the switch
	// to be routed. A quit channel should be provided so that the call can
	// properly exit during shutdown.
	forwardPackets func(chan struct{}, ...*htlcPacket) error

	// clock is a time source for the mailbox.
	clock clock.Clock

	// expiry is the interval after which Adds will be cancelled if they
	// have not yet been delivered. The computed deadline will expire this
	// long after the Adds are added via AddPacket.
	expiry time.Duration
}

// memoryMailBox is an implementation of the MailBox interface backed by
// purely in-memory queues.
type memoryMailBox struct {
	started sync.Once
	stopped sync.Once

	cfg *mailBoxConfig

	wireMessages *list.List
	wireMtx      sync.Mutex
	wireCond     *sync.Cond

	messageOutbox chan lnwire.Message
	msgReset      chan chan struct{}

	// repPkts is a queue for reply packets, e.g. Settles and Fails.
	repPkts  *list.List
	repIndex map[CircuitKey]*list.Element
	repHead  *list.Element

	// addPkts is a dedicated queue for Adds.
	addPkts  *list.List
	addIndex map[CircuitKey]*list.Element
	addHead  *list.Element

	pktMtx  sync.Mutex
	pktCond *sync.Cond

	pktOutbox chan *htlcPacket
	pktReset  chan chan struct{}

	wireShutdown chan struct{}
	pktShutdown  chan struct{}
	quit         chan struct{}
}

// newMemoryMailBox creates a new instance of the memoryMailBox.
func newMemoryMailBox(cfg *mailBoxConfig) *memoryMailBox {
	box := &memoryMailBox{
		cfg:           cfg,
		wireMessages:  list.New(),
		repPkts:       list.New(),
		addPkts:       list.New(),
		messageOutbox: make(chan lnwire.Message),
		pktOutbox:     make(chan *htlcPacket),
		msgReset:      make(chan chan struct{}, 1),
		pktReset:      make(chan chan struct{}, 1),
		repIndex:      make(map[CircuitKey]*list.Element),
		addIndex:      make(map[CircuitKey]*list.Element),
		wireShutdown:  make(chan struct{}),
		pktShutdown:   make(chan struct{}),
		quit:          make(chan struct{}),
	}
	box.wireCond = sync.NewCond(&box.wireMtx)
	box.pktCond = sync.NewCond(&box.pktMtx)

	return box
}

// A compile time assertion to ensure that memoryMailBox meets the MailBox
// interface.
var _ MailBox = (*memoryMailBox)(nil)
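
// exampleMailBoxLifecycle sketches the typical end-to-end use of a mailbox:
// construct it, start its couriers, enqueue a wire message, and consume it
// from the outbox before shutting down. This is an illustrative sketch only;
// the closures and the one-minute expiry below are hypothetical placeholders,
// not values used by the switch.
func exampleMailBoxLifecycle(msg lnwire.Message) error {
	mb := newMemoryMailBox(&mailBoxConfig{
		shortChanID: lnwire.NewShortChanIDFromInt(1),
		fetchUpdate: func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate,
			error) {

			return &lnwire.ChannelUpdate{}, nil
		},
		forwardPackets: func(quit chan struct{},
			pkts ...*htlcPacket) error {

			return nil
		},
		clock:  clock.NewDefaultClock(),
		expiry: time.Minute,
	})
	mb.Start()
	defer mb.Stop()

	if err := mb.AddMessage(msg); err != nil {
		return err
	}

	// Delivery is asynchronous: the wire courier pushes the message onto
	// the outbox channel once it reaches the head of the queue.
	<-mb.MessageOutBox()

	return nil
}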

// courierType is an enum that reflects the distinct types of messages a
// MailBox can handle. Each type will be placed in an isolated mail box and
// will have a dedicated goroutine for delivering the messages.
type courierType uint8

const (
	// wireCourier is a type of courier that handles wire messages.
	wireCourier courierType = iota

	// pktCourier is a type of courier that handles htlc packets.
	pktCourier
)

// Start starts the mailbox and any goroutines it needs to operate properly.
//
// NOTE: This method is part of the MailBox interface.
func (m *memoryMailBox) Start() {
	m.started.Do(func() {
		go m.mailCourier(wireCourier)
		go m.mailCourier(pktCourier)
	})
}

// ResetMessages blocks until all buffered wire messages are cleared.
func (m *memoryMailBox) ResetMessages() error {
	msgDone := make(chan struct{})
	select {
	case m.msgReset <- msgDone:
		return m.signalUntilReset(wireCourier, msgDone)
	case <-m.quit:
		return ErrMailBoxShuttingDown
	}
}

// ResetPackets blocks until the head of the packet buffer is reset, causing
// the packets to be redelivered in order.
func (m *memoryMailBox) ResetPackets() error {
	pktDone := make(chan struct{})
	select {
	case m.pktReset <- pktDone:
		return m.signalUntilReset(pktCourier, pktDone)
	case <-m.quit:
		return ErrMailBoxShuttingDown
	}
}

// signalUntilReset strobes the condition variable for the specified inbox type
// until receiving a response that the mailbox has processed a reset.
func (m *memoryMailBox) signalUntilReset(cType courierType,
	done chan struct{}) error {

	for {
		switch cType {
		case wireCourier:
			m.wireCond.Signal()
		case pktCourier:
			m.pktCond.Signal()
		}

		select {
		case <-time.After(time.Millisecond):
			continue
		case <-done:
			return nil
		case <-m.quit:
			return ErrMailBoxShuttingDown
		}
	}
}

// AckPacket removes the packet identified by its incoming circuit key from the
// queue of packets to be delivered. The returned boolean indicates whether or
// not a packet with the passed incoming circuit key was removed.
//
// NOTE: It is safe to call this method multiple times for the same circuit key.
func (m *memoryMailBox) AckPacket(inKey CircuitKey) bool {
	m.pktCond.L.Lock()
	defer m.pktCond.L.Unlock()

	if entry, ok := m.repIndex[inKey]; ok {
		// Check whether we are removing the head of the queue. If so,
		// we must advance the head to the next packet before removing.
		// It's possible that the courier has already advanced the
		// repHead, so this check prevents the repHead from getting
		// desynchronized.
		if entry == m.repHead {
			m.repHead = entry.Next()
		}
		m.repPkts.Remove(entry)
		delete(m.repIndex, inKey)

		return true
	}

	if entry, ok := m.addIndex[inKey]; ok {
		// Check whether we are removing the head of the queue. If so,
		// we must advance the head to the next add before removing.
		// It's possible that the courier has already advanced the
		// addHead, so this check prevents the addHead from getting
		// desynchronized.
		//
		// NOTE: While this event is rare for Settles or Fails, it could
		// be very common for Adds since the mailbox has the ability to
		// cancel Adds before they are delivered. When that occurs, the
		// head of addPkts has only been peeked and we expect to be
		// removing the head of the queue.
		if entry == m.addHead {
			m.addHead = entry.Next()
		}

		m.addPkts.Remove(entry)
		delete(m.addIndex, inKey)

		return true
	}

	return false
}

// HasPacket queries the packets for a circuit key. This is used to drop
// packets bound for the switch that already have a queued response.
func (m *memoryMailBox) HasPacket(inKey CircuitKey) bool {
	m.pktCond.L.Lock()
	_, ok := m.repIndex[inKey]
	m.pktCond.L.Unlock()

	return ok
}

// Stop signals the mailbox and its goroutines for a graceful shutdown.
//
// NOTE: This method is part of the MailBox interface.
func (m *memoryMailBox) Stop() {
	m.stopped.Do(func() {
		close(m.quit)

		m.signalUntilShutdown(wireCourier)
		m.signalUntilShutdown(pktCourier)
	})
}

// signalUntilShutdown strobes the condition variable of the passed courier
// type, blocking until the worker has exited.
func (m *memoryMailBox) signalUntilShutdown(cType courierType) {
	var (
		cond     *sync.Cond
		shutdown chan struct{}
	)

	switch cType {
	case wireCourier:
		cond = m.wireCond
		shutdown = m.wireShutdown
	case pktCourier:
		cond = m.pktCond
		shutdown = m.pktShutdown
	}

	for {
		select {
		case <-time.After(time.Millisecond):
			cond.Signal()
		case <-shutdown:
			return
		}
	}
}

// pktWithExpiry wraps an incoming packet and records the time at which it
// should be canceled from the mailbox. This will be used to detect if it gets
// stuck in the mailbox and inform when to cancel back.
type pktWithExpiry struct {
	pkt    *htlcPacket
	expiry time.Time
}

func (p *pktWithExpiry) deadline(clock clock.Clock) <-chan time.Time {
	return clock.TickAfter(p.expiry.Sub(clock.Now()))
}
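
// deadlineExample sketches how pktWithExpiry computes its cancellation
// deadline against an injected clock. It assumes lnd's test clock
// (clock.NewTestClock), which lets callers advance time manually; the
// 30-second expiry is a hypothetical value, not the switch's default.
func deadlineExample() <-chan time.Time {
	c := clock.NewTestClock(time.Unix(0, 0))
	p := &pktWithExpiry{
		pkt:    &htlcPacket{},
		expiry: c.Now().Add(30 * time.Second),
	}

	// The returned channel fires once the clock passes p.expiry, i.e.
	// after the test clock is advanced by at least 30 seconds.
	return p.deadline(c)
}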

// mailCourier is a dedicated goroutine whose job is to reliably deliver
// messages of a particular type. There are two types of couriers: wire
// couriers, and packet couriers. Depending on the passed courierType, this
// goroutine will assume one of two roles.
func (m *memoryMailBox) mailCourier(cType courierType) {
	switch cType {
	case wireCourier:
		defer close(m.wireShutdown)
	case pktCourier:
		defer close(m.pktShutdown)
	}

	// TODO(roasbeef): refactor...

	for {
		// First, we'll check our condition. If our target mailbox is
		// empty, then we'll wait until a new item is added.
		switch cType {
		case wireCourier:
			m.wireCond.L.Lock()
			for m.wireMessages.Front() == nil {
				m.wireCond.Wait()

				select {
				case msgDone := <-m.msgReset:
					m.wireMessages.Init()

					close(msgDone)
				case <-m.quit:
					m.wireCond.L.Unlock()
					return
				default:
				}
			}

		case pktCourier:
			m.pktCond.L.Lock()
			for m.repHead == nil && m.addHead == nil {
				m.pktCond.Wait()

				select {
				// Resetting the packet queue means just moving
				// our pointer to the front. This ensures that
				// any un-ACK'd messages are re-delivered upon
				// reconnect.
				case pktDone := <-m.pktReset:
					m.repHead = m.repPkts.Front()
					m.addHead = m.addPkts.Front()

					close(pktDone)

				case <-m.quit:
					m.pktCond.L.Unlock()
					return
				default:
				}
			}
		}

		var (
			nextRep   *htlcPacket
			nextRepEl *list.Element
			nextAdd   *pktWithExpiry
			nextAddEl *list.Element
			nextMsg   lnwire.Message
		)
		switch cType {
		// Grab the datum off the front of the queue, removing it from
		// the wire message queue in the process.
		case wireCourier:
			entry := m.wireMessages.Front()
			nextMsg = m.wireMessages.Remove(entry).(lnwire.Message)

		// For packets, we actually never remove an item until it has
		// been ACK'd by the link. This ensures that if a read packet
		// doesn't make it into a commitment, then it'll be
		// re-delivered once the link comes back online.
		case pktCourier:
			// Peek at the head of the Settle/Fails and Add queues.
			// We peek at both even if there is a Settle/Fail
			// present because we need to set a deadline for the
			// next pending Add if it's present. Due to clock
			// monotonicity, we know that the head of the Adds is
			// the next to expire.
			if m.repHead != nil {
				nextRep = m.repHead.Value.(*htlcPacket)
				nextRepEl = m.repHead
			}
			if m.addHead != nil {
				nextAdd = m.addHead.Value.(*pktWithExpiry)
				nextAddEl = m.addHead
			}
		}

		// Now that we're done with the condition, we can unlock it to
		// allow any callers to append to the end of our target queue.
		switch cType {
		case wireCourier:
			m.wireCond.L.Unlock()
		case pktCourier:
			m.pktCond.L.Unlock()
		}

		// With the next message obtained, we'll now select to attempt
		// to deliver the message. If we receive a kill signal, then
		// we'll bail out.
		switch cType {
		case wireCourier:
			select {
			case m.messageOutbox <- nextMsg:
			case msgDone := <-m.msgReset:
				m.wireCond.L.Lock()
				m.wireMessages.Init()
				m.wireCond.L.Unlock()

				close(msgDone)
			case <-m.quit:
				return
			}

		case pktCourier:
			var (
				pktOutbox chan *htlcPacket
				addOutbox chan *htlcPacket
				add       *htlcPacket
				deadline  <-chan time.Time
			)

			// Prioritize delivery of Settle/Fail packets over Adds.
			// This ensures that we actively clear the commitment of
			// existing HTLCs before trying to add new ones. This
			// can help to improve forwarding performance since the
			// time to sign a commitment is linear in the number of
			// HTLCs manifested on the commitments.
			//
			// NOTE: Both types are eventually delivered over the
			// same channel, but we can control which is delivered
			// by exclusively making one nil and the other non-nil.
			// We know from our loop condition that at least one of
			// nextRep and nextAdd is non-nil.
			if nextRep != nil {
				pktOutbox = m.pktOutbox
			} else {
				addOutbox = m.pktOutbox
			}

			// If we have a pending Add, we'll also construct the
			// deadline so we can fail it back if we are unable to
			// deliver any message in time. We also dereference the
			// nextAdd's packet, since we will need access to it in
			// the case we are delivering it and/or if the deadline
			// expires.
			//
			// NOTE: It's possible after this point for add to be
			// nil, but this can only occur when addOutbox is also
			// nil, hence we won't accidentally deliver a nil
			// packet.
			if nextAdd != nil {
				add = nextAdd.pkt
				deadline = nextAdd.deadline(m.cfg.clock)
			}

			select {
			case pktOutbox <- nextRep:
				m.pktCond.L.Lock()
				// Only advance the repHead if this Settle or
				// Fail is still at the head of the queue.
				if m.repHead != nil && m.repHead == nextRepEl {
					m.repHead = m.repHead.Next()
				}
				m.pktCond.L.Unlock()

			case addOutbox <- add:
				m.pktCond.L.Lock()
				// Only advance the addHead if this Add is still
				// at the head of the queue.
				if m.addHead != nil && m.addHead == nextAddEl {
					m.addHead = m.addHead.Next()
				}
				m.pktCond.L.Unlock()

			case <-deadline:
				m.FailAdd(add)

			case pktDone := <-m.pktReset:
				m.pktCond.L.Lock()
				m.repHead = m.repPkts.Front()
				m.addHead = m.addPkts.Front()
				m.pktCond.L.Unlock()

				close(pktDone)

			case <-m.quit:
				return
			}
		}
	}
}
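
// nilChanSelectDemo isolates the prioritization trick used by the packet
// courier above: a send on a nil channel blocks forever, so by leaving
// exactly one of the two channel variables non-nil, a single select
// statement is steered toward either the reply queue or the add queue.
// This is an illustrative sketch, not part of the mailbox API.
func nilChanSelectDemo(preferRep bool, out chan *htlcPacket,
	rep, add *htlcPacket) {

	var repOut, addOut chan *htlcPacket
	if preferRep {
		repOut = out // Only the reply send can proceed.
	} else {
		addOut = out // Only the add send can proceed.
	}

	select {
	case repOut <- rep:
	case addOut <- add:
	}
}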

// AddMessage appends a new message to the end of the message queue.
//
// NOTE: This method is safe for concurrent use and part of the MailBox
// interface.
func (m *memoryMailBox) AddMessage(msg lnwire.Message) error {
	// First, we'll lock the condition, and add the message to the end of
	// the wire message inbox.
	m.wireCond.L.Lock()
	m.wireMessages.PushBack(msg)
	m.wireCond.L.Unlock()

	// With the message added, we signal to the mailCourier that there are
	// additional messages to deliver.
	m.wireCond.Signal()

	return nil
}

// AddPacket appends a new packet to the end of the packet queue.
//
// NOTE: This method is safe for concurrent use and part of the MailBox
// interface.
func (m *memoryMailBox) AddPacket(pkt *htlcPacket) error {
	m.pktCond.L.Lock()
	switch htlc := pkt.htlc.(type) {

	// Split off Settle/Fail packets into the repPkts queue.
	case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC:
		if _, ok := m.repIndex[pkt.inKey()]; ok {
			m.pktCond.L.Unlock()
			return ErrPacketAlreadyExists
		}

		entry := m.repPkts.PushBack(pkt)
		m.repIndex[pkt.inKey()] = entry
		if m.repHead == nil {
			m.repHead = entry
		}

	// Split off Add packets into the addPkts queue.
	case *lnwire.UpdateAddHTLC:
		if _, ok := m.addIndex[pkt.inKey()]; ok {
			m.pktCond.L.Unlock()
			return ErrPacketAlreadyExists
		}

		entry := m.addPkts.PushBack(&pktWithExpiry{
			pkt:    pkt,
			expiry: m.cfg.clock.Now().Add(m.cfg.expiry),
		})
		m.addIndex[pkt.inKey()] = entry
		if m.addHead == nil {
			m.addHead = entry
		}

	default:
		m.pktCond.L.Unlock()
		return fmt.Errorf("unknown htlc type: %T", htlc)
	}
	m.pktCond.L.Unlock()

	// With the packet added, we signal to the mailCourier that there are
	// additional packets to consume.
	m.pktCond.Signal()

	return nil
}
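
// replayBufferExample sketches the replay-buffer semantics of AddPacket,
// HasPacket and AckPacket: a queued Settle is visible via HasPacket,
// re-adding it fails with ErrPacketAlreadyExists, and only the first
// AckPacket for a circuit key returns true. The channel and HTLC ids below
// are hypothetical placeholders.
func replayBufferExample(m *memoryMailBox) {
	pkt := &htlcPacket{
		incomingChanID: lnwire.NewShortChanIDFromInt(1),
		incomingHTLCID: 7,
		htlc:           &lnwire.UpdateFulfillHTLC{},
	}

	_ = m.AddPacket(pkt)         // Queued in repPkts.
	_ = m.HasPacket(pkt.inKey()) // true: a response is already queued.
	_ = m.AddPacket(pkt)         // ErrPacketAlreadyExists.
	_ = m.AckPacket(pkt.inKey()) // true: removed from the replay buffer.
	_ = m.AckPacket(pkt.inKey()) // false: already removed.
}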

// FailAdd fails an UpdateAddHTLC that exists within the mailbox, removing it
// from the in-memory replay buffer. This will prevent the packet from being
// delivered after the link restarts if the switch has remained online. The
// generated LinkError will show an OutgoingFailureDownstreamHtlcAdd
// FailureDetail.
func (m *memoryMailBox) FailAdd(pkt *htlcPacket) {
	// First, remove the packet from mailbox. If we didn't find the packet
	// because it has already been acked, we'll exit early to avoid sending
	// a duplicate fail message through the switch.
	if !m.AckPacket(pkt.inKey()) {
		return
	}

	var (
		localFailure = false
		reason       lnwire.OpaqueReason
	)

	// Create a temporary channel failure which we will send back to our
	// peer if this is a forward, or report to the user if the failed
	// payment was locally initiated.
	var failure lnwire.FailureMessage
	update, err := m.cfg.fetchUpdate(m.cfg.shortChanID)
	if err != nil {
		failure = &lnwire.FailTemporaryNodeFailure{}
	} else {
		failure = lnwire.NewTemporaryChannelFailure(update)
	}

	// If the payment was locally initiated (which is indicated by a nil
	// obfuscator), we do not need to encrypt it back to the sender.
	if pkt.obfuscator == nil {
		var b bytes.Buffer
		err := lnwire.EncodeFailure(&b, failure, 0)
		if err != nil {
			log.Errorf("Unable to encode failure: %v", err)
			return
		}
		reason = lnwire.OpaqueReason(b.Bytes())
		localFailure = true
	} else {
		// If the packet is part of a forward, (identified by a non-nil
		// obfuscator) we need to encrypt the error back to the source.
		var err error
		reason, err = pkt.obfuscator.EncryptFirstHop(failure)
		if err != nil {
			log.Errorf("Unable to obfuscate error: %v", err)
			return
		}
	}

	// Create a link error containing the temporary channel failure and a
	// detail which indicates that we failed to add the htlc.
	linkError := NewDetailedLinkError(
		failure, OutgoingFailureDownstreamHtlcAdd,
	)

	failPkt := &htlcPacket{
		incomingChanID: pkt.incomingChanID,
		incomingHTLCID: pkt.incomingHTLCID,
		circuit:        pkt.circuit,
		sourceRef:      pkt.sourceRef,
		hasSource:      true,
		localFailure:   localFailure,
		linkFailure:    linkError,
		htlc: &lnwire.UpdateFailHTLC{
			Reason: reason,
		},
	}

	if err := m.cfg.forwardPackets(m.quit, failPkt); err != nil {
		log.Errorf("Unhandled error while reforwarding packets "+
			"settle/fail over htlcswitch: %v", err)
	}
}

// MessageOutBox returns a channel that any new messages ready for delivery
// will be sent on.
//
// NOTE: This method is part of the MailBox interface.
func (m *memoryMailBox) MessageOutBox() chan lnwire.Message {
	return m.messageOutbox
}

// PacketOutBox returns a channel that any new packets ready for delivery will
// be sent on.
//
// NOTE: This method is part of the MailBox interface.
func (m *memoryMailBox) PacketOutBox() chan *htlcPacket {
	return m.pktOutbox
}

// mailOrchestrator is responsible for coordinating the creation and lifecycle
// of mailboxes used within the switch. It supports the ability to create
// mailboxes, reassign their short channel ids, deliver htlc packets, and
// queue packets for mailboxes that have not been created due to a link's late
// registration.
type mailOrchestrator struct {
	mu sync.RWMutex

	cfg *mailOrchConfig

	// mailboxes caches exactly one mailbox for all known channels.
	mailboxes map[lnwire.ChannelID]MailBox

	// liveIndex maps a live short chan id to the primary mailbox key.
	// An index in liveIndex map is only entered under two conditions:
	// 1. A link has a non-zero short channel id at time of AddLink.
	// 2. A link receives a non-zero short channel via UpdateShortChanID.
	liveIndex map[lnwire.ShortChannelID]lnwire.ChannelID

	// TODO(conner): add another pair of indexes:
	//   chan_id -> short_chan_id
	//   short_chan_id -> mailbox
	// so that Deliver can lookup mailbox directly once live,
	// but still queryable by channel_id.

	// unclaimedPackets maps a live short chan id to queue of packets if no
	// mailbox has been created.
	unclaimedPackets map[lnwire.ShortChannelID][]*htlcPacket
}

type mailOrchConfig struct {
	// forwardPackets sends a variadic number of htlcPackets to the switch
	// to be routed. A quit channel should be provided so that the call can
	// properly exit during shutdown.
	forwardPackets func(chan struct{}, ...*htlcPacket) error

	// fetchUpdate retrieves the most recent channel update for the channel
	// this mailbox belongs to.
	fetchUpdate func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error)

	// clock is a time source for the generated mailboxes.
	clock clock.Clock

	// expiry is the interval after which Adds will be cancelled if they
	// have not yet been delivered. The computed deadline will expire this
	// long after the Adds are added to a mailbox via AddPacket.
	expiry time.Duration
}

// newMailOrchestrator initializes a fresh mailOrchestrator.
func newMailOrchestrator(cfg *mailOrchConfig) *mailOrchestrator {
	return &mailOrchestrator{
		cfg:              cfg,
		mailboxes:        make(map[lnwire.ChannelID]MailBox),
		liveIndex:        make(map[lnwire.ShortChannelID]lnwire.ChannelID),
		unclaimedPackets: make(map[lnwire.ShortChannelID][]*htlcPacket),
	}
}

// Stop instructs the orchestrator to stop all active mailboxes.
func (mo *mailOrchestrator) Stop() {
	for _, mailbox := range mo.mailboxes {
		mailbox.Stop()
	}
}

// GetOrCreateMailBox returns an existing mailbox belonging to `chanID`, or
// creates and returns a new mailbox if none is found.
func (mo *mailOrchestrator) GetOrCreateMailBox(chanID lnwire.ChannelID,
	shortChanID lnwire.ShortChannelID) MailBox {

	// First, try to look up the mailbox directly using only the shared
	// mutex.
	mo.mu.RLock()
	mailbox, ok := mo.mailboxes[chanID]
	if ok {
		mo.mu.RUnlock()
		return mailbox
	}
	mo.mu.RUnlock()

	// Otherwise, we will try again with exclusive lock, creating a mailbox
	// if one still has not been created.
	mo.mu.Lock()
	mailbox = mo.exclusiveGetOrCreateMailBox(chanID, shortChanID)
	mo.mu.Unlock()

	return mailbox
}

// exclusiveGetOrCreateMailBox checks for the existence of a mailbox for the
// given channel id. If none is found, a new one is created, started, and
// recorded.
//
// NOTE: This method MUST be invoked with the mailOrchestrator's exclusive lock.
func (mo *mailOrchestrator) exclusiveGetOrCreateMailBox(
	chanID lnwire.ChannelID, shortChanID lnwire.ShortChannelID) MailBox {

	mailbox, ok := mo.mailboxes[chanID]
	if !ok {
		mailbox = newMemoryMailBox(&mailBoxConfig{
			shortChanID:    shortChanID,
			fetchUpdate:    mo.cfg.fetchUpdate,
			forwardPackets: mo.cfg.forwardPackets,
			clock:          mo.cfg.clock,
			expiry:         mo.cfg.expiry,
		})
		mailbox.Start()
		mo.mailboxes[chanID] = mailbox
	}

	return mailbox
}

// BindLiveShortChanID registers that messages bound for a particular short
// channel id should be forwarded to the mailbox corresponding to the given
// channel id. This method also checks to see if there are any unclaimed
// packets for this short_chan_id. If any are found, they are delivered to the
// mailbox and removed (marked as claimed).
func (mo *mailOrchestrator) BindLiveShortChanID(mailbox MailBox,
	cid lnwire.ChannelID, sid lnwire.ShortChannelID) {

	mo.mu.Lock()
	// Update the mapping from short channel id to mailbox's channel id.
	mo.liveIndex[sid] = cid

	// Retrieve any unclaimed packets destined for this mailbox.
	pkts := mo.unclaimedPackets[sid]
	delete(mo.unclaimedPackets, sid)
	mo.mu.Unlock()

	// Deliver the unclaimed packets.
	for _, pkt := range pkts {
		mailbox.AddPacket(pkt)
	}
}

// Deliver looks up the target mailbox using the live index from short_chan_id
// to channel_id. If the mailbox is found, the message is delivered directly.
// Otherwise the packet is recorded as unclaimed, and will be delivered to the
// mailbox upon the subsequent call to BindLiveShortChanID.
func (mo *mailOrchestrator) Deliver(
	sid lnwire.ShortChannelID, pkt *htlcPacket) error {

	var (
		mailbox MailBox
		found   bool
	)

	// First, try to find the channel id for the target short_chan_id. If
	// the link is live, we will also look up the created mailbox.
	mo.mu.RLock()
	chanID, isLive := mo.liveIndex[sid]
	if isLive {
		mailbox, found = mo.mailboxes[chanID]
	}
	mo.mu.RUnlock()

	// The link is live and target mailbox was found, deliver immediately.
	if isLive && found {
		return mailbox.AddPacket(pkt)
	}

	// If we detected that the link has not been made live, we will acquire
	// the exclusive lock preemptively in order to queue this packet in the
	// list of unclaimed packets.
	mo.mu.Lock()

	// Double check to see if the mailbox has not been made live since the
	// release of the shared lock.
	//
	// NOTE: Checking again with the exclusive lock held prevents a race
	// condition where BindLiveShortChanID is interleaved between the
	// release of the shared lock, and acquiring the exclusive lock. The
	// result would be stuck packets, as they wouldn't be redelivered until
	// the next call to BindLiveShortChanID, which is expected to occur
	// infrequently.
	chanID, isLive = mo.liveIndex[sid]
	if isLive {
		// Reaching this point indicates the mailbox is actually live.
		// We'll try to load the mailbox using the fresh channel id.
		//
		// NOTE: This should never create a new mailbox, as the live
		// index should only be set if the mailbox had been initialized
		// beforehand. However, this does ensure that this case is
		// handled properly in the event that it could happen.
		mailbox = mo.exclusiveGetOrCreateMailBox(chanID, sid)
		mo.mu.Unlock()

		// Deliver the packet to the mailbox if it was found or created.
		return mailbox.AddPacket(pkt)
	}

	// Finally, if the channel id is still not found in the live index,
	// we'll add this to the list of unclaimed packets. These will be
	// delivered upon the next call to BindLiveShortChanID.
	mo.unclaimedPackets[sid] = append(mo.unclaimedPackets[sid], pkt)
	mo.mu.Unlock()

	return nil
}
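
// orchestratorFlowExample sketches the late-registration path handled by
// Deliver and BindLiveShortChanID: a packet delivered before any link has
// bound its short channel id is parked as unclaimed, and the subsequent bind
// flushes it into the freshly created mailbox. The ids below are hypothetical
// placeholders.
func orchestratorFlowExample(mo *mailOrchestrator, pkt *htlcPacket) {
	sid := lnwire.NewShortChanIDFromInt(42)
	var cid lnwire.ChannelID

	// No live binding yet: the packet is queued under sid.
	_ = mo.Deliver(sid, pkt)

	// The link registers: a mailbox is created and started, the binding
	// goes live, and the unclaimed packet is handed to the mailbox.
	mb := mo.GetOrCreateMailBox(cid, sid)
	mo.BindLiveShortChanID(mb, cid, sid)
}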