package htlcswitch

import (
	"sync"
	"sync/atomic"

	"github.com/lightningnetwork/lnd/lnwire"
)

// packetQueue is a goroutine-safe queue of htlc packets which overflow the
// current commitment transaction. An HTLC will overflow the current commitment
// transaction if one attempts to add a new HTLC to the state machine which
// already has the max number of pending HTLC's present on the commitment
// transaction. Packets are removed from the queue by the channelLink itself
// as additional slots become available on the commitment transaction. In
// order to synchronize properly, we use a semaphore to allow the channelLink
// to signal the number of slots available, and a condition variable to allow
// the packetQueue to know when new items have been added to the queue.
//
// NOTE: the fields below are ordered so that the 64-bit words accessed
// atomically remain 64-bit aligned, as required by the sync/atomic package
// on 32-bit platforms.
type packetQueue struct {
	queue []*htlcPacket

	wg sync.WaitGroup

	// freeSlots serves as a semaphore whose current value signals the
	// number of available slots on the commitment transaction.
	freeSlots chan struct{}

	queueCond *sync.Cond
	queueMtx  sync.Mutex

	// outgoingPkts is a channel that the channelLink will receive on in
	// order to drain the packetQueue as new slots become available on the
	// commitment transaction.
	outgoingPkts chan *htlcPacket

	// totalHtlcAmt is the sum of the value of all pending HTLC's currently
	// residing within the overflow queue. This value should only be read
	// or modified *atomically*.
	totalHtlcAmt int64

	quit chan struct{}

	// queueLen is an internal counter that reflects the size of the queue
	// at any given instant. This value is intended to be used atomically,
	// as it is read by internal methods to obtain the length of the queue
	// without grabbing the main lock. This allows callers to avoid a
	// deadlock situation where the main goroutine is attempting a send
	// with the lock held.
	queueLen int32
}

// newPacketQueue returns a new instance of the packetQueue. The maxFreeSlots
// value should reflect the max number of HTLC's that we're allowed to have
// outstanding within the commitment transaction.
func newPacketQueue(maxFreeSlots int) *packetQueue {
	p := &packetQueue{
		outgoingPkts: make(chan *htlcPacket),
		freeSlots:    make(chan struct{}, maxFreeSlots),
		quit:         make(chan struct{}),
	}
	p.queueCond = sync.NewCond(&p.queueMtx)

	return p
}
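
// enqueueOverflowSketch is an illustrative sketch, not part of the original
// file, showing the producer side of the abstraction described above: when
// the commitment transaction has no free slots left, the link parks surplus
// packets in the overflow queue rather than failing them. The function name
// and the assumedMaxSlots value are assumptions made purely for illustration.
func enqueueOverflowSketch(overflow []*htlcPacket) *packetQueue {
	// Assumed small capacity for the example; the real limit is the max
	// number of HTLC's permitted on the commitment transaction.
	const assumedMaxSlots = 10

	q := newPacketQueue(assumedMaxSlots)
	q.Start()

	// AddPkt never blocks on commitment capacity: the queue itself is
	// unbounded, and the ordering of the packets is preserved.
	for _, pkt := range overflow {
		q.AddPkt(pkt)
	}

	return q
}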
// Start starts all goroutines that packetQueue needs to perform its normal
// duties.
func (p *packetQueue) Start() {
	p.wg.Add(1)
	go p.packetCoordinator()
}

// Stop signals the packetQueue for a graceful shutdown, and waits for all
// goroutines to exit.
func (p *packetQueue) Stop() {
	close(p.quit)

	p.queueCond.Signal()
}

// packetCoordinator is a goroutine that handles the packet overflow queue.
// Using a synchronized queue, outside callers are able to append to the end of
// the queue, waking up the coordinator when the queue transitions from empty
// to non-empty. The packetCoordinator will then aggressively try to empty out
// the queue, passing new htlcPackets to the channelLink as slots within the
// commitment transaction become available.
//
// Future iterations of the packetCoordinator will implement congestion
// avoidance logic in the face of persistent htlcPacket back-pressure.
//
// TODO(roasbeef): later will need to add back pressure handling heuristics
// like regular congestion avoidance:
//  * random dropping, RED, etc
func (p *packetQueue) packetCoordinator() {
	defer p.wg.Done()

	for {
		// First, we'll check our condition. If the queue of packets is
		// empty, then we'll wait until a new item is added.
		p.queueCond.L.Lock()
		for len(p.queue) == 0 {
			p.queueCond.Wait()

			// If we were woken up in order to exit, then we'll do
			// so. Otherwise, we'll check the message queue for any
			// new items.
			select {
			case <-p.quit:
				p.queueCond.L.Unlock()
				return
			default:
			}
		}

		nextPkt := p.queue[0]

		p.queueCond.L.Unlock()

		// If there aren't any further messages to be sent (or the link
		// didn't immediately read our message), then we'll block and
		// wait for a new message to be sent into the overflow queue,
		// or for the link's htlcForwarder to wake up.
		select {
		case <-p.freeSlots:

			select {
			case p.outgoingPkts <- nextPkt:
				// Pop the item off the front of the queue and
				// slide down the reference one to re-position
				// the head pointer. This will set us up for
				// the next iteration. If the queue is empty
				// at this point, then we'll block at the top.
				p.queueCond.L.Lock()
				p.queue[0] = nil
				p.queue = p.queue[1:]
				atomic.AddInt32(&p.queueLen, -1)
				atomic.AddInt64(&p.totalHtlcAmt, int64(-nextPkt.amount))
				p.queueCond.L.Unlock()

			case <-p.quit:
				return
			}

		case <-p.quit:
			return

		default:
		}
	}
}

// AddPkt adds the referenced packet to the overflow queue, preserving ordering
// of the existing items.
func (p *packetQueue) AddPkt(pkt *htlcPacket) {
	// First, we'll lock the condition, add the message to the end of the
	// message queue, and increment the internal atomics tracking the
	// queue's length and total HTLC amount.
	p.queueCond.L.Lock()
	p.queue = append(p.queue, pkt)
	atomic.AddInt32(&p.queueLen, 1)
	atomic.AddInt64(&p.totalHtlcAmt, int64(pkt.amount))
	p.queueCond.L.Unlock()

	// With the message added, we signal to the packetCoordinator that
	// there are additional messages to consume.
	p.queueCond.Signal()
}

// SignalFreeSlot signals to the queue that a new slot has opened up within the
// commitment transaction. The maximum number of free slots is defined when
// initially creating the packetQueue itself. This method, combined with
// AddPkt, creates the following abstraction: a synchronized queue of infinite
// length which can be added to at will, which flows onto a commitment
// transaction of fixed capacity.
func (p *packetQueue) SignalFreeSlot() {
	// We'll only send over a free slot signal if the queue *is not* empty.
	// Otherwise, it's possible that we attempt to overfill the free slots
	// semaphore and block indefinitely below.
	if atomic.LoadInt32(&p.queueLen) == 0 {
		return
	}

	select {
	case p.freeSlots <- struct{}{}:
	case <-p.quit:
		return
	}
}
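
// drainOverflowSketch is an illustrative sketch, not part of the original
// file, showing the consumer side: every time an HTLC leaves the commitment
// transaction, the link signals the freed slot and the packetCoordinator
// responds by handing the next queued packet over outgoingPkts. The function
// and parameter names (slotFreed, forward) are hypothetical.
func drainOverflowSketch(q *packetQueue, slotFreed <-chan struct{},
	forward func(*htlcPacket), quit <-chan struct{}) {

	for {
		select {
		// A slot on the commitment transaction has opened up, so let
		// the queue know. If the queue is empty this is a no-op.
		case <-slotFreed:
			q.SignalFreeSlot()

		// The coordinator released a previously overflowed packet into
		// the freed slot; process it as if it had just arrived.
		case pkt := <-q.outgoingPkts:
			forward(pkt)

		case <-quit:
			return
		}
	}
}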
// Length returns the number of pending htlc packets present within the
// overflow queue.
func (p *packetQueue) Length() int32 {
	return atomic.LoadInt32(&p.queueLen)
}

// TotalHtlcAmount returns the total amount (in mSAT) of all HTLC's currently
// residing within the overflow queue.
func (p *packetQueue) TotalHtlcAmount() lnwire.MilliSatoshi {
	// TODO(roasbeef): also factor in fee rate?
	return lnwire.MilliSatoshi(atomic.LoadInt64(&p.totalHtlcAmt))
}
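
// overflowMetricsSketch is an illustrative sketch, not part of the original
// file, showing how a caller might consult the queue's atomically maintained
// counters, e.g. when deciding whether an outgoing HTLC should bypass the
// commitment transaction and go straight into the overflow queue. The
// availableBandwidth parameter is an assumption for the example.
func overflowMetricsSketch(q *packetQueue,
	availableBandwidth lnwire.MilliSatoshi) bool {

	// Both accessors only read atomic counters, so they're safe to call
	// without holding the queue's internal lock.
	queuedPackets := q.Length()
	queuedAmt := q.TotalHtlcAmount()

	// Treat the link as saturated if packets are already waiting in the
	// overflow queue, or if the queued value alone exceeds the bandwidth
	// currently available on the channel.
	return queuedPackets > 0 || queuedAmt > availableBandwidth
}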