2017-11-11 01:38:38 +03:00
|
|
|
package htlcswitch
|
|
|
|
|
|
|
|
import (
|
|
|
|
prand "math/rand"
|
|
|
|
"reflect"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/davecgh/go-spew/spew"
|
2020-04-14 20:49:26 +03:00
|
|
|
"github.com/lightningnetwork/lnd/clock"
|
2017-11-11 01:38:38 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
|
|
|
)
|
|
|
|
|
2020-04-18 01:21:38 +03:00
|
|
|
const testExpiry = time.Minute
|
|
|
|
|
2017-11-11 01:38:38 +03:00
|
|
|
// TestMailBoxCouriers tests that both aspects of the mailBox struct works
|
|
|
|
// properly. Both packets and messages should be able to added to each
|
|
|
|
// respective mailbox concurrently, and also messages/packets should also be
|
|
|
|
// able to be received concurrently.
|
|
|
|
func TestMailBoxCouriers(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
|
|
|
// First, we'll create new instance of the current default mailbox
|
|
|
|
// type.
|
2020-04-18 01:21:38 +03:00
|
|
|
ctx := newMailboxContext(t, time.Now(), testExpiry)
|
|
|
|
defer ctx.mailbox.Stop()
|
2017-11-11 01:38:38 +03:00
|
|
|
|
|
|
|
// We'll be adding 10 message of both types to the mailbox.
|
|
|
|
const numPackets = 10
|
2018-02-16 02:44:20 +03:00
|
|
|
const halfPackets = numPackets / 2
|
2017-11-11 01:38:38 +03:00
|
|
|
|
|
|
|
// We'll add a set of random packets to the mailbox.
|
|
|
|
sentPackets := make([]*htlcPacket, numPackets)
|
|
|
|
for i := 0; i < numPackets; i++ {
|
|
|
|
pkt := &htlcPacket{
|
2017-10-30 21:56:51 +03:00
|
|
|
outgoingChanID: lnwire.NewShortChanIDFromInt(uint64(prand.Int63())),
|
|
|
|
incomingChanID: lnwire.NewShortChanIDFromInt(uint64(prand.Int63())),
|
|
|
|
amount: lnwire.MilliSatoshi(prand.Int63()),
|
2020-04-14 20:49:48 +03:00
|
|
|
htlc: &lnwire.UpdateAddHTLC{
|
|
|
|
ID: uint64(i),
|
|
|
|
},
|
2017-11-11 01:38:38 +03:00
|
|
|
}
|
|
|
|
sentPackets[i] = pkt
|
|
|
|
|
2020-04-18 01:21:38 +03:00
|
|
|
err := ctx.mailbox.AddPacket(pkt)
|
2020-04-14 20:50:25 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to add packet: %v", err)
|
|
|
|
}
|
2017-11-11 01:38:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Next, we'll do the same, but this time adding wire messages.
|
|
|
|
sentMessages := make([]lnwire.Message, numPackets)
|
|
|
|
for i := 0; i < numPackets; i++ {
|
|
|
|
msg := &lnwire.UpdateAddHTLC{
|
|
|
|
ID: uint64(prand.Int63()),
|
|
|
|
Amount: lnwire.MilliSatoshi(prand.Int63()),
|
|
|
|
}
|
|
|
|
sentMessages[i] = msg
|
|
|
|
|
2020-04-18 01:21:38 +03:00
|
|
|
err := ctx.mailbox.AddMessage(msg)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to add message: %v", err)
|
|
|
|
}
|
2017-11-11 01:38:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Now we'll attempt to read back the packets/messages we added to the
|
|
|
|
// mailbox. We'll alternative reading from the message outbox vs the
|
|
|
|
// packet outbox to ensure that they work concurrently properly.
|
|
|
|
recvdPackets := make([]*htlcPacket, 0, numPackets)
|
|
|
|
recvdMessages := make([]lnwire.Message, 0, numPackets)
|
|
|
|
for i := 0; i < numPackets*2; i++ {
|
|
|
|
timeout := time.After(time.Second * 5)
|
|
|
|
if i%2 == 0 {
|
|
|
|
select {
|
|
|
|
case <-timeout:
|
|
|
|
t.Fatalf("didn't recv pkt after timeout")
|
2020-04-18 01:21:38 +03:00
|
|
|
case pkt := <-ctx.mailbox.PacketOutBox():
|
2017-11-11 01:38:38 +03:00
|
|
|
recvdPackets = append(recvdPackets, pkt)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
select {
|
|
|
|
case <-timeout:
|
|
|
|
t.Fatalf("didn't recv message after timeout")
|
2020-04-18 01:21:38 +03:00
|
|
|
case msg := <-ctx.mailbox.MessageOutBox():
|
2017-11-11 01:38:38 +03:00
|
|
|
recvdMessages = append(recvdMessages, msg)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The number of messages/packets we sent, and the number we received
|
|
|
|
// should match exactly.
|
|
|
|
if len(sentPackets) != len(recvdPackets) {
|
|
|
|
t.Fatalf("expected %v packets instead got %v", len(sentPackets),
|
|
|
|
len(recvdPackets))
|
|
|
|
}
|
|
|
|
if len(sentMessages) != len(recvdMessages) {
|
|
|
|
t.Fatalf("expected %v messages instead got %v", len(sentMessages),
|
|
|
|
len(recvdMessages))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Additionally, the set of packets should match exactly, as we should
|
2017-12-18 05:40:05 +03:00
|
|
|
// have received the packets in the exact same ordering that we added.
|
2017-11-11 01:38:38 +03:00
|
|
|
if !reflect.DeepEqual(sentPackets, recvdPackets) {
|
|
|
|
t.Fatalf("recvd packets mismatched: expected %v, got %v",
|
|
|
|
spew.Sdump(sentPackets), spew.Sdump(recvdPackets))
|
|
|
|
}
|
|
|
|
if !reflect.DeepEqual(recvdMessages, recvdMessages) {
|
|
|
|
t.Fatalf("recvd messages mismatched: expected %v, got %v",
|
|
|
|
spew.Sdump(sentMessages), spew.Sdump(recvdMessages))
|
|
|
|
}
|
2018-02-16 02:44:20 +03:00
|
|
|
|
|
|
|
// Now that we've received all of the intended msgs/pkts, ack back half
|
|
|
|
// of the packets.
|
|
|
|
for _, recvdPkt := range recvdPackets[:halfPackets] {
|
2020-04-18 01:21:38 +03:00
|
|
|
ctx.mailbox.AckPacket(recvdPkt.inKey())
|
2018-02-16 02:44:20 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// With the packets drained and partially acked, we reset the mailbox,
|
|
|
|
// simulating a link shutting down and then coming back up.
|
2020-04-18 01:21:38 +03:00
|
|
|
err := ctx.mailbox.ResetMessages()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to reset messages: %v", err)
|
|
|
|
}
|
|
|
|
err = ctx.mailbox.ResetPackets()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to reset packets: %v", err)
|
|
|
|
}
|
2018-02-16 02:44:20 +03:00
|
|
|
|
|
|
|
// Now, we'll use the same alternating strategy to read from our
|
|
|
|
// mailbox. All wire messages are dropped on startup, but any unacked
|
|
|
|
// packets will be replayed in the same order they were delivered
|
|
|
|
// initially.
|
|
|
|
recvdPackets2 := make([]*htlcPacket, 0, halfPackets)
|
|
|
|
for i := 0; i < 2*halfPackets; i++ {
|
|
|
|
timeout := time.After(time.Second * 5)
|
|
|
|
if i%2 == 0 {
|
|
|
|
select {
|
|
|
|
case <-timeout:
|
|
|
|
t.Fatalf("didn't recv pkt after timeout")
|
2020-04-18 01:21:38 +03:00
|
|
|
case pkt := <-ctx.mailbox.PacketOutBox():
|
2018-02-16 02:44:20 +03:00
|
|
|
recvdPackets2 = append(recvdPackets2, pkt)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
select {
|
2020-04-18 01:21:38 +03:00
|
|
|
case <-ctx.mailbox.MessageOutBox():
|
2018-02-16 02:44:20 +03:00
|
|
|
t.Fatalf("should not receive wire msg after reset")
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// The number of packets we received should match the number of unacked
|
|
|
|
// packets left in the mailbox.
|
|
|
|
if halfPackets != len(recvdPackets2) {
|
|
|
|
t.Fatalf("expected %v packets instead got %v", halfPackets,
|
|
|
|
len(recvdPackets))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Additionally, the set of packets should match exactly with the
|
|
|
|
// unacked packets, and we should have received the packets in the exact
|
|
|
|
// same ordering that we added.
|
|
|
|
if !reflect.DeepEqual(recvdPackets[halfPackets:], recvdPackets2) {
|
|
|
|
t.Fatalf("recvd packets mismatched: expected %v, got %v",
|
|
|
|
spew.Sdump(sentPackets), spew.Sdump(recvdPackets))
|
|
|
|
}
|
2017-11-11 01:38:38 +03:00
|
|
|
}
|
2018-05-03 04:29:47 +03:00
|
|
|
|
2020-04-14 20:48:06 +03:00
|
|
|
// TestMailBoxResetAfterShutdown tests that ResetMessages and ResetPackets
|
|
|
|
// return ErrMailBoxShuttingDown after the mailbox has been stopped.
|
|
|
|
func TestMailBoxResetAfterShutdown(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
2020-04-18 01:21:38 +03:00
|
|
|
ctx := newMailboxContext(t, time.Now(), time.Second)
|
2020-04-14 20:48:06 +03:00
|
|
|
|
|
|
|
// Stop the mailbox, then try to reset the message and packet couriers.
|
2020-04-18 01:21:38 +03:00
|
|
|
ctx.mailbox.Stop()
|
2020-04-14 20:48:06 +03:00
|
|
|
|
2020-04-18 01:21:38 +03:00
|
|
|
err := ctx.mailbox.ResetMessages()
|
2020-04-14 20:48:06 +03:00
|
|
|
if err != ErrMailBoxShuttingDown {
|
|
|
|
t.Fatalf("expected ErrMailBoxShuttingDown, got: %v", err)
|
|
|
|
}
|
|
|
|
|
2020-04-18 01:21:38 +03:00
|
|
|
err = ctx.mailbox.ResetPackets()
|
2020-04-14 20:48:06 +03:00
|
|
|
if err != ErrMailBoxShuttingDown {
|
|
|
|
t.Fatalf("expected ErrMailBoxShuttingDown, got: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-04-14 20:48:40 +03:00
|
|
|
// mailboxContext bundles a mailbox under test together with the harness
// pieces the mailbox config depends on.
type mailboxContext struct {
	// t is the test instance used for assertions by helper methods.
	t *testing.T

	// mailbox is the MailBox implementation under test.
	mailbox MailBox

	// clock is a mock clock, advanced manually to trigger add expiries
	// deterministically.
	clock *clock.TestClock

	// forwards captures packets the mailbox hands back to the forwarding
	// function (e.g. cancelled Adds); buffered with capacity 1.
	forwards chan *htlcPacket
}
|
|
|
|
|
2020-04-14 20:49:26 +03:00
|
|
|
func newMailboxContext(t *testing.T, startTime time.Time,
|
|
|
|
expiry time.Duration) *mailboxContext {
|
2020-04-14 20:48:40 +03:00
|
|
|
|
|
|
|
ctx := &mailboxContext{
|
|
|
|
t: t,
|
2020-04-14 20:49:26 +03:00
|
|
|
clock: clock.NewTestClock(startTime),
|
2020-04-14 20:48:40 +03:00
|
|
|
forwards: make(chan *htlcPacket, 1),
|
|
|
|
}
|
|
|
|
ctx.mailbox = newMemoryMailBox(&mailBoxConfig{
|
|
|
|
fetchUpdate: func(sid lnwire.ShortChannelID) (
|
|
|
|
*lnwire.ChannelUpdate, error) {
|
|
|
|
return &lnwire.ChannelUpdate{
|
|
|
|
ShortChannelID: sid,
|
|
|
|
}, nil
|
|
|
|
},
|
|
|
|
forwardPackets: ctx.forward,
|
2020-04-14 20:49:26 +03:00
|
|
|
clock: ctx.clock,
|
|
|
|
expiry: expiry,
|
2020-04-14 20:48:40 +03:00
|
|
|
})
|
|
|
|
ctx.mailbox.Start()
|
|
|
|
|
|
|
|
return ctx
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *mailboxContext) forward(_ chan struct{},
|
|
|
|
pkts ...*htlcPacket) chan error {
|
|
|
|
|
|
|
|
for _, pkt := range pkts {
|
|
|
|
c.forwards <- pkt
|
|
|
|
}
|
|
|
|
|
|
|
|
errChan := make(chan error)
|
|
|
|
close(errChan)
|
|
|
|
|
|
|
|
return errChan
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *mailboxContext) sendAdds(start, num int) []*htlcPacket {
|
|
|
|
c.t.Helper()
|
|
|
|
|
|
|
|
sentPackets := make([]*htlcPacket, num)
|
|
|
|
for i := 0; i < num; i++ {
|
|
|
|
pkt := &htlcPacket{
|
|
|
|
outgoingChanID: lnwire.NewShortChanIDFromInt(
|
|
|
|
uint64(prand.Int63())),
|
|
|
|
incomingChanID: lnwire.NewShortChanIDFromInt(
|
|
|
|
uint64(prand.Int63())),
|
|
|
|
incomingHTLCID: uint64(start + i),
|
|
|
|
amount: lnwire.MilliSatoshi(prand.Int63()),
|
|
|
|
htlc: &lnwire.UpdateAddHTLC{
|
|
|
|
ID: uint64(start + i),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
sentPackets[i] = pkt
|
|
|
|
|
|
|
|
err := c.mailbox.AddPacket(pkt)
|
|
|
|
if err != nil {
|
|
|
|
c.t.Fatalf("unable to add packet: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return sentPackets
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *mailboxContext) receivePkts(pkts []*htlcPacket) {
|
|
|
|
c.t.Helper()
|
|
|
|
|
|
|
|
for i, expPkt := range pkts {
|
|
|
|
select {
|
|
|
|
case pkt := <-c.mailbox.PacketOutBox():
|
|
|
|
if reflect.DeepEqual(expPkt, pkt) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
c.t.Fatalf("inkey mismatch #%d, want: %v vs "+
|
|
|
|
"got: %v", i, expPkt.inKey(), pkt.inKey())
|
|
|
|
|
|
|
|
case <-time.After(50 * time.Millisecond):
|
|
|
|
c.t.Fatalf("did not receive fail for index %d", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *mailboxContext) checkFails(adds []*htlcPacket) {
|
|
|
|
c.t.Helper()
|
|
|
|
|
|
|
|
for i, add := range adds {
|
|
|
|
select {
|
|
|
|
case fail := <-c.forwards:
|
|
|
|
if add.inKey() == fail.inKey() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
c.t.Fatalf("inkey mismatch #%d, add: %v vs fail: %v",
|
|
|
|
i, add.inKey(), fail.inKey())
|
|
|
|
|
|
|
|
case <-time.After(50 * time.Millisecond):
|
|
|
|
c.t.Fatalf("did not receive fail for index %d", i)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case pkt := <-c.forwards:
|
|
|
|
c.t.Fatalf("unexpected forward: %v", pkt)
|
|
|
|
case <-time.After(50 * time.Millisecond):
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestMailBoxFailAdd asserts that FailAdd returns a response to the switch
// under various interleavings with other operations on the mailbox.
//
// Not parallel: relies on precise manipulation of the shared test clock.
func TestMailBoxFailAdd(t *testing.T) {
	var (
		batchDelay       = time.Second
		expiry           = time.Minute
		firstBatchStart  = time.Now()
		secondBatchStart = time.Now().Add(batchDelay)
		thirdBatchStart  = time.Now().Add(2 * batchDelay)
		thirdBatchExpiry = thirdBatchStart.Add(expiry)
	)
	ctx := newMailboxContext(t, firstBatchStart, expiry)
	defer ctx.mailbox.Stop()

	// failAdds cancels each of the given adds back to the switch.
	failAdds := func(adds []*htlcPacket) {
		for _, add := range adds {
			ctx.mailbox.FailAdd(add)
		}
	}

	const numBatchPackets = 5

	// Send the first batch of adds, and pull them from the mailbox.
	firstBatch := ctx.sendAdds(0, numBatchPackets)
	ctx.receivePkts(firstBatch)

	// Fail all of these adds, simulating an error adding the HTLCs to the
	// commitment. We should see a failure message for each.
	go failAdds(firstBatch)
	ctx.checkFails(firstBatch)

	// As a sanity check, Fail all of them again and assert that no
	// duplicate fails are sent.
	go failAdds(firstBatch)
	ctx.checkFails(nil)

	// Now, send a second batch of adds after a short delay and deliver them
	// to the link.
	ctx.clock.SetTime(secondBatchStart)
	secondBatch := ctx.sendAdds(numBatchPackets, numBatchPackets)
	ctx.receivePkts(secondBatch)

	// Reset the packet queue w/o changing the current time. This simulates
	// the link flapping and coming back up before the second batch's
	// expiries have elapsed. We should see no failures sent back.
	err := ctx.mailbox.ResetPackets()
	if err != nil {
		t.Fatalf("unable to reset packets: %v", err)
	}
	ctx.checkFails(nil)

	// Redeliver the second batch to the link and hold them there.
	ctx.receivePkts(secondBatch)

	// Send a third batch of adds shortly after the second batch.
	ctx.clock.SetTime(thirdBatchStart)
	thirdBatch := ctx.sendAdds(2*numBatchPackets, numBatchPackets)

	// Advance the clock so that the third batch expires. We expect to only
	// see fails for the third batch, since the second batch is still being
	// held by the link.
	ctx.clock.SetTime(thirdBatchExpiry)
	ctx.checkFails(thirdBatch)

	// Finally, reset the link which should cause the second batch to be
	// cancelled immediately.
	err = ctx.mailbox.ResetPackets()
	if err != nil {
		t.Fatalf("unable to reset packets: %v", err)
	}
	ctx.checkFails(secondBatch)
}
|
|
|
|
|
|
|
|
// TestMailBoxPacketPrioritization asserts that the mailbox will prioritize
// delivering Settle and Fail packets over Adds if both are available for
// delivery at the same time.
func TestMailBoxPacketPrioritization(t *testing.T) {
	t.Parallel()

	// First, we'll create new instance of the current default mailbox
	// type.
	ctx := newMailboxContext(t, time.Now(), testExpiry)
	defer ctx.mailbox.Stop()

	const numPackets = 5

	_, _, aliceChanID, bobChanID := genIDs()

	// Next we'll send the following sequence of packets:
	//  - Settle1
	//  - Add1
	//  - Add2
	//  - Fail
	//  - Settle2
	sentPackets := make([]*htlcPacket, numPackets)
	for i := 0; i < numPackets; i++ {
		pkt := &htlcPacket{
			outgoingChanID: aliceChanID,
			outgoingHTLCID: uint64(i),
			incomingChanID: bobChanID,
			incomingHTLCID: uint64(i),
			amount:         lnwire.MilliSatoshi(prand.Int63()),
		}

		// The htlc field determines which internal queue the packet
		// lands in: Adds vs. Settles/Fails.
		switch i {
		case 0, 4:
			// First and last packets are a Settle. A non-Add is
			// sent first to make the test deterministic w/o needing
			// to sleep.
			pkt.htlc = &lnwire.UpdateFulfillHTLC{ID: uint64(i)}
		case 1, 2:
			// Next two packets are Adds.
			pkt.htlc = &lnwire.UpdateAddHTLC{ID: uint64(i)}
		case 3:
			// Last packet is a Fail.
			pkt.htlc = &lnwire.UpdateFailHTLC{ID: uint64(i)}
		}

		sentPackets[i] = pkt

		err := ctx.mailbox.AddPacket(pkt)
		if err != nil {
			t.Fatalf("failed to add packet: %v", err)
		}
	}

	// When dequeueing the packets, we expect the following sequence:
	//  - Settle1
	//  - Fail
	//  - Settle2
	//  - Add1
	//  - Add2
	//
	// We expect to see Fail and Settle2 to be delivered before either Add1
	// or Add2 due to the prioritization between the split queue.
	for i := 0; i < numPackets; i++ {
		select {
		case pkt := <-ctx.mailbox.PacketOutBox():
			// Map dequeue position i back to the index in
			// sentPackets of the packet expected at that position.
			var expPkt *htlcPacket
			switch i {
			case 0:
				// First packet should be Settle1.
				expPkt = sentPackets[0]
			case 1:
				// Second packet should be Fail.
				expPkt = sentPackets[3]
			case 2:
				// Third packet should be Settle2.
				expPkt = sentPackets[4]
			case 3:
				// Fourth packet should be Add1.
				expPkt = sentPackets[1]
			case 4:
				// Last packet should be Add2.
				expPkt = sentPackets[2]
			}

			if !reflect.DeepEqual(expPkt, pkt) {
				t.Fatalf("recvd packet mismatch %d, want: %v, got: %v",
					i, spew.Sdump(expPkt), spew.Sdump(pkt))
			}

		case <-time.After(50 * time.Millisecond):
			t.Fatalf("didn't receive packet %d before timeout", i)
		}
	}
}
|
|
|
|
|
2020-04-14 20:50:07 +03:00
|
|
|
// TestMailBoxAddExpiry asserts that the mailbox will cancel back Adds that have
// reached their expiry time.
//
// Not parallel: relies on precise manipulation of the shared test clock.
func TestMailBoxAddExpiry(t *testing.T) {
	var (
		expiry            = time.Minute
		batchDelay        = time.Second
		firstBatchStart   = time.Now()
		firstBatchExpiry  = firstBatchStart.Add(expiry)
		secondBatchStart  = firstBatchStart.Add(batchDelay)
		secondBatchExpiry = secondBatchStart.Add(expiry)
	)

	ctx := newMailboxContext(t, firstBatchStart, expiry)
	defer ctx.mailbox.Stop()

	// Each batch will consist of 10 messages.
	const numBatchPackets = 10

	firstBatch := ctx.sendAdds(0, numBatchPackets)

	// Advancing only by the batch delay should not expire anything yet.
	ctx.clock.SetTime(secondBatchStart)
	ctx.checkFails(nil)

	secondBatch := ctx.sendAdds(numBatchPackets, numBatchPackets)

	// Once the first batch's expiry is reached, only the first batch
	// should be cancelled back.
	ctx.clock.SetTime(firstBatchExpiry)
	ctx.checkFails(firstBatch)

	// Advancing to the second batch's expiry should then cancel the rest.
	ctx.clock.SetTime(secondBatchExpiry)
	ctx.checkFails(secondBatch)
}
|
|
|
|
|
2020-04-14 20:50:25 +03:00
|
|
|
// TestMailBoxDuplicateAddPacket asserts that the mailbox returns an
|
|
|
|
// ErrPacketAlreadyExists failure when two htlcPackets are added with identical
|
|
|
|
// incoming circuit keys.
|
|
|
|
func TestMailBoxDuplicateAddPacket(t *testing.T) {
|
|
|
|
t.Parallel()
|
|
|
|
|
2020-04-18 01:21:38 +03:00
|
|
|
ctx := newMailboxContext(t, time.Now(), testExpiry)
|
|
|
|
ctx.mailbox.Start()
|
|
|
|
defer ctx.mailbox.Stop()
|
2020-04-14 20:50:25 +03:00
|
|
|
|
|
|
|
addTwice := func(t *testing.T, pkt *htlcPacket) {
|
|
|
|
// The first add should succeed.
|
2020-04-18 01:21:38 +03:00
|
|
|
err := ctx.mailbox.AddPacket(pkt)
|
2020-04-14 20:50:25 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to add packet: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Adding again with the same incoming circuit key should fail.
|
2020-04-18 01:21:38 +03:00
|
|
|
err = ctx.mailbox.AddPacket(pkt)
|
2020-04-14 20:50:25 +03:00
|
|
|
if err != ErrPacketAlreadyExists {
|
|
|
|
t.Fatalf("expected ErrPacketAlreadyExists, got: %v", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Assert duplicate AddPacket calls fail for all types of HTLCs.
|
|
|
|
addTwice(t, &htlcPacket{
|
|
|
|
incomingHTLCID: 0,
|
|
|
|
htlc: &lnwire.UpdateAddHTLC{},
|
|
|
|
})
|
|
|
|
addTwice(t, &htlcPacket{
|
|
|
|
incomingHTLCID: 1,
|
|
|
|
htlc: &lnwire.UpdateFulfillHTLC{},
|
|
|
|
})
|
|
|
|
addTwice(t, &htlcPacket{
|
|
|
|
incomingHTLCID: 2,
|
|
|
|
htlc: &lnwire.UpdateFailHTLC{},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-05-03 04:29:47 +03:00
|
|
|
// TestMailOrchestrator asserts that the orchestrator properly buffers packets
// for channels that haven't been made live, such that they are delivered
// immediately after BindLiveShortChanID. It also tests that packets are delivered
// readily to mailboxes for channels that are already in the live state.
func TestMailOrchestrator(t *testing.T) {
	t.Parallel()

	// First, we'll create a new instance of our orchestrator.
	mo := newMailOrchestrator(&mailOrchConfig{
		// Serve a minimal channel update echoing the requested id.
		fetchUpdate: func(sid lnwire.ShortChannelID) (
			*lnwire.ChannelUpdate, error) {

			return &lnwire.ChannelUpdate{
				ShortChannelID: sid,
			}, nil
		},
		forwardPackets: func(_ chan struct{},
			pkts ...*htlcPacket) chan error {

			// Close the channel immediately so the goroutine
			// logging errors can exit.
			errChan := make(chan error)
			close(errChan)
			return errChan
		},
		clock:  clock.NewTestClock(time.Now()),
		expiry: testExpiry,
	})
	defer mo.Stop()

	// We'll be delivering 10 htlc packets via the orchestrator.
	const numPackets = 10
	const halfPackets = numPackets / 2

	// Before any mailbox is created or made live, we will deliver half of
	// the htlcs via the orchestrator.
	chanID1, chanID2, aliceChanID, bobChanID := genIDs()
	sentPackets := make([]*htlcPacket, halfPackets)
	for i := 0; i < halfPackets; i++ {
		pkt := &htlcPacket{
			outgoingChanID: aliceChanID,
			outgoingHTLCID: uint64(i),
			incomingChanID: bobChanID,
			incomingHTLCID: uint64(i),
			amount:         lnwire.MilliSatoshi(prand.Int63()),
			htlc: &lnwire.UpdateAddHTLC{
				ID: uint64(i),
			},
		}
		sentPackets[i] = pkt

		// Deliver keyed on the outgoing channel, i.e. Alice's chan,
		// which has no mailbox yet — these should be buffered.
		mo.Deliver(pkt.outgoingChanID, pkt)
	}

	// Now, initialize a new mailbox for Alice's chanid.
	mailbox := mo.GetOrCreateMailBox(chanID1, aliceChanID)

	// Verify that no messages are received, since Alice's mailbox has not
	// been made live.
	for i := 0; i < halfPackets; i++ {
		timeout := time.After(50 * time.Millisecond)
		select {
		case <-mailbox.MessageOutBox():
			t.Fatalf("should not receive wire msg after reset")
		case <-timeout:
		}
	}

	// Assign a short chan id to the existing mailbox, make it available for
	// capturing incoming HTLCs. The HTLCs added above should be delivered
	// immediately.
	mo.BindLiveShortChanID(mailbox, chanID1, aliceChanID)

	// Verify that all of the packets are queued and delivered to Alice's
	// mailbox.
	recvdPackets := make([]*htlcPacket, 0, len(sentPackets))
	for i := 0; i < halfPackets; i++ {
		timeout := time.After(5 * time.Second)
		select {
		case <-timeout:
			t.Fatalf("didn't recv pkt %d after timeout", i)
		case pkt := <-mailbox.PacketOutBox():
			recvdPackets = append(recvdPackets, pkt)
		}
	}

	// We should have received half of the total number of packets.
	if len(recvdPackets) != halfPackets {
		t.Fatalf("expected %v packets instead got %v",
			halfPackets, len(recvdPackets))
	}

	// Check that the received packets are equal to the sent packets.
	if !reflect.DeepEqual(recvdPackets, sentPackets) {
		t.Fatalf("recvd packets mismatched: expected %v, got %v",
			spew.Sdump(sentPackets), spew.Sdump(recvdPackets))
	}

	// For the second half of the test, create a new mailbox for Bob and
	// immediately make it live with an assigned short chan id.
	mailbox = mo.GetOrCreateMailBox(chanID2, bobChanID)
	mo.BindLiveShortChanID(mailbox, chanID2, bobChanID)

	// Create the second half of our htlcs, and deliver them via the
	// orchestrator. We should be able to receive each of these in order.
	// Note: sentPackets is reused here, overwriting the first batch.
	recvdPackets = make([]*htlcPacket, 0, len(sentPackets))
	for i := 0; i < halfPackets; i++ {
		pkt := &htlcPacket{
			outgoingChanID: aliceChanID,
			outgoingHTLCID: uint64(halfPackets + i),
			incomingChanID: bobChanID,
			incomingHTLCID: uint64(halfPackets + i),
			amount:         lnwire.MilliSatoshi(prand.Int63()),
			htlc: &lnwire.UpdateAddHTLC{
				ID: uint64(halfPackets + i),
			},
		}
		sentPackets[i] = pkt

		// Deliver keyed on the incoming channel here — presumably
		// because Bob's mailbox was bound to bobChanID above; confirm
		// against the orchestrator's keying if this ever changes.
		mo.Deliver(pkt.incomingChanID, pkt)

		timeout := time.After(50 * time.Millisecond)
		select {
		case <-timeout:
			t.Fatalf("didn't recv pkt %d after timeout", halfPackets+i)
		case pkt := <-mailbox.PacketOutBox():
			recvdPackets = append(recvdPackets, pkt)
		}
	}

	// Again, we should have received half of the total number of packets.
	if len(recvdPackets) != halfPackets {
		t.Fatalf("expected %v packets instead got %v",
			halfPackets, len(recvdPackets))
	}

	// Check that the received packets are equal to the sent packets.
	if !reflect.DeepEqual(recvdPackets, sentPackets) {
		t.Fatalf("recvd packets mismatched: expected %v, got %v",
			spew.Sdump(sentPackets), spew.Sdump(recvdPackets))
	}
}
|