Merge pull request #4721 from guggero/itest-flake-hunt

itest: cleanup and flake fix
Oliver Gugger 2020-10-28 17:05:06 +00:00 committed by GitHub
commit 8b32285f48
9 changed files with 494 additions and 1046 deletions
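
At a high level, this diff removes the harness' push-based transaction watcher in favour of polling the miner's mempool directly, and converts the multi-hop itests from hand-rolled wait.Predicate loops with hard-coded 15 second timeouts to wait.NoError with defaultTimeout plus testify's require assertions. A minimal sketch of that assertion change, lifted from the pattern repeated in the tests below (err, nodes, payHash and t are assumed from the surrounding test context):

// Old pattern (removed): poll with wait.Predicate and surface predErr by hand.
var predErr error
err = wait.Predicate(func() bool {
	predErr = assertActiveHtlcs(nodes, payHash[:])
	return predErr == nil
}, time.Second*15)
if err != nil {
	t.Fatalf("htlc mismatch: %v", predErr)
}

// New pattern (added): the closure returns the error itself and require
// reports it on failure, using the shared defaultTimeout.
err = wait.NoError(func() error {
	return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)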

@ -57,9 +57,6 @@ type NetworkHarness struct {
Alice *HarnessNode
Bob *HarnessNode
seenTxns chan *chainhash.Hash
bitcoinWatchRequests chan *txWatchRequest
// Channel for transmitting stderr output from failed lightning node
// to main process.
lndErrorChan chan error
@ -83,19 +80,16 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) (
feeService := startFeeService()
n := NetworkHarness{
activeNodes: make(map[int]*HarnessNode),
nodesByPub: make(map[string]*HarnessNode),
seenTxns: make(chan *chainhash.Hash),
bitcoinWatchRequests: make(chan *txWatchRequest),
lndErrorChan: make(chan error),
netParams: r.ActiveNet,
Miner: r,
BackendCfg: b,
feeService: feeService,
quit: make(chan struct{}),
lndBinary: lndBinary,
activeNodes: make(map[int]*HarnessNode),
nodesByPub: make(map[string]*HarnessNode),
lndErrorChan: make(chan error),
netParams: r.ActiveNet,
Miner: r,
BackendCfg: b,
feeService: feeService,
quit: make(chan struct{}),
lndBinary: lndBinary,
}
go n.networkWatcher()
return &n, nil
}
@ -746,81 +740,12 @@ func saveProfilesPage(node *HarnessNode) error {
return nil
}
// TODO(roasbeef): add a WithChannel higher-order function?
// * python-like context manager w.r.t using a channel within a test
// * possibly adds more funds to the target wallet if the funds are not
// enough
// txWatchRequest encapsulates a request to the harness' Bitcoin network
// watcher to dispatch a notification once a transaction with the target txid
// is seen within the test network.
type txWatchRequest struct {
txid chainhash.Hash
eventChan chan struct{}
}
// networkWatcher is a goroutine which accepts async notification
// requests for the broadcast of a target transaction, and then dispatches the
// transaction once it's seen on the Bitcoin network.
func (n *NetworkHarness) networkWatcher() {
seenTxns := make(map[chainhash.Hash]struct{})
clients := make(map[chainhash.Hash][]chan struct{})
for {
select {
case <-n.quit:
return
case req := <-n.bitcoinWatchRequests:
// If we've already seen this transaction, then
// immediately dispatch the request. Otherwise, append
// to the list of clients who are watching for the
// broadcast of this transaction.
if _, ok := seenTxns[req.txid]; ok {
close(req.eventChan)
} else {
clients[req.txid] = append(clients[req.txid], req.eventChan)
}
case txid := <-n.seenTxns:
// Add this txid to our set of "seen" transactions. So
// we're able to dispatch any notifications for this
// txid which arrive *after* it's seen within the
// network.
seenTxns[*txid] = struct{}{}
// If there isn't a registered notification for this
// transaction then ignore it.
txClients, ok := clients[*txid]
if !ok {
continue
}
// Otherwise, dispatch the notification to all clients,
// cleaning up the now un-needed state.
for _, client := range txClients {
close(client)
}
delete(clients, *txid)
}
}
}
// OnTxAccepted is a callback to be called each time a new transaction has been
// broadcast on the network.
func (n *NetworkHarness) OnTxAccepted(hash *chainhash.Hash) {
select {
case n.seenTxns <- hash:
case <-n.quit:
return
}
}
// WaitForTxBroadcast blocks until the target txid is seen on the network. If
// WaitForTxInMempool blocks until the target txid is seen in the mempool. If
// the transaction isn't seen within the network before the passed timeout,
// then an error is returned.
// TODO(roasbeef): add another method which creates queue of all seen transactions
func (n *NetworkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.Hash) error {
func (n *NetworkHarness) WaitForTxInMempool(ctx context.Context,
txid chainhash.Hash) error {
// Return immediately if harness has been torn down.
select {
case <-n.quit:
@ -828,20 +753,29 @@ func (n *NetworkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.
default:
}
eventChan := make(chan struct{})
ticker := time.NewTicker(50 * time.Millisecond)
defer ticker.Stop()
n.bitcoinWatchRequests <- &txWatchRequest{
txid: txid,
eventChan: eventChan,
}
var mempool []*chainhash.Hash
for {
select {
case <-ctx.Done():
return fmt.Errorf("wanted %v, found %v txs "+
"in mempool: %v", txid, len(mempool), mempool)
select {
case <-eventChan:
return nil
case <-n.quit:
return fmt.Errorf("NetworkHarness has been torn down")
case <-ctx.Done():
return fmt.Errorf("tx not seen before context timeout")
case <-ticker.C:
var err error
mempool, err = n.Miner.Node.GetRawMempool()
if err != nil {
return err
}
for _, mempoolTx := range mempool {
if *mempoolTx == txid {
return nil
}
}
}
}
}
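
In the harness, WaitForTxBroadcast becomes WaitForTxInMempool: instead of registering a request with the now-removed networkWatcher goroutine, it polls the miner's GetRawMempool on a 50ms ticker and, if the context expires first, reports which transactions were actually in the mempool. A hypothetical call site, modeled on the CloseChannel change below (net, closeTxid and the 30 second timeout are assumptions):

// Wait for the close transaction to show up in the miner's mempool, or
// fail with the observed mempool contents once the context times out.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
if err := net.WaitForTxInMempool(ctx, *closeTxid); err != nil {
	return fmt.Errorf("error while waiting for broadcast tx: %v", err)
}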
@ -1163,7 +1097,7 @@ func (n *NetworkHarness) CloseChannel(ctx context.Context,
"%v", err)
return
}
if err := n.WaitForTxBroadcast(ctx, *closeTxid); err != nil {
if err := n.WaitForTxInMempool(ctx, *closeTxid); err != nil {
errChan <- fmt.Errorf("error while waiting for "+
"broadcast tx: %v", err)
return

@ -3,10 +3,8 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
@ -15,6 +13,7 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
@ -51,9 +50,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
require.NoError(t.t, err)
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
@ -62,32 +59,21 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
defer cancel()
_, err = alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
require.NoError(t.t, err)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
@ -101,8 +87,9 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// At this point, Bob decides that he wants to exit the channel
// immediately, so he force closes his commitment transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
bobForceClose := closeChannelAndAssertType(ctxt, t, net, bob,
aliceChanPoint, c == commitTypeAnchors, true)
bobForceClose := closeChannelAndAssertType(
ctxt, t, net, bob, aliceChanPoint, c == commitTypeAnchors, true,
)
// Alice will sweep her commitment output immediately. If there are
// anchors, Alice will also sweep hers.
@ -113,16 +100,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("unable to find alice's sweep tx in miner mempool: %v",
err)
}
require.NoError(t.t, err)
// Suspend Bob to force Carol to go to chain.
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
require.NoError(t.t, err)
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
@ -134,31 +116,24 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
require.NoError(t.t, err)
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry -
lncfg.DefaultIncomingBroadcastDelta))
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// Carol's commitment transaction should now be in the mempool. If there
// is an anchor, Carol will sweep that too.
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
require.NoError(t.t, err)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
require.NoError(t.t, err)
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
@ -174,16 +149,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// Mine a block that should confirm the commit tx, the anchor if present
// and the coinbase.
block := mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 {
t.Fatalf("expected %v transactions in block, got %v",
expectedTxes+1, len(block.Transactions))
}
require.Len(t.t, block.Transactions, expectedTxes+1)
assertTxInBlock(t, block, &closingTxid)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
err = restartBob()
require.NoError(t.t, err)
// After the force close transaction is mined, Carol should broadcast her
// second level HTLC transaction. Bob will broadcast a sweep tx to sweep
@ -198,9 +169,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
txes, err := getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
require.NoError(t.t, err)
// Both Carol's second level transaction and Bob's sweep should be
// spending from the commitment transaction.
@ -209,16 +178,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// At this point we suspend Alice to make sure she'll handle the
// on-chain settle after a restart.
restartAlice, err := net.SuspendNode(alice)
if err != nil {
t.Fatalf("unable to suspend alice: %v", err)
}
require.NoError(t.t, err)
// Mine a block to confirm the two transactions (+ the coinbase).
block = mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 {
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
require.Len(t.t, block.Transactions, expectedTxes+1)
// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
@ -226,73 +190,48 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a second level tx to claim
// the HTLC in his (already closed) channel with Alice.
bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobSecondLvlTx, err := waitForTxInMempool(
net.Miner.Node, minerMempoolTimeout,
)
require.NoError(t.t, err)
// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
require.NoError(t.t, err)
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose {
t.Fatalf("tx did not spend from bob's force close tx")
}
require.Equal(
t.t, *bobForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
)
// At this point, Bob should have broadcast his second layer success
// transaction, and should have sent it to the nursery for incubation.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels {
if forceCloseChan.Channel.LocalBalance != 0 {
continue
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if c.Channel.LocalBalance != 0 {
return nil
}
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
stage := forceCloseChan.PendingHtlcs[0].Stage
if stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
if c.PendingHtlcs[0].Stage != 1 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the first stage but was "+
"stage: %v", stage)
return false
"stage: %v", c.PendingHtlcs[0].Stage)
}
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
return nil
},
)
require.NoError(t.t, err)
// We'll now mine a block which should confirm Bob's second layer
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
require.Len(t.t, block.Transactions, 2)
assertTxInBlock(t, block, bobSecondLvlTx)
// Keep track of Bob's second level maturity, and decrement our track
@ -302,21 +241,17 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// Now that the preimage from Bob has hit the chain, restart Alice to
// ensure she'll pick it up.
if err := restartAlice(); err != nil {
t.Fatalf("unable to restart alice: %v", err)
}
err = restartAlice()
require.NoError(t.t, err)
// If we then mine 3 additional blocks, Carol's second level tx should
// mature, and she can pull the funds from it with a sweep tx.
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
require.NoError(t.t, err)
bobSecondLevelCSV -= carolSecondLevelCSV
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
}
require.NoError(t.t, err)
// Mining one additional block, Bob's second level tx is mature, and he
// can sweep the output.
@ -324,18 +259,14 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
assertTxInBlock(t, block, carolSweep)
bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
require.NoError(t.t, err)
// Make sure it spends from the second level tx.
tx, err = net.Miner.Node.GetRawTransaction(bobSweep)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx {
t.Fatalf("tx did not spend from bob's second level tx")
}
require.NoError(t.t, err)
require.Equal(
t.t, *bobSecondLvlTx, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
)
// When we mine one additional block, that will confirm Bob's sweep.
// Now Bob should have no pending channels anymore, as this just
@ -343,77 +274,15 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, bobSweep)
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := bob.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
return false
}
if len(chanInfo.Channels) != 0 {
predErr = fmt.Errorf("Bob should have no open "+
"channels, instead he has %v",
len(chanInfo.Channels))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
assertNodeNumChannels(t, bob, 0)
// Also Carol should have no channels left (open nor pending).
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob carol has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := carol.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
return false
}
if len(chanInfo.Channels) != 0 {
predErr = fmt.Errorf("carol should have no open "+
"channels, instead she has %v",
len(chanInfo.Channels))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
require.NoError(t.t, err)
assertNodeNumChannels(t, carol, 0)
// Finally, check that the Alice's payment is correctly marked
// succeeded.
@ -421,7 +290,5 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
err = checkPaymentStatus(
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())
}
require.NoError(t.t, err)
}
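
Across the multi-hop tests, the repeated wait.Predicate blocks that queried PendingChannels by hand are collapsed into waitForNumChannelPendingForceClose, which waits for the expected number of pending force-closed channels and runs an optional per-channel check. A usage sketch with the calling convention inferred from the call sites in this diff (ctxb, bob and defaultTimeout are assumed from the test context, and the inferred signature is an assumption, not copied from the helper's definition):

ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
err := waitForNumChannelPendingForceClose(
	ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
		// Optional per-channel check, run against each pending
		// force-closed channel reported by PendingChannels.
		if len(c.PendingHtlcs) != 1 {
			return fmt.Errorf("bob should have a pending htlc")
		}
		return nil
	},
)
require.NoError(t.t, err)

// Passing 0 and a nil check simply waits until no pending force closes remain.
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)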

@ -2,19 +2,18 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
// testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
@ -58,8 +57,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
payHash := makeFakePayHash(t)
_, err := alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(dustHtlcAmt),
PaymentHash: dustPayHash,
@ -68,13 +66,10 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
require.NoError(t.t, err)
_, err = alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
@ -83,21 +78,15 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
require.NoError(t.t, err)
// Verify that all nodes in the path now have two HTLC's with the
// proper parameters.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, dustPayHash, payHash)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, dustPayHash, payHash)
}, defaultTimeout)
require.NoError(t.t, err)
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@ -110,9 +99,8 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(
uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// Bob's force close transaction should now be found in the mempool. If
// there are anchors, we also expect Bob's anchor sweep.
@ -122,15 +110,11 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
}
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
require.NoError(t.t, err)
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("unable to find closing txid: %v", err)
}
require.NoError(t.t, err)
closeTx := getSpendingTxInMempool(
t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
Hash: *bobFundingTxid,
@ -146,30 +130,25 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// that we sent earlier. This means Alice should now only have a single
// HTLC on her channel.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash)
}, defaultTimeout)
require.NoError(t.t, err)
// With the closing transaction confirmed, we should expect Bob's HTLC
// timeout transaction to be broadcast due to the expiry being reached.
// If there are anchors, we also expect Carol's anchor sweep now.
txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
}
txes, err := getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// Lookup the timeout transaction that is expected to spend from the
// closing tx. We distinguish it from a possibly anchor sweep by value.
var htlcTimeout *chainhash.Hash
for _, tx := range txes {
prevOp := tx.TxIn[0].PreviousOutPoint
if prevOp.Hash != closeTxid {
t.Fatalf("tx not spending from closing tx")
}
require.Equal(t.t, closeTxid, prevOp.Hash)
// Assume that the timeout tx doesn't spend an output of exactly
// the size of the anchor.
@ -178,9 +157,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
htlcTimeout = &hash
}
}
if htlcTimeout == nil {
t.Fatalf("htlc timeout tx not found in mempool")
}
require.NotNil(t.t, htlcTimeout)
// We'll mine the remaining blocks in order to generate the sweep
// transaction of Bob's commitment output.
@ -188,9 +165,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// Check that the sweep spends from the mined commitment.
txes, err = getNTxsFromMempool(net.Miner.Node, 1, minerMempoolTimeout)
if err != nil {
t.Fatalf("sweep not found: %v", err)
}
require.NoError(t.t, err)
assertAllTxesSpendFrom(t, txes, closeTxid)
// Bob's pending channel report should show that he has a commitment
@ -199,21 +174,12 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
require.NoError(t.t, err)
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("bob should have pending for close chan but doesn't")
}
require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("bob should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
if len(forceCloseChan.PendingHtlcs) == 0 {
t.Fatalf("bob should have pending htlc but doesn't")
}
require.NotZero(t.t, forceCloseChan.LimboBalance)
require.NotZero(t.t, len(forceCloseChan.PendingHtlcs))
// Now we'll mine an additional block, which should confirm Bob's commit
// sweep. This block should also prompt Bob to broadcast their second
@ -230,60 +196,33 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// Therefore, at this point, there should be no active HTLC's on the
// commitment transaction from Alice -> Bob.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
// At this point, Bob should show that the pending HTLC has advanced to
// the second stage and is to be swept.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
require.NoError(t.t, err)
forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
t.Fatalf("bob's htlc should have advanced to the second stage: %v", err)
}
require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
// Next, we'll mine a final block that should confirm the second-layer
// sweeping transaction.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
// Once this transaction has been confirmed, Bob should detect that he
// no longer has any pending channels.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending "+
"channels but shouldn't: %v",
spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
// Coop close channel, expect no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssertType(
ctxt, t, net, alice, aliceChanPoint, false,
false,
ctxt, t, net, alice, aliceChanPoint, false, false,
)
}

@ -2,11 +2,9 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
@ -15,6 +13,7 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
@ -53,9 +52,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
require.NoError(t.t, err)
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
@ -64,32 +61,21 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
defer cancel()
_, err = alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
require.NoError(t.t, err)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
@ -97,9 +83,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
waitForInvoiceAccepted(t, carol, payHash)
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
require.NoError(t.t, err)
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
@ -111,9 +95,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
require.NoError(t.t, err)
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@ -125,9 +107,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(uint32(
invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
))
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// At this point, Carol should broadcast her active commitment
// transaction in order to go to the chain and sweep her HTLC. If there
@ -139,14 +120,10 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
_, err = getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("expected transaction not found in mempool: %v", err)
}
require.NoError(t.t, err)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
require.NoError(t.t, err)
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
@ -164,9 +141,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
mineBlocks(t, net, 1, expectedTxes)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
err = restartBob()
require.NoError(t.t, err)
// After the force close transaction is mined, Carol should broadcast
// her second level HTLC transaction. Bob will broadcast a sweep tx to
@ -178,20 +154,18 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
if c == commitTypeAnchors {
expectedTxes = 3
}
txes, err := getNTxsFromMempool(net.Miner.Node,
expectedTxes, minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
txes, err := getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// All transactions should be spending from the commitment transaction.
assertAllTxesSpendFrom(t, txes, closingTxid)
// We'll now mine an additional block which should confirm both the
// second layer transactions.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
time.Sleep(time.Second * 4)
@ -203,98 +177,52 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
require.NoError(t.t, err)
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("carol should have pending for close chan but doesn't")
}
require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("carol should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
require.NotZero(t.t, forceCloseChan.LimboBalance)
// The pending HTLC carol has should also now be in stage 2.
if len(forceCloseChan.PendingHtlcs) != 1 {
t.Fatalf("carol should have pending htlc but doesn't")
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
t.Fatalf("carol's htlc should have advanced to the second "+
"stage: %v", err)
}
require.Len(t.t, forceCloseChan.PendingHtlcs, 1)
require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)
// Once the second-level transaction confirmed, Bob should have
// extracted the preimage from the chain, and sent it back to Alice,
// clearing the HTLC off-chain.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
// If we mine 4 additional blocks, then both outputs should now be
// mature.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(defaultCSV)
require.NoError(t.t, err)
// We should have a new transaction in the mempool.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction: %v", err)
}
require.NoError(t.t, err)
// Finally, if we mine an additional block to confirm these two sweep
// transactions, Carol should not show a pending channel in her report
// afterwards.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to mine block: %v", err)
}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("carol still has pending channels: %v",
spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
require.NoError(t.t, err)
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
if err != nil {
t.Fatalf("unable to retrieve invoices: %v", err)
}
if len(invoicesResp.Invoices) != 1 {
t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
}
require.NoError(t.t, err)
require.Len(t.t, invoicesResp.Invoices, 1)
invoice := invoicesResp.Invoices[0]
if invoice.State != lnrpc.Invoice_SETTLED {
t.Fatalf("expected invoice to be settled on chain")
}
if invoice.AmtPaidSat != invoiceAmt {
t.Fatalf("expected invoice to be settled with %d sat, got "+
"%d sat", invoiceAmt, invoice.AmtPaidSat)
}
require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)
// Finally, check that the Alice's payment is correctly marked
// succeeded.
@ -302,9 +230,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
err = checkPaymentStatus(
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())
}
require.NoError(t.t, err)
// We'll close out the channel between Alice and Bob, then shutdown
// carol to conclude the test.

@ -2,11 +2,8 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
@ -15,6 +12,7 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)
// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
@ -51,9 +49,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
require.NoError(t.t, err)
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
@ -62,32 +58,21 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
defer cancel()
_, err = alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
require.NoError(t.t, err)
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash[:])
}, defaultTimeout)
require.NoError(t.t, err)
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
@ -102,22 +87,20 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// immediately force close the channel by broadcasting her commitment
// transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
aliceForceClose := closeChannelAndAssertType(ctxt, t, net, alice,
aliceChanPoint, c == commitTypeAnchors, true)
aliceForceClose := closeChannelAndAssertType(
ctxt, t, net, alice, aliceChanPoint, c == commitTypeAnchors,
true,
)
// Wait for the channel to be marked pending force close.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint)
if err != nil {
t.Fatalf("channel not pending force close: %v", err)
}
require.NoError(t.t, err)
// Mine enough blocks for Alice to sweep her funds from the force
// closed channel.
_, err = net.Miner.Node.Generate(defaultCSV)
if err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
require.NoError(t.t, err)
// Alice should now sweep her funds. If there are anchors, Alice should
// also sweep hers.
@ -125,16 +108,14 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if c == commitTypeAnchors {
expectedTxes = 2
}
_, err = waitForNTxsInMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find sweeping tx in mempool: %v", err)
}
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// Suspend bob, so Carol is forced to go on chain.
restartBob, err := net.SuspendNode(bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
require.NoError(t.t, err)
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
@ -146,31 +127,25 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
require.NoError(t.t, err)
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry-
lncfg.DefaultIncomingBroadcastDelta) - defaultCSV)
numBlocks := padCLTV(uint32(
invoiceReq.CltvExpiry-lncfg.DefaultIncomingBroadcastDelta,
) - defaultCSV)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// Carol's commitment transaction should now be in the mempool. If there
// are anchors, Carol also sweeps her anchor.
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("unable to find carol's txes: %v", err)
}
require.NoError(t.t, err)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
require.NoError(t.t, err)
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
@ -186,16 +161,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// Mine a block, which should contain: the commitment, possibly an
// anchor sweep and the coinbase tx.
block := mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 {
t.Fatalf("expected %v transactions in block, got %v",
expectedTxes, len(block.Transactions))
}
require.Len(t.t, block.Transactions, expectedTxes+1)
assertTxInBlock(t, block, &closingTxid)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
err = restartBob()
require.NoError(t.t, err)
// After the force close transaction is mined, Carol should broadcast her
// second level HTLC transaction. Bob will broadcast a sweep tx to sweep
@ -206,21 +177,17 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if c == commitTypeAnchors {
expectedTxes = 3
}
txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
txes, err := getNTxsFromMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
// All transactions should be spending from the commitment transaction.
assertAllTxesSpendFrom(t, txes, closingTxid)
// Mine a block to confirm the two transactions (+ coinbase).
block = mineBlocks(t, net, 1, expectedTxes)[0]
if len(block.Transactions) != expectedTxes+1 {
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
require.Len(t.t, block.Transactions, expectedTxes+1)
// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
@ -228,114 +195,60 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a sweep tx to directly claim
// the HTLC in his (already closed) channel with Alice.
bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobHtlcSweep, err := waitForTxInMempool(
net.Miner.Node, minerMempoolTimeout,
)
require.NoError(t.t, err)
// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose {
t.Fatalf("tx did not spend from alice's force close tx")
}
require.NoError(t.t, err)
require.Equal(
t.t, *aliceForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
)
// We'll now mine a block which should confirm Bob's HTLC sweep
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
require.Len(t.t, block.Transactions, 2)
assertTxInBlock(t, block, bobHtlcSweep)
carolSecondLevelCSV--
// Now that the sweeping transaction has been confirmed, Bob should now
// recognize that all contracts have been fully resolved, and show no
// pending close channels.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
// If we then mine 3 additional blocks, Carol's second level tx will
// mature, and she should pull the funds.
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
require.NoError(t.t, err)
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
}
carolSweep, err := waitForTxInMempool(
net.Miner.Node, minerMempoolTimeout,
)
require.NoError(t.t, err)
// When Carol's sweep gets confirmed, she should have no more pending
// channels.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, carolSweep)
pendingChansRequest = &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("carol still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
require.NoError(t.t, err)
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
if err != nil {
t.Fatalf("unable to retrieve invoices: %v", err)
}
if len(invoicesResp.Invoices) != 1 {
t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
}
require.NoError(t.t, err)
require.Len(t.t, invoicesResp.Invoices, 1)
invoice := invoicesResp.Invoices[0]
if invoice.State != lnrpc.Invoice_SETTLED {
t.Fatalf("expected invoice to be settled on chain")
}
if invoice.AmtPaidSat != invoiceAmt {
t.Fatalf("expected invoice to be settled with %d sat, got "+
"%d sat", invoiceAmt, invoice.AmtPaidSat)
}
require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)
// Finally, check that the Alice's payment is correctly marked
// succeeded.
@ -343,7 +256,5 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
err = checkPaymentStatus(
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
if err != nil {
t.Fatalf(err.Error())
}
require.NoError(t.t, err)
}

@ -3,14 +3,12 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
@ -47,8 +45,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
carolPubKey := carol.PubKey[:]
payHash := makeFakePayHash(t)
_, err := alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
@ -57,21 +54,15 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
require.NoError(t.t, err)
// Once the HTLC has cleared, all channels in our mini network should
// have it locked in.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash)
}, defaultTimeout)
require.NoError(t.t, err)
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@ -87,105 +78,64 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// At this point, Bob should have a pending force close channel as he
// just went to chain.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(ctxt,
pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if c.LimboBalance == 0 {
return fmt.Errorf("bob should have nonzero "+
"limbo balance instead has: %v",
c.LimboBalance)
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
predErr = fmt.Errorf("bob should have nonzero limbo "+
"balance instead has: %v",
forceCloseChan.LimboBalance)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
return nil
},
)
require.NoError(t.t, err)
// We'll mine defaultCSV blocks in order to generate the sweep
// transaction of Bob's funding output. If there are anchors, mine
// Carol's anchor sweep too.
if c == commitTypeAnchors {
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find carol's anchor sweep tx: %v", err)
}
require.NoError(t.t, err)
}
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(defaultCSV)
require.NoError(t.t, err)
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's funding output sweep tx: %v", err)
}
require.NoError(t.t, err)
// We'll now mine enough blocks for the HTLC to expire. After this, Bob
// should hand off the now expired HTLC output to the utxo nursery.
numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1))
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// Bob's pending channel report should show that he has a single HTLC
// that's now in stage one.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending force " +
"close chan but doesn't")
return false
}
if c.PendingHtlcs[0].Stage != 1 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the first stage: %v", err)
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the first stage: %v", err)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
return nil
},
)
require.NoError(t.t, err)
// We should also now find a transaction in the mempool, as Bob should
// have broadcast his second layer timeout transaction.
timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
}
require.NoError(t.t, err)
// Next, we'll mine an additional block. This should serve to confirm
// the second layer timeout transaction.
@ -195,62 +145,39 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// With the second layer timeout transaction confirmed, Bob should have
// canceled backwards the HTLC that carol sent.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
// Additionally, Bob should now show that HTLC as being advanced to the
// second stage.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
if c.PendingHtlcs[0].Stage != 2 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
return nil
},
)
require.NoError(t.t, err)
// We'll now mine 4 additional blocks. This should be enough for Bob's
// CSV timelock to expire and the sweeping transaction of the HTLC to be
// broadcast.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to mine blocks: %v", err)
}
_, err = net.Miner.Node.Generate(defaultCSV)
require.NoError(t.t, err)
sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's htlc sweep tx: %v", err)
}
require.NoError(t.t, err)
// We'll then mine a final block which should confirm this second layer
// sweep transaction.
@ -259,27 +186,9 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// At this point, Bob should no longer show any channels as pending
// close.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
// Coop close, no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)

@ -3,14 +3,12 @@ package itest
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a
@ -48,8 +46,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
carolPubKey := carol.PubKey[:]
payHash := makeFakePayHash(t)
_, err := alice.RouterClient.SendPaymentV2(
ctx,
&routerrpc.SendPaymentRequest{
ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
@ -58,21 +55,15 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
FeeLimitMsat: noFeeLimitMsat,
},
)
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
}
require.NoError(t.t, err)
// Once the HTLC has cleared, all the nodes in our mini network should
// show that the HTLC has been locked in.
var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
err = wait.Predicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
err = wait.NoError(func() error {
return assertActiveHtlcs(nodes, payHash)
}, defaultTimeout)
require.NoError(t.t, err)
// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@ -90,28 +81,9 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// At this point, Bob should have a pending force close channel as
// Carol has gone directly to chain.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for "+
"pending channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending " +
"force close channels but doesn't")
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil)
require.NoError(t.t, err)
// Bob can sweep his output immediately. If there is an anchor, Bob will
// sweep that as well.
@ -123,55 +95,39 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("failed to find txes in miner mempool: %v", err)
}
require.NoError(t.t, err)
// Next, we'll mine enough blocks for the HTLC to expire. At this
// point, Bob should hand off the output to his internal utxo nursery,
// which will broadcast a sweep transaction.
numBlocks := padCLTV(finalCltvDelta - 1)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
_, err = net.Miner.Node.Generate(numBlocks)
require.NoError(t.t, err)
// If we check Bob's pending channel report, it should show that he has
// a single HTLC that's now in the second stage, as we skip the initial
// first stage since this is a direct HTLC.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
if len(c.PendingHtlcs) != 1 {
return fmt.Errorf("bob should have pending " +
"htlc but doesn't")
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
if c.PendingHtlcs[0].Stage != 2 {
return fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
return false
}
return nil
},
)
require.NoError(t.t, err)
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// We need to generate an additional block to trigger the sweep.
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
// Bob's sweeping transaction should now be found in the mempool at
// this point.
@ -185,14 +141,10 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// we'll fail.
// TODO(halseth): can we use waitForChannelPendingForceClose to
// avoid this hack?
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
_, err = net.Miner.Node.Generate(1)
require.NoError(t.t, err)
sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction: "+
"%v", err)
}
require.NoError(t.t, err)
}
// If we mine an additional block, then this should confirm Bob's
@ -204,45 +156,23 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// cancel back that HTLC. As a result, Alice should not know of any
// active HTLCs.
nodes = []*lntest.HarnessNode{alice}
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("alice's channel still has active htlc's: %v", predErr)
}
err = wait.NoError(func() error {
return assertNumActiveHtlcs(nodes, 0)
}, defaultTimeout)
require.NoError(t.t, err)
// Now we'll check Bob's pending channel report. Since this was Carol's
// commitment, he doesn't have to wait for any CSV delays. As a result,
// he should show no additional pending transactions.
err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
require.NoError(t.t, err)
// We'll close out the test by closing the channel from Alice to Bob,
// and then shutting down the new node we created as it's no longer
// needed. Coop close, no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssertType(
ctxt, t, net, alice, aliceChanPoint, false,
false,
ctxt, t, net, alice, aliceChanPoint, false, false,
)
}

@ -330,37 +330,68 @@ func waitForChannelPendingForceClose(ctx context.Context,
Index: fundingChanPoint.OutputIndex,
}
var predErr error
err = wait.Predicate(func() bool {
return wait.NoError(func() error {
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := node.PendingChannels(
ctx, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to get pending "+
"channels: %v", err)
return false
return fmt.Errorf("unable to get pending channels: %v",
err)
}
forceClose, err := findForceClosedChannel(pendingChanResp, &op)
if err != nil {
predErr = err
return false
return err
}
// We must wait until the UTXO nursery has received the channel
// and is aware of its maturity height.
if forceClose.MaturityHeight == 0 {
predErr = fmt.Errorf("channel had maturity height of 0")
return false
return fmt.Errorf("channel had maturity height of 0")
}
return true
}, time.Second*15)
if err != nil {
return predErr
}
return nil
return nil
}, defaultTimeout)
}
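The helper above shows the refactoring pattern applied throughout this change: the old wait.Predicate loop with a shared predErr variable is replaced by wait.NoError, which keeps re-running a closure that returns an error until it returns nil or the timeout elapses, and surfaces the last error on failure. A minimal sketch of the pattern, assuming the usual itest imports (fmt, lntest/wait, require) and a hypothetical checkSomething helper:

err := wait.NoError(func() error {
	// checkSomething is a hypothetical per-test condition check.
	// Return a descriptive error while the condition does not yet
	// hold; wait.NoError retries until defaultTimeout expires and
	// then reports the last error returned here.
	if err := checkSomething(); err != nil {
		return fmt.Errorf("condition not yet met: %v", err)
	}
	return nil
}, defaultTimeout)
require.NoError(t.t, err)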
// lnrpcForceCloseChannel is a short type alias for a ridiculously long type
// name in the lnrpc package.
type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel
// waitForNumChannelPendingForceClose waits for the node to report a certain
// number of channels in state pending force close.
func waitForNumChannelPendingForceClose(ctx context.Context,
node *lntest.HarnessNode, expectedNum int,
perChanCheck func(channel *lnrpcForceCloseChannel) error) error {
return wait.NoError(func() error {
resp, err := node.PendingChannels(
ctx, &lnrpc.PendingChannelsRequest{},
)
if err != nil {
return fmt.Errorf("unable to get pending channels: %v",
err)
}
forceCloseChans := resp.PendingForceClosingChannels
if len(forceCloseChans) != expectedNum {
return fmt.Errorf("bob should have %d pending "+
"force close channels but has %d", expectedNum,
len(forceCloseChans))
}
if perChanCheck != nil {
for _, forceCloseChan := range forceCloseChans {
err := perChanCheck(forceCloseChan)
if err != nil {
return err
}
}
}
return nil
}, defaultTimeout)
}
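waitForNumChannelPendingForceClose is what the rewritten tests above call instead of hand-rolling PendingChannels polling. A sketch of a typical call, mirroring the usage shown earlier in this diff (the expected count of 1 and stage value of 2 are just example values):

ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(
	ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
		// Optional per-channel check, run for every reported
		// pending force close channel.
		if len(c.PendingHtlcs) != 1 {
			return fmt.Errorf("expected exactly one pending htlc")
		}
		if c.PendingHtlcs[0].Stage != 2 {
			return fmt.Errorf("htlc still in stage %d",
				c.PendingHtlcs[0].Stage)
		}
		return nil
	},
)
require.NoError(t.t, err)

Passing nil as the last argument only asserts the number of pending force close channels.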
// cleanupForceClose mines a force close commitment found in the mempool and
@ -1518,7 +1549,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
// testPaymentFollowingChannelOpen tests that the channel transition from
// 'pending' to 'open' state does not cause any inconsistencies within other
// subsystems trying to udpate the channel state in the db. We follow this
// subsystems trying to update the channel state in the db. We follow this
// transition with a payment that updates the commitment state and verify that
// the pending state is up to date.
func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) {
@ -1550,7 +1581,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
t.Fatalf("Bob restart failed: %v", err)
}
// We ensure that Bob reconnets to Alice.
// We ensure that Bob reconnects to Alice.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
if err := net.EnsureConnected(ctxt, net.Bob, net.Alice); err != nil {
t.Fatalf("peers unable to reconnect after restart: %v", err)
@ -1559,7 +1590,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
// We mine one block for the channel to be confirmed.
_ = mineBlocks(t, net, 6, 1)[0]
// We verify that the chanel is open from both nodes point of view.
// We verify that the channel is open from both nodes point of view.
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0)
@ -1575,14 +1606,11 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
// Send payment to Bob so that a channel update to disk will be
// executed.
sendAndAssertSuccess(
t, net.Alice,
&routerrpc.SendPaymentRequest{
PaymentRequest: bobPayReqs[0],
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
},
)
sendAndAssertSuccess(t, net.Alice, &routerrpc.SendPaymentRequest{
PaymentRequest: bobPayReqs[0],
TimeoutSeconds: 60,
FeeLimitSat: 1000000,
})
// At this point we want to make sure the channel is opened and not
// pending.
@ -13987,19 +14015,26 @@ func sendAndAssertSuccess(t *harnessTest, node *lntest.HarnessNode,
ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
defer cancel()
stream, err := node.RouterClient.SendPaymentV2(ctx, req)
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
var result *lnrpc.Payment
err := wait.NoError(func() error {
stream, err := node.RouterClient.SendPaymentV2(ctx, req)
if err != nil {
return fmt.Errorf("unable to send payment: %v", err)
}
result, err := getPaymentResult(stream)
if err != nil {
t.Fatalf("unable to get payment result: %v", err)
}
result, err = getPaymentResult(stream)
if err != nil {
return fmt.Errorf("unable to get payment result: %v",
err)
}
if result.Status != lnrpc.Payment_SUCCEEDED {
t.Fatalf("payment failed: %v", result.Status)
}
if result.Status != lnrpc.Payment_SUCCEEDED {
return fmt.Errorf("payment failed: %v", result.Status)
}
return nil
}, defaultTimeout)
require.NoError(t.t, err)
return result
}
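With the retry wrapper in place, callers invoke sendAndAssertSuccess exactly as before; transient SendPaymentV2 or stream errors are now retried inside wait.NoError rather than failing the test immediately. A usage sketch, mirroring the call shown earlier in this diff (the payment request fields are illustrative):

sendAndAssertSuccess(t, net.Alice, &routerrpc.SendPaymentRequest{
	PaymentRequest: bobPayReqs[0],
	TimeoutSeconds: 60,
	FeeLimitSat:    1000000,
})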
@ -14054,7 +14089,7 @@ func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (
// TestLightningNetworkDaemon performs a series of integration tests amongst a
// programmatically driven network of lnd nodes.
func TestLightningNetworkDaemon(t *testing.T) {
// If no tests are regsitered, then we can exit early.
// If no tests are registered, then we can exit early.
if len(testsCases) == 0 {
t.Skip("integration tests not selected with flag 'rpctest'")
}
@ -14075,14 +14110,9 @@ func TestLightningNetworkDaemon(t *testing.T) {
//
// We will also connect it to our chain backend.
minerLogDir := "./.minerlogs"
handlers := &rpcclient.NotificationHandlers{
OnTxAccepted: func(hash *chainhash.Hash, amt btcutil.Amount) {
lndHarness.OnTxAccepted(hash)
},
}
miner, minerCleanUp, err := lntest.NewMiner(
minerLogDir, "output_btcd_miner.log",
harnessNetParams, handlers,
harnessNetParams, &rpcclient.NotificationHandlers{},
)
require.NoError(t, err, "failed to create new miner")
defer func() {

@ -2,12 +2,12 @@
<time> [ERR] BRAR: Unable to register for conf for txid(<hex>): TxNotifier is exiting
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: replacement transaction <hex> has an insufficient absolute fee: needs <amt>, has <amt>
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: replacement transaction <hex> has an insufficient fee rate: needs more than <amt>, has <amt>
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: chacha20poly1305: message authentication failed
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: EOF
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: read tcp <ip>-><ip>: i/o timeout
<time> [ERR] BTCN: Unable to process block connected (height=<height>, hash=<hex>): out of order block <hex>: expected PrevBlock <hex>, got <hex>
<time> [ERR] BTCN: Unknown connid=<id>
<time> [ERR] CHFT: Close channel <chan_point> unknown to store
<time> [ERR] CNCT: *contractcourt.commitSweepResolver(<chan_point>): unable to sweep input: remote party swept utxo
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to advance state: channel not found
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to broadcast close tx: Transaction rejected: output already spent
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to force close: channel not found
@ -20,6 +20,7 @@
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcSuccessResolver: Transaction rejected: output already spent
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: htlcswitch shutting down
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: TxNotifier is exiting
<time> [ERR] CNCT: *contractcourt.commitSweepResolver(<chan_point>): unable to sweep input: remote party swept utxo
<time> [ERR] CNCT: Unable to advance state: channel not found
<time> [ERR] CNCT: unable to hand breached contract off to breachArbiter: server is shutting down
<time> [ERR] CNCT: unable to handle channel breach for chan_point=<chan_point>: server is shutting down
@ -35,6 +36,7 @@
<time> [ERR] CRTR: Payment with hash <hex> failed: insufficient_balance
<time> [ERR] CRTR: Payment with hash <hex> failed: no_route
<time> [ERR] CRTR: Payment with hash <hex> failed: router shutting down
<time> [ERR] CRTR: Payment with hash <hex> failed: timeout
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: error.
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: incorrect_payment_details.
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: no_route.
@ -47,19 +49,26 @@
<time> [ERR] DISC: Unable to reply to peer query: set tcp <ip>: use of closed network connection
<time> [ERR] DISC: Unable to reply to peer query: write tcp <ip>-><ip>: use of closed network connection
<time> [ERR] DISC: Unable to reply to peer query: write tcp <ip>-><ip>: write: broken pipe
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=channel too large
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: canceled adding new channel
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: peer exiting
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: unable to get best block: the client has been shutdown
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): epoch client shutting down
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): funding manager shutting down
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): waiting for fundingconfirmation failed
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: ChainNotifier shutting down, cannot complete funding flow for ChannelPoint(<chan_point>)
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: channel announcement proof for short_chan_id=<cid> isn't valid: can't verify first bitcoin signature
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: funding manager shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: gossiper is shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: router shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: unable add proof to the channel chanID=<hex>: edge marked as zombie
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: unable add proof to the channel chanID=<hex>: edge not found
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: gossiper is shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: router shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: funding manager shutting down
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed sending fundingLocked: funding manager shutting down
@ -72,34 +81,30 @@
<time> [ERR] FNDG: Unable to send node announcement: gossiper is shutting down
<time> [ERR] FNDG: Unable to send node announcement: router shutting down
<time> [ERR] HSWC: AmountBelowMinimum(amt=<amt>, update=(lnwire.ChannelUpdate) {
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=sync error with error: remote error
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=invalid update with error: remote error
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=sync error with error: remote error
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=unable to resume channel, recovery required with error: remote error
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to handle upstream settle HTLC: Invalid payment preimage <hex> for hash <hex> with error: invalid update
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: ChannelPoint(<chan_point>) with CommitPoint(<hex>) had possible local commitment state data loss with error: unable to resume channel, recovery required
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: possible remote commitment state data loss with error: sync error
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): peer exiting with error: unable to resume channel, recovery required
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): set tcp <ip>: use of closed network connection with error: unable to resume channel, recovery required
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): set tcp <ip>: use of closed network connection with error: unable to resume channel, recovery required
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: use of closed network connection with error: unable to resume channel, recovery required
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: write: broken pipe with error: unable to resume channel, recovery required
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: write: connection reset by peer with error: unable to resume channel, recovery required
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to update commitment: link shutting down with error: internal error
<time> [ERR] HSWC: ChannelLink(<chan>): link failed, exiting htlcManager
<time> [ERR] HSWC: ChannelLink(<chan>): outgoing htlc(<hex>) has insufficient fee: expected 575000, got 1075
<time> [ERR] HSWC: ChannelLink(<chan>): outgoing htlc(<hex>) is too small: min_htlc=<amt>, htlc_value=<amt>
<time> [ERR] HSWC: ChannelLink(<chan>): unable to cancel incoming HTLC for circuit-key=(Chan ID=<chan>, HTLC ID=0): HTLC with ID 0 has already been failed
<time> [ERR] HSWC: ChannelLink(<chan>): unable to decode onion hop iterator: TemporaryChannelFailure
<time> [ERR] HSWC: ChannelLink(<chan>): unable to update signals
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: AmountBelowMinimum(amt=4000 mSAT, update=(lnwire.ChannelUpdate) {
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: circuit has already been closed
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: insufficient bandwidth to route htlc
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: node configured to disallow forwards
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: UnknownNextPeer
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: AmountBelowMinimum(amt=<amt>, update=(lnwire.ChannelUpdate) {
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: circuit has already been closed
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: FeeInsufficient(htlc_amt==<amt>, update=(lnwire.ChannelUpdate) {
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: insufficient bandwidth to route htlc
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: node configured to disallow forwards
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: UnknownNextPeer
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): set tcp <ip>: use of closed network connection with error: unable to resume channel, recovery required
<time> [ERR] HSWC: FeeInsufficient(htlc_amt==<amt>, update=(lnwire.ChannelUpdate) {
<time> [ERR] HSWC: insufficient bandwidth to route htlc
<time> [ERR] HSWC: Link <chan> not found
@ -109,10 +114,16 @@
<time> [ERR] HSWC: unable to find target channel for HTLC fail: channel ID = <chan>, HTLC ID = <id>
<time> [ERR] HSWC: Unable to forward resolution msg: unable to find target channel for HTLC fail: channel ID = <chan>, HTLC ID = <id>
<time> [ERR] HSWC: unable to process onion packet: sphinx packet replay attempted
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: AmountBelowMinimum(amt=<amt>, update=(lnwire.ChannelUpdate) {
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: circuit has already been closed
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: FeeInsufficient(htlc_amt==<amt>, update=(lnwire.ChannelUpdate) {
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: insufficient bandwidth to route htlc
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: node configured to disallow forwards
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: UnknownNextPeer
<time> [ERR] HSWC: UnknownNextPeer
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed with local data loss: remote believes our tail height is <height>, while we have <height>!
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed: remote believes our tail height is <height>, while we have <height>!
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed: remote's next commit height is <height>, while we believe it is <height>!
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed with local data loss: remote believes our tail height is <height>, while we have <height>!
<time> [ERR] LNWL: Neutrino rescan ended with error: rescan exited
<time> [ERR] LNWL: Notifying unmined tx notification (<hex>) while creating notification for blocks
<time> [ERR] LNWL: Rescan for <num> addresses failed: the client has been shutdown
@ -120,14 +131,20 @@
<time> [ERR] NANN: Unable to retrieve chan status for Channel(<chan_point>): edge not found
<time> [ERR] NANN: Unable to retrieve chan status for Channel(<chan_point>): unable to extract ChannelUpdate for channel <chan_point>
<time> [ERR] NANN: Unable to sign update disabling channel(<chan_point>): edge not found
<time> [ERR] NTFN: chain notifier shutting down
<time> [ERR] NTFN: Error during rescan: rescan exited
<time> [ERR] NTFN: Failed getting UTXO: get utxo request cancelled
<time> [ERR] NTFN: Rescan to determine the spend details of <chan_point> failed: the client has been shutdown
<time> [ERR] NTFN: Unable to fetch block header: the client has been shutdown
<time> [ERR] NTFN: unable to find blockhash for height=<height>: -1: Block number out of range
<time> [ERR] NTFN: unable to get block: the client has been shutdown
<time> [ERR] NTFN: unable to get hash from block with height 790
<time> [ERR] NTFN: unable to get missed blocks: starting height <height> is greater than ending height <height>
<time> [ERR] NTFN: Unable to rewind chain from height <height> to height <height>: unable to find blockhash for disconnected height=<height>: -1: Block number out of range
<time> [ERR] NTNF: unable to get hash from block with height <height>
<time> [ERR] PEER: Allowed test error from <ip> (inbound): ReadMessage: unhandled command [sendaddrv2]
<time> [ERR] PEER: resend failed: unable to fetch channel sync messages for peer <hex>@<ip>: unable to find closed channel summary
<time> [ERR] PEER: unable to close channel, ChannelID(<hex>) is unknown
<time> [ERR] PEER: unable to force close link(<chan>): ChainArbitrator exiting
<time> [ERR] PEER: unable to force close link(<chan>): channel not found
<time> [ERR] PEER: unable to force close link(<chan>): unable to find arbitrator
@ -137,17 +154,38 @@
<time> [ERR] PEER: unable to send msg to remote peer: write tcp <ip>-><ip>: write: connection reset by peer
<time> [ERR] RPCS: [/chainrpc.ChainNotifier/RegisterBlockEpochNtfn]: chain notifier shutting down
<time> [ERR] RPCS: [/chainrpc.ChainNotifier/RegisterBlockEpochNtfn]: context canceled
<time> [ERR] RPCS: [closechannel] unable to close ChannelPoint(<chan_point>): chain notifier shutting down
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: already connected to peer: <hex>@<ip>
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: dial tcp <ip>: i/o timeout
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: dial tcp <ip>: i/o timeout
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: read tcp <ip>-><ip>: i/o timeout
<time> [ERR] RPCS: Failed receiving from stream: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: Failed receiving from stream: rpc error: code = DeadlineExceeded desc = context deadline exceeded
<time> [ERR] RPCS: Failed sending error response: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: Failed sending error response: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
<time> [ERR] RPCS: Failed sending response: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: Failed sending response: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
<time> [ERR] RPCS: [/invoicesrpc.Invoices/SubscribeSingleInvoice]: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: invalid permission action. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: invalid permission entity. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: permission list cannot be empty. specify at least one action/entity pair. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot close channel with state: ChanStatusRestored
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot co-op close frozen channel as initiator until height=<height>, (current_height=<height>)
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot co-op close frozen channel as initiator until height=3059, (current_height=3055)
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot co-op close frozen channel as initiator until height=<height>, (current_height=<height>)
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: chain notifier shutting down
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: already connected to peer: <hex>@<ip>
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: dial tcp <ip>: i/o timeout
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: dial tcp <ip>: i/o timeout
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: read tcp <ip>-><ip>: i/o timeout
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: server is still in the process of starting
<time> [ERR] RPCS: [/lnrpc.Lightning/DeleteMacaroonID]: the specified ID cannot be deleted
<time> [ERR] RPCS: [/lnrpc.Lightning/FundingStateStep]: pendingChanID(<hex>) already has intent registered
<time> [ERR] RPCS: [/lnrpc.Lightning/GetChanInfo]: edge marked as zombie
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: channels cannot be created before the wallet is fully synced
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=channel too large
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
<time> [ERR] RPCS: [/lnrpc.Lightning/PendingChannels]: unable to find arbitrator
@ -168,69 +206,33 @@
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeChannelGraph]: rpc error: code = DeadlineExceeded desc = context deadline exceeded
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeChannelGraph]: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeInvoices]: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: [/routerrpc.Router/HtlcInterceptor]: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: [/routerrpc.Router/SendPayment]: routerrpc server shutting down
<time> [ERR] RPCS: [/routerrpc.Router/SendPaymentV2]: context canceled
<time> [ERR] RPCS: [/routerrpc.Router/SendPaymentV2]: routerrpc server shutting down
<time> [ERR] RPCS: [/routerrpc.Router/SubscribeHtlcEvents]: context canceled
<time> [ERR] RPCS: [/routerrpc.Router/SubscribeHtlcEvents]: htlc event subscription terminated
<time> [ERR] RPCS: [closechannel] unable to close ChannelPoint(<chan_point>): chain notifier shutting down
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: already connected to peer: <hex>@<ip>
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: read tcp <ip>-><ip>: i/o timeout
<time> [ERR] RPCS: Failed receiving from stream: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: Failed receiving from stream: rpc error: code = DeadlineExceeded desc = context deadline exceeded
<time> [ERR] RPCS: Failed sending error response: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: Failed sending error response: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
<time> [ERR] RPCS: Failed sending response: rpc error: code = Canceled desc = context canceled
<time> [ERR] RPCS: Failed sending response: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
<time> [ERR] RPCS: [/routerrpc.Route<time> [INF] LTND: Listening on the p2p interface is disabled!
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: must provide ephemeral pubkey
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: use either key_desc or key_loc
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: use either raw_key_bytes or key_index
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: when setting key_desc the field key_desc.key_loc must also be set
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=channel too large
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
<time> [ERR] RPCS: [/walletrpc.WalletKit/LabelTransaction]: cannot label transaction with empty label
<time> [ERR] RPCS: [/walletrpc.WalletKit/LabelTransaction]: transaction already labelled
<time> [ERR] RPCS: Websocket receive error from <ip>: read tcp4 <ip>-><ip>: use of closed network connection
<time> [ERR] RPCS: Websocket receive error from <ip>: websocket: close 1006 unexpected EOF
<time> [ERR] RPCS: WS: error closing upgraded conn: write tcp4 <ip>-><ip>: write: connection reset by peer
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: dial tcp <ip>: i/o timeout
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: dial tcp <ip>: i/o timeout
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: read tcp <ip>-><ip>: i/o timeout
<time> [ERR] SRVR: Unable to retrieve advertised address for node <hex>: no advertised addresses found
<time> [ERR] SRVR: Unable to retrieve advertised address for node <hex>: unable to find node
<time> [ERR] UTXN: error while graduating class at height=<height>: TxNotifier is exiting
<time> [ERR] UTXN: Failed to sweep first-stage HTLC (CLTV-delayed) output <chan_point>
<time> [ERR] UTXN: Notification chan closed, can't advance output <chan_point>
<time> [ERR] RPCS: [/walletrpc.WalletKit/LabelTransaction]: cannot label transaction with empty label
<time> [ERR] RPCS: [/walletrpc.WalletKit/LabelTransaction]: transaction already labelled
<time> [ERR] NTFN: unable to get hash from block with height 790
<time> [ERR] CRTR: Payment with hash <hex> failed: timeout
<time> [ERR] RPCS: [/routerrpc.Route<time> [INF] LTND: Listening on the p2p interface is disabled!
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: gossiper is shutting down
<time> [ERR] PEER: unable to close channel, ChannelID(<hex>) is unknown
<time> [ERR] HSWC: ChannelLink(<chan>): unable to update signals
<time> [ERR] RPCS: [/routerrpc.Router/HtlcInterceptor]: rpc error: code = Canceled desc = context canceled
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=channel too large
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=channel too large
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=channel too large
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
<time> [ERR] NTNF: unable to get hash from block with height <height>
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: canceled adding new channel
<time> [ERR] RPCS: WS: error closing upgraded conn: write tcp4 <ip>-><ip>: write: connection reset by peer
<time> [ERR] NTFN: chain notifier shutting down
<time> [ERR] NTFN: Failed getting UTXO: get utxo request cancelled
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: invalid permission action. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: invalid permission entity. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: permission list cannot be empty. specify at least one action/entity pair. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
<time> [ERR] RPCS: [/lnrpc.Lightning/DeleteMacaroonID]: the specified ID cannot be deleted
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: dial tcp <ip>: i/o timeout
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: dial tcp <ip>: i/o timeout
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: dial tcp <ip>: i/o timeout
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: dial tcp <ip>: i/o timeout
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: dial tcp <ip>: i/o timeout
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: dial tcp <ip>: i/o timeout
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: use either key_desc or key_loc
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: use either raw_key_bytes or key_index
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: when setting key_desc the field key_desc.key_loc must also be set
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: must provide ephemeral pubkey
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: chacha20poly1305: message authentication failed
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to update commitment: link shutting down with error: internal error