From 003441d7e9397f5e55fcf2668982d945ed833839 Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Thu, 19 Sep 2019 12:46:17 -0700 Subject: [PATCH 1/5] build: adding missing unit test dep to go.sum --- go.sum | 1 + 1 file changed, 1 insertion(+) diff --git a/go.sum b/go.sum index 96320895..27321d10 100644 --- a/go.sum +++ b/go.sum @@ -51,6 +51,7 @@ github.com/btcsuite/snappy-go v1.0.0 h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= From 1d41d4d6665d80955c5cd226846ad1053c55099b Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Thu, 19 Sep 2019 12:46:29 -0700 Subject: [PATCH 2/5] multi: move WaitPredicate, WaitNoError, WaitInvariant to lntest/wait --- breacharbiter_test.go | 4 +- discovery/gossiper_test.go | 6 +- discovery/reliable_sender_test.go | 4 +- discovery/sync_manager_test.go | 6 +- lntest/harness.go | 85 +--------- ...d_multi-hop_htlc_local_chain_claim_test.go | 9 +- ...ulti-hop_htlc_receiver_chain_claim_test.go | 7 +- ..._multi-hop_htlc_remote_chain_claim_test.go | 7 +- lntest/itest/lnd_test.go | 157 +++++++++--------- lntest/itest/onchain.go | 3 +- lntest/node.go | 7 +- lntest/wait/wait.go | 78 +++++++++ 12 files changed, 193 insertions(+), 180 deletions(-) create mode 100644 lntest/wait/wait.go diff --git a/breacharbiter_test.go b/breacharbiter_test.go index aff7459c..9d95ea4f 100644 --- a/breacharbiter_test.go +++ b/breacharbiter_test.go @@ -29,7 +29,7 @@ import ( "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/shachain" @@ -1537,7 +1537,7 @@ func assertBrarCleanup(t *testing.T, brar *breachArbiter, t.Helper() - err := lntest.WaitNoError(func() error { + err := wait.NoError(func() error { isBreached, err := brar.IsBreached(chanPoint) if err != nil { return err diff --git a/discovery/gossiper_test.go b/discovery/gossiper_test.go index 5f224d58..40d8148e 100644 --- a/discovery/gossiper_test.go +++ b/discovery/gossiper_test.go @@ -24,7 +24,7 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnpeer" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing" "github.com/lightningnetwork/lnd/routing/route" @@ -3432,7 +3432,7 @@ func TestSendChannelUpdateReliably(t *testing.T) { // Since the messages above are now deemed as stale, they should be // removed from the message store. 
- err = lntest.WaitNoError(func() error { + err = wait.NoError(func() error { msgs, err := ctx.gossiper.cfg.MessageStore.Messages() if err != nil { return fmt.Errorf("unable to retrieve pending "+ @@ -3491,7 +3491,7 @@ func assertBroadcastMsg(t *testing.T, ctx *testCtx, // predicate returns true for any of the messages, so we'll continue to // retry until either we hit our timeout, or it returns with no error // (message found). - err := lntest.WaitNoError(func() error { + err := wait.NoError(func() error { select { case msg := <-ctx.broadcastedMessage: return predicate(msg.msg) diff --git a/discovery/reliable_sender_test.go b/discovery/reliable_sender_test.go index 1e503a0b..66d1767f 100644 --- a/discovery/reliable_sender_test.go +++ b/discovery/reliable_sender_test.go @@ -8,7 +8,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/lnpeer" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" ) @@ -244,7 +244,7 @@ func TestReliableSenderStaleMessages(t *testing.T) { // message store since it is seen as stale and has been sent at least // once. Once the message is removed, the peerHandler should be torn // down as there are no longer any pending messages within the store. - err := lntest.WaitNoError(func() error { + err := wait.NoError(func() error { msgs, err := reliableSender.cfg.MessageStore.MessagesForPeer( peerPubKey, ) diff --git a/discovery/sync_manager_test.go b/discovery/sync_manager_test.go index 18404979..d165447f 100644 --- a/discovery/sync_manager_test.go +++ b/discovery/sync_manager_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/davecgh/go-spew/spew" - "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/ticker" ) @@ -504,7 +504,7 @@ func assertSyncerStatus(t *testing.T, s *GossipSyncer, syncState syncerState, // We'll check the status of our syncer within a WaitPredicate as some // sync transitions might cause this to be racy. 
- err := lntest.WaitNoError(func() error { + err := wait.NoError(func() error { state := s.syncState() if s.syncState() != syncState { return fmt.Errorf("expected syncState %v for peer "+ @@ -545,7 +545,7 @@ func assertTransitionToChansSynced(t *testing.T, s *GossipSyncer, peer *mockPeer t.Fatal("expected to receive FilterKnownChanIDs request") } - err := lntest.WaitNoError(func() error { + err := wait.NoError(func() error { state := syncerState(atomic.LoadUint32(&s.state)) if state != chansSynced { return fmt.Errorf("expected syncerState %v, got %v", diff --git a/lntest/harness.go b/lntest/harness.go index 317271c2..f010c829 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -22,6 +22,7 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" ) @@ -493,7 +494,7 @@ func (n *NetworkHarness) EnsureConnected(ctx context.Context, a, b *HarnessNode) return false } - err := WaitPredicate(func() bool { + err := wait.Predicate(func() bool { return findSelfInPeerList(a, b) && findSelfInPeerList(b, a) }, time.Second*15) if err != nil { @@ -526,7 +527,7 @@ func (n *NetworkHarness) ConnectNodes(ctx context.Context, a, b *HarnessNode) er return err } - err = WaitPredicate(func() bool { + err = wait.Predicate(func() bool { // If node B is seen in the ListPeers response from node A, // then we can exit early as the connection has been fully // established. @@ -1068,12 +1069,12 @@ func (n *NetworkHarness) CloseChannel(ctx context.Context, // Before proceeding, we'll ensure that the channel is active // for both nodes. - err = WaitPredicate(activeChanPredicate(lnNode), timeout) + err = wait.Predicate(activeChanPredicate(lnNode), timeout) if err != nil { return nil, nil, fmt.Errorf("channel of closing " + "node not active in time") } - err = WaitPredicate(activeChanPredicate(receivingNode), timeout) + err = wait.Predicate(activeChanPredicate(receivingNode), timeout) if err != nil { return nil, nil, fmt.Errorf("channel of receiving " + "node not active in time") @@ -1194,85 +1195,13 @@ func (n *NetworkHarness) AssertChannelExists(ctx context.Context, return false } - if err := WaitPredicate(pred, time.Second*15); err != nil { + if err := wait.Predicate(pred, time.Second*15); err != nil { return fmt.Errorf("channel not found: %v", predErr) } return nil } -// WaitPredicate is a helper test function that will wait for a timeout period -// of time until the passed predicate returns true. This function is helpful as -// timing doesn't always line up well when running integration tests with -// several running lnd nodes. This function gives callers a way to assert that -// some property is upheld within a particular time frame. -func WaitPredicate(pred func() bool, timeout time.Duration) error { - const pollInterval = 20 * time.Millisecond - - exitTimer := time.After(timeout) - for { - <-time.After(pollInterval) - - select { - case <-exitTimer: - return fmt.Errorf("predicate not satisfied after time out") - default: - } - - if pred() { - return nil - } - } -} - -// WaitNoError is a wrapper around WaitPredicate that waits for the passed -// method f to execute without error, and returns the last error encountered if -// this doesn't happen within the timeout. 
-func WaitNoError(f func() error, timeout time.Duration) error { - var predErr error - pred := func() bool { - if err := f(); err != nil { - predErr = err - return false - } - return true - } - - // If f() doesn't succeed within the timeout, return the last - // encountered error. - if err := WaitPredicate(pred, timeout); err != nil { - return predErr - } - - return nil -} - -// WaitInvariant is a helper test function that will wait for a timeout period -// of time, verifying that a statement remains true for the entire duration. -// This function is helpful as timing doesn't always line up well when running -// integration tests with several running lnd nodes. This function gives callers -// a way to assert that some property is maintained over a particular time -// frame. -func WaitInvariant(statement func() bool, timeout time.Duration) error { - const pollInterval = 20 * time.Millisecond - - exitTimer := time.After(timeout) - for { - <-time.After(pollInterval) - - // Fail if the invariant is broken while polling. - if !statement() { - return fmt.Errorf("invariant broken before time out") - } - - select { - case <-exitTimer: - return nil - default: - } - } -} - // DumpLogs reads the current logs generated by the passed node, and returns // the logs as a single string. This function is useful for examining the logs // of a particular node in the case of a test failure. @@ -1373,7 +1302,7 @@ func (n *NetworkHarness) sendCoins(ctx context.Context, amt btcutil.Amount, // Now, wait for ListUnspent to show the unconfirmed transaction // containing the correct pkscript. - err = WaitNoError(func() error { + err = wait.NoError(func() error { // Since neutrino doesn't support unconfirmed outputs, skip // this check. if target.cfg.BackendCfg.Name() == "neutrino" { diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index 7b478b00..0b155c97 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -13,6 +13,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" ) @@ -73,7 +74,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // the created HTLC pending on all of them. var predErr error nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { return false @@ -229,7 +230,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // At this point, Bob should have broadcast his second layer success // transaction, and should have sent it to the nursery for incubation. 
pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -321,7 +322,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) block = mineBlocks(t, net, 1, 1)[0] assertTxInBlock(t, block, bobSweep) - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -357,7 +358,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) } // Also Carol should have no channels left (open nor pending). - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := carol.PendingChannels( ctxt, pendingChansRequest, diff --git a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go index 4f9eff93..fc497aec 100644 --- a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go @@ -14,6 +14,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" ) @@ -75,7 +76,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // the created HTLC pending on all of them. var predErr error nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { return false @@ -230,7 +231,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // extracted the preimage from the chain, and sent it back to Alice, // clearing the HTLC off-chain. nodes = []*lntest.HarnessNode{net.Alice} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { return false @@ -259,7 +260,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) if _, err := net.Miner.Node.Generate(1); err != nil { t.Fatalf("unable to mine block: %v", err) } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest) if err != nil { diff --git a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go index a5cfbb06..14894e99 100644 --- a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go @@ -13,6 +13,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" ) @@ -72,7 +73,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // the created HTLC pending on all of them. 
var predErr error nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { return false @@ -252,7 +253,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // recognize that all contracts have been fully resolved, and show no // pending close channels. pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -291,7 +292,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest assertTxInBlock(t, block, carolSweep) pendingChansRequest = &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := carol.PendingChannels( ctxt, pendingChansRequest, diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index e92f7ca4..fbc0ea1e 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -37,6 +37,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc" "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/routing" @@ -366,7 +367,7 @@ func assertChannelClosed(ctx context.Context, t *harnessTest, // Finally, the transaction should no longer be in the waiting close // state as we've just mined a block that should include the closing // transaction. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { pendingChansRequest := &lnrpc.PendingChannelsRequest{} pendingChanResp, err := node.PendingChannels( ctx, pendingChansRequest, @@ -406,7 +407,7 @@ func waitForChannelPendingForceClose(ctx context.Context, } var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { pendingChansRequest := &lnrpc.PendingChannelsRequest{} pendingChanResp, err := node.PendingChannels( ctx, pendingChansRequest, @@ -480,7 +481,7 @@ func numOpenChannelsPending(ctxt context.Context, node *lntest.HarnessNode) (int func assertNumOpenChannelsPending(ctxt context.Context, t *harnessTest, alice, bob *lntest.HarnessNode, expected int) { - err := lntest.WaitNoError(func() error { + err := wait.NoError(func() error { aliceNumChans, err := numOpenChannelsPending(ctxt, alice) if err != nil { return fmt.Errorf("error fetching alice's node (%v) "+ @@ -643,7 +644,7 @@ func completePaymentRequests(ctx context.Context, client lnrpc.LightningClient, // should still wait long enough for the server to receive and handle // the send before cancelling the request. We wait for the number of // updates to one of our channels has increased before we return. 
- err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctx, defaultTimeout) newListResp, err := client.ListChannels(ctxt, req) if err != nil { @@ -783,7 +784,7 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) { currBalance int64 currNumUTXOs uint32 ) - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { req := &lnrpc.WalletBalanceRequest{} ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) resp, err := node.WalletBalance(ctxt, req) @@ -1741,7 +1742,7 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { func waitForNodeBlockHeight(ctx context.Context, node *lntest.HarnessNode, height int32) error { var predErr error - err := lntest.WaitPredicate(func() bool { + err := wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctx, 10*time.Second) info, err := node.GetInfo(ctxt, &lnrpc.GetInfoRequest{}) if err != nil { @@ -1769,7 +1770,7 @@ func assertMinerBlockHeightDelta(t *harnessTest, // Ensure the chain lengths are what we expect. var predErr error - err := lntest.WaitPredicate(func() bool { + err := wait.Predicate(func() bool { _, tempMinerHeight, err := tempMiner.Node.GetBestBlock() if err != nil { predErr = fmt.Errorf("unable to get current "+ @@ -2003,7 +2004,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) { } var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) chanGraph, err = net.Alice.DescribeGraph(ctxt, req) if err != nil { @@ -2132,7 +2133,7 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) { // Disconnect Alice-peer from Bob-peer without getting error // about existing channels. var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { if err := net.DisconnectNodes(ctxt, net.Alice, net.Bob); err != nil { predErr = err return false @@ -2442,7 +2443,7 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) { // is equal to the amount of invoices * payAmt. var unsettledErr error nodes := []*lntest.HarnessNode{net.Alice, carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { // There should be a number of PendingHtlcs equal // to the amount of Invoices sent. unsettledErr = assertNumActiveHtlcs(nodes, numInvoices) @@ -2715,7 +2716,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // show that the HTLC has been locked in. nodes := []*lntest.HarnessNode{net.Alice, carol} var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numInvoices) if predErr != nil { return false @@ -2818,7 +2819,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Now that the commitment has been confirmed, the channel should be // marked as force closed. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels( ctxt, pendingChansRequest, @@ -2902,7 +2903,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Alice should see the channel in her set of pending force closed // channels with her funds still in limbo. 
- err = lntest.WaitNoError(func() error { + err = wait.NoError(func() error { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels( ctxt, pendingChansRequest, @@ -3006,7 +3007,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to get best block height") } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { // Now that the commit output has been fully swept, check to see // that the channel remains open for the pending htlc outputs. ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) @@ -3079,7 +3080,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Alice should now see the channel in her set of pending force closed // channels with one pending HTLC. - err = lntest.WaitNoError(func() error { + err = wait.NoError(func() error { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels( ctxt, pendingChansRequest, @@ -3207,7 +3208,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Now that the channel has been fully swept, it should no longer show // incubated, check to see that Alice's node still reports the channel // as pending force closed. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err = net.Alice.PendingChannels( ctxt, pendingChansRequest, @@ -3308,7 +3309,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Now that the channel has been fully swept, it should no longer show // incubated, check to see that Alice's node still reports the channel // as pending force closed. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels( ctxt, pendingChansRequest, @@ -3357,7 +3358,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { // Now that the channel has been fully swept, it should no longer show // up within the pending channels RPC. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels( ctxt, pendingChansRequest, @@ -4798,7 +4799,7 @@ func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) { // Give the network a chance to learn that auth proof is confirmed. var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { // The channel should now be announced. Check that Alice has 1 // announced edge. req.IncludeUnannounced = false @@ -5126,7 +5127,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) { } var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { aliceChans := numChannels(net.Alice, true) if aliceChans != 4 { predErr = fmt.Errorf("expected Alice to know 4 edges, "+ @@ -5319,7 +5320,7 @@ func testInvoiceRoutingHints(net *lntest.NetworkHarness, t *harnessTest) { // Alice and Bob should be the only channel used as a routing hint. 
var predErr error var decoded *lnrpc.PayReq - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) resp, err := net.Alice.AddInvoice(ctxt, invoice) if err != nil { @@ -6329,7 +6330,7 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) { // Since Alice detects that Carol is trying to trick her by providing a // fake preimage, she should fail and force close the channel. var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels(ctxt, @@ -6359,7 +6360,7 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) { // The channel should now show up as force closed both for Alice and // Carol. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels(ctxt, @@ -6387,7 +6388,7 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("%v", predErr) } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := carol.PendingChannels(ctxt, @@ -6435,7 +6436,7 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) { } // No pending channels should be left. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels(ctxt, @@ -6545,13 +6546,13 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to restart carol's node: %v", err) } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { return isConnected(net.Bob.PubKeyStr) }, 15*time.Second) if err != nil { t.Fatalf("alice did not reconnect to bob") } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { return isConnected(carol.PubKeyStr) }, 15*time.Second) if err != nil { @@ -6564,19 +6565,19 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to restart alice's node: %v", err) } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { return isConnected(net.Bob.PubKeyStr) }, 15*time.Second) if err != nil { t.Fatalf("alice did not reconnect to bob") } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { return isConnected(carol.PubKeyStr) }, 15*time.Second) if err != nil { t.Fatalf("alice did not reconnect to carol") } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { return isConnected(dave.PubKeyStr) }, 15*time.Second) if err != nil { @@ -6604,7 +6605,7 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to restart %v's node: %v", node.Name(), err) } - err = lntest.WaitInvariant(func() bool { + err = wait.Invariant(func() bool { return !isConnected(node.PubKeyStr) }, 5*time.Second) if err != nil { @@ -6614,14 +6615,14 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { if err := net.RestartNode(net.Alice, nil); err != 
nil { t.Fatalf("unable to restart alice's node: %v", err) } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { return isConnected(dave.PubKeyStr) }, 20*time.Second) if err != nil { t.Fatalf("alice didn't reconnect to Dave") } - err = lntest.WaitInvariant(func() bool { + err = wait.Invariant(func() bool { return !isConnected(node.PubKeyStr) }, 5*time.Second) if err != nil { @@ -6656,7 +6657,7 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) { // fully cleaned up for both Carol and Alice. var predErr error pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Alice.PendingChannels( ctxt, pendingChansRequest, @@ -6801,7 +6802,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { // satoshis each, Bob should now see his balance as being 30k satoshis. var bobChan *lnrpc.Channel var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) bChan, err := getChanInfo(ctxt, net.Bob) if err != nil { @@ -6884,7 +6885,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { // feel the wrath of Carol's retribution. var closeUpdates lnrpc.Lightning_CloseChannelClient force := true - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) closeUpdates, _, err = net.CloseChannel(ctxt, net.Bob, chanPoint, force) if err != nil { @@ -7129,7 +7130,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness closeErr error force bool = true ) - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) closeUpdates, closeTxId, closeErr = net.CloseChannel( ctxt, carol, chanPoint, force, @@ -7532,7 +7533,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, return nil, errNotFound } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { txid, err := findJusticeTx() if err != nil { predErr = err @@ -7552,7 +7553,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness, // as the last argument, indicating we don't care what's in the // mempool. mineBlocks(t, net, 1, 0) - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { txid, err := findJusticeTx() if err != nil { predErr = err @@ -7938,7 +7939,7 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness, // Ensure that Willy doesn't get any funds, as he is acting as an // altruist watchtower. 
var predErr error - err = lntest.WaitInvariant(func() bool { + err = wait.Invariant(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) willyBalReq := &lnrpc.WalletBalanceRequest{} willyBalResp, err := willy.WalletBalance(ctxt, willyBalReq) @@ -7967,7 +7968,7 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness, t.Fatalf("unable to restart dave: %v", err) } - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) daveBalReq := &lnrpc.ChannelBalanceRequest{} daveBalResp, err := dave.ChannelBalance(ctxt, daveBalReq) @@ -7990,7 +7991,7 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness, assertNumPendingChannels(t, dave, 0, 0) - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) daveBalReq := &lnrpc.WalletBalanceRequest{} daveBalResp, err := dave.WalletBalance(ctxt, daveBalReq) @@ -8023,7 +8024,7 @@ func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode, ctxb := context.Background() var predErr error - err := lntest.WaitPredicate(func() bool { + err := wait.Predicate(func() bool { pendingChansRequest := &lnrpc.PendingChannelsRequest{} ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := node.PendingChannels(ctxt, @@ -8256,7 +8257,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) { // as being 30k satoshis. var nodeChan *lnrpc.Channel var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) bChan, err := getChanInfo(ctxt, node) if err != nil { @@ -8455,7 +8456,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) { mineBlocks(t, net, 1, 1) assertNodeNumChannels(t, dave, 0) - err = lntest.WaitNoError(func() error { + err = wait.NoError(func() error { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) daveBalResp, err := dave.WalletBalance(ctxt, balReq) if err != nil { @@ -8507,7 +8508,7 @@ func assertNodeNumChannels(t *harnessTest, node *lntest.HarnessNode, return true } - if err := lntest.WaitPredicate(pred, time.Second*15); err != nil { + if err := wait.Predicate(pred, time.Second*15); err != nil { t.Fatalf("node has incorrect number of channels: %v", predErr) } } @@ -9591,7 +9592,7 @@ func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) { // Wait for the revocation to be received so alice no longer has pending // htlcs listed and has correct balances. This is needed due to the fact // that we now pipeline the settles. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) aliceChan, err := getChanInfo(ctxt, net.Alice) if err != nil { @@ -10124,7 +10125,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // proper parameters. var predErr error nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, dustPayHash, payHash) if predErr != nil { return false @@ -10170,7 +10171,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // that we sent earlier. This means Alice should now only have a single // HTLC on her channel. 
nodes = []*lntest.HarnessNode{net.Alice} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) if predErr != nil { return false @@ -10236,7 +10237,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // Therefore, at this point, there should be no active HTLC's on the // commitment transaction from Alice -> Bob. nodes = []*lntest.HarnessNode{net.Alice} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { return false @@ -10267,7 +10268,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // Once this transaction has been confirmed, Bob should detect that he // no longer has any pending channels. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err = net.Bob.PendingChannels(ctxt, pendingChansRequest) if err != nil { @@ -10343,7 +10344,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // have the it locked in. var predErr error nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) if predErr != nil { return false @@ -10364,7 +10365,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // At this point, Bob should have a pending force close channel as he // just went to chain. pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels(ctxt, pendingChansRequest) @@ -10413,7 +10414,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // Bob's pending channel report should show that he has a single HTLC // that's now in stage one. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -10463,7 +10464,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // With the second layer timeout transaction confirmed, Bob should have // cancelled backwards the HTLC that carol sent. nodes = []*lntest.HarnessNode{net.Alice} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { return false @@ -10476,7 +10477,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // Additionally, Bob should now show that HTLC as being advanced to the // second stage. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -10530,7 +10531,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // At this point, Bob should no longer show any channels as pending // close. 
- err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -10607,7 +10608,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // show that the HTLC has been locked in. var predErr error nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) if predErr != nil { return false @@ -10628,7 +10629,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // At this point, Bob should have a pending force close channel as // Carol has gone directly to chain. pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -10668,7 +10669,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // If we check Bob's pending channel report, it should show that he has // a single HTLC that's now in the second stage, as skip the initial // first stage since this is a direct HTLC. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -10734,7 +10735,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // cancel back that HTLC. As a result, Alice should not know of any // active HTLC's. nodes = []*lntest.HarnessNode{net.Alice} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { return false @@ -10748,7 +10749,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // Now we'll check Bob's pending channel report. Since this was Carol's // commitment, he doesn't have to wait for any CSV delays. As a result, // he should show no additional pending transactions. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) pendingChanResp, err := net.Bob.PendingChannels( ctxt, pendingChansRequest, @@ -10952,7 +10953,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) { // Wait until all nodes in the network have 5 outstanding htlcs. var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numPayments) if predErr != nil { return false @@ -10990,7 +10991,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) { } // Ensure all nodes in the network still have 5 outstanding htlcs. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numPayments) if predErr != nil { return false @@ -11017,7 +11018,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) { // After the payments settle, there should be no active htlcs on any of // the nodes in the network. 
- err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { return false @@ -11275,7 +11276,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { // Wait for all of the payments to reach Carol. var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numPayments) if predErr != nil { return false @@ -11301,7 +11302,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { // Wait to ensure that the payment remain are not failed back after // reconnecting. All node should report the number payments initiated // for the duration of the interval. - err = lntest.WaitInvariant(func() bool { + err = wait.Invariant(func() bool { predErr = assertNumActiveHtlcs(nodes, numPayments) if predErr != nil { return false @@ -11328,7 +11329,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { // Wait for Carol to report no outstanding htlcs. carolNode := []*lntest.HarnessNode{carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(carolNode, 0) if predErr != nil { return false @@ -11347,7 +11348,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { } // Wait until all outstanding htlcs in the network have been settled. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { return false @@ -11603,7 +11604,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness } var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numPayments) if predErr != nil { return false @@ -11640,7 +11641,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness // Wait for Carol to report no outstanding htlcs, and also for Dav to // receive all the settles from Carol. carolNode := []*lntest.HarnessNode{carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(carolNode, 0) if predErr != nil { return false @@ -11675,7 +11676,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness // After reconnection succeeds, the settles should be propagated all // the way back to the sender. All nodes should report no active htlcs. - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) if predErr != nil { return false @@ -11938,7 +11939,7 @@ func testSwitchOfflineDeliveryOutgoingOffline( // Wait for all payments to reach Carol. var predErr error - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numPayments) if predErr != nil { return false @@ -11965,7 +11966,7 @@ func testSwitchOfflineDeliveryOutgoingOffline( // Wait for Carol to report no outstanding htlcs. carolNode := []*lntest.HarnessNode{carol} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(carolNode, 0) if predErr != nil { return false @@ -12016,7 +12017,7 @@ func testSwitchOfflineDeliveryOutgoingOffline( // Since Carol has been shutdown permanently, we will wait until all // other nodes in the network report no active htlcs. 
nodesMinusCarol := []*lntest.HarnessNode{net.Bob, net.Alice, dave} - err = lntest.WaitPredicate(func() bool { + err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodesMinusCarol, 0) if predErr != nil { return false @@ -13110,7 +13111,7 @@ func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { // assertBackupFileState is a helper function that we'll use to compare // the on disk back up file to our currentBackup pointer above. assertBackupFileState := func() { - err := lntest.WaitNoError(func() error { + err := wait.NoError(func() error { packedBackup, err := ioutil.ReadFile(backupFilePath) if err != nil { return fmt.Errorf("unable to read backup "+ @@ -13254,7 +13255,7 @@ func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) { // Before we proceed, we'll make two utility methods we'll use below // for our primary assertions. assertNumSingleBackups := func(numSingles int) { - err := lntest.WaitNoError(func() error { + err := wait.NoError(func() error { ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) req := &lnrpc.ChanBackupExportRequest{} chanSnapshot, err := carol.ExportAllChannelBackups( @@ -13896,7 +13897,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { // The payments should now show up in Alice's ListInvoices, with a zero // preimage, indicating they are not yet settled. - err = lntest.WaitNoError(func() error { + err = wait.NoError(func() error { req := &lnrpc.ListPaymentsRequest{ IncludeIncomplete: true, } diff --git a/lntest/itest/onchain.go b/lntest/itest/onchain.go index 2d10db52..ccc73feb 100644 --- a/lntest/itest/onchain.go +++ b/lntest/itest/onchain.go @@ -12,6 +12,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/sweep" ) @@ -140,7 +141,7 @@ func testCPFP(net *lntest.NetworkHarness, t *harnessTest) { mineBlocks(t, net, 1, 2) // The input used to CPFP should no longer be pending. - err = lntest.WaitNoError(func() error { + err = wait.NoError(func() error { req := &walletrpc.PendingSweepsRequest{} ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) resp, err := net.Bob.WalletKitClient.PendingSweeps(ctxt, req) diff --git a/lntest/node.go b/lntest/node.go index 2eeda37e..75a0d37f 100644 --- a/lntest/node.go +++ b/lntest/node.go @@ -27,6 +27,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc" "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/macaroons" "golang.org/x/net/context" "google.golang.org/grpc" @@ -468,7 +469,7 @@ func (hn *HarnessNode) initClientWhenReady() error { conn *grpc.ClientConn connErr error ) - if err := WaitNoError(func() error { + if err := wait.NoError(func() error { conn, connErr = hn.ConnectRPC(true) return connErr }, 5*time.Second); err != nil { @@ -543,7 +544,7 @@ func (hn *HarnessNode) initLightningClient(conn *grpc.ClientConn) error { // until then, we'll create a dummy subscription to ensure we can do so // successfully before proceeding. We use a dummy subscription in order // to not consume an update from the real one. 
- err = WaitNoError(func() error { + err = wait.NoError(func() error { req := &lnrpc.GraphTopologySubscription{} ctx, cancelFunc := context.WithCancel(context.Background()) topologyClient, err := hn.SubscribeChannelGraph(ctx, req) @@ -1064,7 +1065,7 @@ func (hn *HarnessNode) WaitForBalance(expectedBalance btcutil.Amount, confirmed return btcutil.Amount(balance.UnconfirmedBalance) == expectedBalance } - err := WaitPredicate(doesBalanceMatch, 30*time.Second) + err := wait.Predicate(doesBalanceMatch, 30*time.Second) if err != nil { return fmt.Errorf("balances not synced after deadline: "+ "expected %v, only have %v", expectedBalance, lastBalance) diff --git a/lntest/wait/wait.go b/lntest/wait/wait.go new file mode 100644 index 00000000..1ff16914 --- /dev/null +++ b/lntest/wait/wait.go @@ -0,0 +1,78 @@ +package wait + +import ( + "fmt" + "time" +) + +// Predicate is a helper test function that will wait for a timeout period of +// time until the passed predicate returns true. This function is helpful as +// timing doesn't always line up well when running integration tests with +// several running lnd nodes. This function gives callers a way to assert that +// some property is upheld within a particular time frame. +func Predicate(pred func() bool, timeout time.Duration) error { + const pollInterval = 20 * time.Millisecond + + exitTimer := time.After(timeout) + for { + <-time.After(pollInterval) + + select { + case <-exitTimer: + return fmt.Errorf("predicate not satisfied after time out") + default: + } + + if pred() { + return nil + } + } +} + +// NoError is a wrapper around Predicate that waits for the passed method f to +// execute without error, and returns the last error encountered if this doesn't +// happen within the timeout. +func NoError(f func() error, timeout time.Duration) error { + var predErr error + pred := func() bool { + if err := f(); err != nil { + predErr = err + return false + } + return true + } + + // If f() doesn't succeed within the timeout, return the last + // encountered error. + if err := Predicate(pred, timeout); err != nil { + return predErr + } + + return nil +} + +// Invariant is a helper test function that will wait for a timeout period of +// time, verifying that a statement remains true for the entire duration. This +// function is helpful as timing doesn't always line up well when running +// integration tests with several running lnd nodes. This function gives callers +// a way to assert that some property is maintained over a particular time +// frame. +func Invariant(statement func() bool, timeout time.Duration) error { + const pollInterval = 20 * time.Millisecond + + exitTimer := time.After(timeout) + for { + <-time.After(pollInterval) + + // Fail if the invariant is broken while polling. + if !statement() { + return fmt.Errorf("invariant broken before time out") + } + + select { + case <-exitTimer: + return nil + default: + } + } +} From 6dca07577d8a1925491587217bcbccb8764014f4 Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Thu, 19 Sep 2019 12:46:44 -0700 Subject: [PATCH 3/5] multi: move active/inactive ntfns from switch to link Since we will now wait to deliver the event after channel reestablish, notifying when the link is added to the switch will no longer be sufficient. Later, we will add receiving reestablish as an additional requirement for EligibleToForward returning true. The inactive ntfn is also moved, to ensure that we don't fire inactive notifications if no corresponding active notification was sent. 
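The pairing guarantee described above can be illustrated with a minimal, self-contained sketch (simplified placeholder types, not code from this patch): the link fires the active callback once it is ready, and defers the inactive callback so an inactive event is only ever emitted after a matching active one.

    package main

    import "fmt"

    // outPoint stands in for wire.OutPoint in this sketch.
    type outPoint struct {
        hash  string
        index uint32
    }

    // link carries the two notification callbacks, mirroring the new
    // NotifyActiveChannel/NotifyInactiveChannel fields on ChannelLinkConfig.
    type link struct {
        chanPoint      outPoint
        notifyActive   func(outPoint)
        notifyInactive func(outPoint)
    }

    // htlcManager emits the active event once on startup and defers the
    // inactive event, so the two are always paired when the link exits.
    func (l *link) htlcManager() {
        l.notifyActive(l.chanPoint)
        defer l.notifyInactive(l.chanPoint)

        // ... process HTLCs until shutdown ...
    }

    func main() {
        l := &link{
            chanPoint:      outPoint{hash: "txid", index: 0},
            notifyActive:   func(op outPoint) { fmt.Println("active:", op) },
            notifyInactive: func(op outPoint) { fmt.Println("inactive:", op) },
        }
        l.htlcManager()
    }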
--- htlcswitch/link.go | 16 ++++++++++++++++ htlcswitch/link_test.go | 4 ++++ htlcswitch/mock.go | 10 ++++------ htlcswitch/switch.go | 13 ------------- htlcswitch/test_utils.go | 2 ++ peer.go | 2 ++ server.go | 2 -- 7 files changed, 28 insertions(+), 21 deletions(-) diff --git a/htlcswitch/link.go b/htlcswitch/link.go index c904aa3e..5ad55bc7 100644 --- a/htlcswitch/link.go +++ b/htlcswitch/link.go @@ -262,6 +262,14 @@ type ChannelLinkConfig struct { // commitment fee to be of its balance. This only applies to the // initiator of the channel. MaxFeeAllocation float64 + + // NotifyActiveChannel allows the link to tell the ChannelNotifier when + // channels becomes active. + NotifyActiveChannel func(wire.OutPoint) + + // NotifyInactiveChannel allows the switch to tell the ChannelNotifier + // when channels become inactive. + NotifyInactiveChannel func(wire.OutPoint) } // channelLink is the service which drives a channel's commitment update @@ -870,6 +878,14 @@ func (l *channelLink) htlcManager() { log.Infof("HTLC manager for ChannelPoint(%v) started, "+ "bandwidth=%v", l.channel.ChannelPoint(), l.Bandwidth()) + // Funding locked has already been received, so we'll go ahead and + // deliver the active channel notification since EligibleToForward + // returns true now that the link has been added to the switch. We'll + // also defer the inactive notification for when the link exits to + // ensure that every active notification is matched by an inactive one. + l.cfg.NotifyActiveChannel(*l.ChannelPoint()) + defer l.cfg.NotifyInactiveChannel(*l.ChannelPoint()) + // TODO(roasbeef): need to call wipe chan whenever D/C? // If this isn't the first time that this channel link has been diff --git a/htlcswitch/link_test.go b/htlcswitch/link_test.go index 722c785c..e7a18751 100644 --- a/htlcswitch/link_test.go +++ b/htlcswitch/link_test.go @@ -1688,6 +1688,8 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( MaxFeeUpdateTimeout: 40 * time.Minute, MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, MaxFeeAllocation: DefaultMaxLinkFeeAllocation, + NotifyActiveChannel: func(wire.OutPoint) {}, + NotifyInactiveChannel: func(wire.OutPoint) {}, } aliceLink := NewChannelLink(aliceCfg, aliceLc.channel) @@ -4249,6 +4251,8 @@ func (h *persistentLinkHarness) restartLink( HodlMask: hodl.MaskFromFlags(hodlFlags...), MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, MaxFeeAllocation: DefaultMaxLinkFeeAllocation, + NotifyActiveChannel: func(wire.OutPoint) {}, + NotifyInactiveChannel: func(wire.OutPoint) {}, } aliceLink := NewChannelLink(aliceCfg, aliceChannel) diff --git a/htlcswitch/mock.go b/htlcswitch/mock.go index 98f3596e..a3174b8f 100644 --- a/htlcswitch/mock.go +++ b/htlcswitch/mock.go @@ -171,12 +171,10 @@ func initSwitchWithDB(startingHeight uint32, db *channeldb.DB) (*Switch, error) FetchLastChannelUpdate: func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) { return nil, nil }, - Notifier: &mockNotifier{}, - FwdEventTicker: ticker.NewForce(DefaultFwdEventInterval), - LogEventTicker: ticker.NewForce(DefaultLogInterval), - AckEventTicker: ticker.NewForce(DefaultAckInterval), - NotifyActiveChannel: func(wire.OutPoint) {}, - NotifyInactiveChannel: func(wire.OutPoint) {}, + Notifier: &mockNotifier{}, + FwdEventTicker: ticker.NewForce(DefaultFwdEventInterval), + LogEventTicker: ticker.NewForce(DefaultLogInterval), + AckEventTicker: ticker.NewForce(DefaultAckInterval), } return New(cfg, startingHeight) diff --git a/htlcswitch/switch.go b/htlcswitch/switch.go index c11c707e..0364b0c4 
100644 --- a/htlcswitch/switch.go +++ b/htlcswitch/switch.go @@ -163,11 +163,6 @@ type Config struct { // fails in forwarding packages. AckEventTicker ticker.Ticker - // NotifyActiveChannel and NotifyInactiveChannel allow the link to tell - // the ChannelNotifier when channels become active and inactive. - NotifyActiveChannel func(wire.OutPoint) - NotifyInactiveChannel func(wire.OutPoint) - // RejectHTLC is a flag that instructs the htlcswitch to reject any // HTLCs that are not from the source hop. RejectHTLC bool @@ -2012,11 +2007,6 @@ func (s *Switch) addLiveLink(link ChannelLink) { s.interfaceIndex[peerPub] = make(map[lnwire.ChannelID]ChannelLink) } s.interfaceIndex[peerPub][link.ChanID()] = link - - // Inform the channel notifier if the link has become active. - if link.EligibleToForward() { - s.cfg.NotifyActiveChannel(*link.ChannelPoint()) - } } // GetLink is used to initiate the handling of the get link command. The @@ -2092,9 +2082,6 @@ func (s *Switch) removeLink(chanID lnwire.ChannelID) ChannelLink { return nil } - // Inform the Channel Notifier about the link becoming inactive. - s.cfg.NotifyInactiveChannel(*link.ChannelPoint()) - // Remove the channel from live link indexes. delete(s.pendingLinkIndex, link.ChanID()) delete(s.linkIndex, link.ChanID()) diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go index cdb0aebb..7bd46531 100644 --- a/htlcswitch/test_utils.go +++ b/htlcswitch/test_utils.go @@ -1122,6 +1122,8 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer, OutgoingCltvRejectDelta: 3, MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry, MaxFeeAllocation: DefaultMaxLinkFeeAllocation, + NotifyActiveChannel: func(wire.OutPoint) {}, + NotifyInactiveChannel: func(wire.OutPoint) {}, }, channel, ) diff --git a/peer.go b/peer.go index 77161ce6..2b4460a4 100644 --- a/peer.go +++ b/peer.go @@ -576,6 +576,8 @@ func (p *peer) addLink(chanPoint *wire.OutPoint, TowerClient: p.server.towerClient, MaxOutgoingCltvExpiry: cfg.MaxOutgoingCltvExpiry, MaxFeeAllocation: cfg.MaxChannelFeeAllocation, + NotifyActiveChannel: p.server.channelNotifier.NotifyActiveChannelEvent, + NotifyInactiveChannel: p.server.channelNotifier.NotifyInactiveChannelEvent, } link := htlcswitch.NewChannelLink(linkCfg, lnChan) diff --git a/server.go b/server.go index a002aab9..f90dcd97 100644 --- a/server.go +++ b/server.go @@ -430,8 +430,6 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, FwdEventTicker: ticker.New(htlcswitch.DefaultFwdEventInterval), LogEventTicker: ticker.New(htlcswitch.DefaultLogInterval), AckEventTicker: ticker.New(htlcswitch.DefaultAckInterval), - NotifyActiveChannel: s.channelNotifier.NotifyActiveChannelEvent, - NotifyInactiveChannel: s.channelNotifier.NotifyInactiveChannelEvent, RejectHTLC: cfg.RejectHTLC, }, uint32(currentHeight)) if err != nil { From 9d6ee2ebd90d625937a87861ba5619fa6fcd014e Mon Sep 17 00:00:00 2001 From: Conner Fromknecht Date: Thu, 19 Sep 2019 12:46:56 -0700 Subject: [PATCH 4/5] htlcswitch/link: restrict EligibleToForward to wait for reestablish This commit modifies the link's EligibleToForward() method to only return true once the peers have successfully exchanged channel reestablish messages. This is a preliminary step to increasing the reestablish timeout, ensuring the switch won't try to forward over links while we're waiting for the remote peer to resume the connection.
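As a rough standalone sketch of the gating this commit introduces (simplified stand-in types, not the patch's actual channelLink struct): an atomic flag is set once the reestablish exchange completes, and eligibility requires both the pre-existing revocation check and that flag.

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // link is a stripped-down stand-in for channelLink; haveRevocation
    // stands in for the RemoteNextRevocation/ShortChanID checks that
    // EligibleToForward already performed before this change.
    type link struct {
        reestablished  int32 // accessed atomically
        haveRevocation bool
    }

    // markReestablished is called once channel_reestablish has been
    // exchanged with the remote peer.
    func (l *link) markReestablished() {
        atomic.StoreInt32(&l.reestablished, 1)
    }

    func (l *link) isReestablished() bool {
        return atomic.LoadInt32(&l.reestablished) == 1
    }

    // EligibleToForward now also requires the reestablish exchange to have
    // completed before the switch will forward over this link.
    func (l *link) EligibleToForward() bool {
        return l.haveRevocation && l.isReestablished()
    }

    func main() {
        l := &link{haveRevocation: true}
        fmt.Println(l.EligibleToForward()) // false: reestablish not yet done
        l.markReestablished()
        fmt.Println(l.EligibleToForward()) // true
    }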
---
 htlcswitch/link.go       | 40 +++++++++++++++++++++++++++++-----------
 htlcswitch/test_utils.go | 30 +++++++++++++++++++++++++++---
 2 files changed, 56 insertions(+), 14 deletions(-)

diff --git a/htlcswitch/link.go b/htlcswitch/link.go
index 5ad55bc7..f405fa7c 100644
--- a/htlcswitch/link.go
+++ b/htlcswitch/link.go
@@ -279,8 +279,9 @@ type ChannelLinkConfig struct {
 // message ordering and updates.
 type channelLink struct {
 	// The following fields are only meant to be used *atomically*
-	started  int32
-	shutdown int32
+	started       int32
+	reestablished int32
+	shutdown      int32
 
 	// failed should be set to true in case a link error happens, making
 	// sure we don't process any more updates.
@@ -540,7 +541,21 @@ func (l *channelLink) WaitForShutdown() {
 // the all-zero source ID, meaning that the channel has had its ID finalized.
 func (l *channelLink) EligibleToForward() bool {
 	return l.channel.RemoteNextRevocation() != nil &&
-		l.ShortChanID() != hop.Source
+		l.ShortChanID() != hop.Source &&
+		l.isReestablished()
+}
+
+// isReestablished returns true if the link has successfully completed the
+// channel reestablishment dance.
+func (l *channelLink) isReestablished() bool {
+	return atomic.LoadInt32(&l.reestablished) == 1
+}
+
+// markReestablished signals that the remote peer has successfully exchanged
+// channel reestablish messages and that the channel is ready to process
+// subsequent messages.
+func (l *channelLink) markReestablished() {
+	atomic.StoreInt32(&l.reestablished, 1)
 }
 
 // sampleNetworkFee samples the current fee rate on the network to get into the
@@ -878,14 +893,6 @@ func (l *channelLink) htlcManager() {
 	log.Infof("HTLC manager for ChannelPoint(%v) started, "+
 		"bandwidth=%v", l.channel.ChannelPoint(), l.Bandwidth())
 
-	// Funding locked has already been received, so we'll go ahead and
-	// deliver the active channel notification since EligibleToForward
-	// returns true now that the link has been added to the switch. We'll
-	// also defer the inactive notification for when the link exits to
-	// ensure that every active notification is matched by an inactive one.
-	l.cfg.NotifyActiveChannel(*l.ChannelPoint())
-	defer l.cfg.NotifyInactiveChannel(*l.ChannelPoint())
-
 	// TODO(roasbeef): need to call wipe chan whenever D/C?
 
 	// If this isn't the first time that this channel link has been
@@ -961,6 +968,17 @@
 		}
 	}
 
+	// We've successfully reestablished the channel, mark it as such to
+	// allow the switch to forward HTLCs in the outbound direction.
+	l.markReestablished()
+
+	// Now that we've received both funding locked and channel reestablish,
+	// we can go ahead and send the active channel notification. We'll also
+	// defer the inactive notification for when the link exits to ensure
+	// that every active notification is matched by an inactive one.
+	l.cfg.NotifyActiveChannel(*l.ChannelPoint())
+	defer l.cfg.NotifyInactiveChannel(*l.ChannelPoint())
+
 	// With the channel states synced, we now reset the mailbox to ensure
 	// we start processing all unacked packets in order. This is done here
 	// to ensure that all acknowledgments that occur during channel
diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go
index 7bd46531..a908afb3 100644
--- a/htlcswitch/test_utils.go
+++ b/htlcswitch/test_utils.go
@@ -29,6 +29,7 @@ import (
 	"github.com/lightningnetwork/lnd/input"
 	"github.com/lightningnetwork/lnd/keychain"
 	"github.com/lightningnetwork/lnd/lnpeer"
+	"github.com/lightningnetwork/lnd/lntest/wait"
 	"github.com/lightningnetwork/lnd/lntypes"
 	"github.com/lightningnetwork/lnd/lnwallet"
 	"github.com/lightningnetwork/lnd/lnwire"
@@ -837,7 +838,12 @@ func (n *threeHopNetwork) start() error {
 		return err
 	}
 
-	return nil
+	return waitLinksEligible(map[string]*channelLink{
+		"alice":      n.aliceChannelLink,
+		"bob first":  n.firstBobChannelLink,
+		"bob second": n.secondBobChannelLink,
+		"carol":      n.carolChannelLink,
+	})
 }
 
 // stop stops nodes and cleanup its databases.
@@ -1130,6 +1136,7 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer,
 	if err := server.htlcSwitch.AddLink(link); err != nil {
 		return nil, fmt.Errorf("unable to add channel link: %v", err)
 	}
+
 	go func() {
 		for {
 			select {
@@ -1230,7 +1237,10 @@ func (n *twoHopNetwork) start() error {
 		return err
 	}
 
-	return nil
+	return waitLinksEligible(map[string]*channelLink{
+		"alice": n.aliceChannelLink,
+		"bob":   n.bobChannelLink,
+	})
 }
 
 // stop stops nodes and cleanup its databases.
@@ -1320,12 +1330,26 @@ func (n *twoHopNetwork) makeHoldPayment(sendingPeer, receivingPeer lnpeer.Peer,
 	return paymentErr
 }
 
+// waitLinksEligible blocks until all links in the provided name-to-link map
+// are eligible to forward HTLCs.
+func waitLinksEligible(links map[string]*channelLink) error {
+	return wait.NoError(func() error {
+		for name, link := range links {
+			if link.EligibleToForward() {
+				continue
+			}
+			return fmt.Errorf("%s channel link not eligible", name)
+		}
+		return nil
+	}, 3*time.Second)
+}
+
 // timeout implements a test level timeout.
 func timeout(t *testing.T) func() {
 	done := make(chan struct{})
 	go func() {
 		select {
-		case <-time.After(5 * time.Second):
+		case <-time.After(10 * time.Second):
 			pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
 			panic("test timeout")
 		case <-done:

From 3276bf29607c8758014b5c2c9ca1f7aa51fdd4e1 Mon Sep 17 00:00:00 2001
From: Conner Fromknecht
Date: Thu, 19 Sep 2019 12:47:08 -0700
Subject: [PATCH 5/5] htlcswitch/link: remove channel reestablish deadline

Now that the link will remain ineligible until it receives
channel_reestablish from the remote peer, we can remove the channel
reestablish timeout entirely.
---
 htlcswitch/link.go | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/htlcswitch/link.go b/htlcswitch/link.go
index f405fa7c..64e6dd00 100644
--- a/htlcswitch/link.go
+++ b/htlcswitch/link.go
@@ -640,10 +640,8 @@ func (l *channelLink) syncChanStates() error {
 
 	var msgsToReSend []lnwire.Message
 
-	// Next, we'll wait to receive the ChanSync message with a timeout
-	// period. The first message sent MUST be the ChanSync message,
-	// otherwise, we'll terminate the connection.
-	chanSyncDeadline := time.After(time.Second * 30)
+	// Next, we'll wait indefinitely to receive the ChanSync message. The
+	// first message sent MUST be the ChanSync message.
 	select {
 	case msg := <-l.upstream:
 		remoteChanSyncMsg, ok := msg.(*lnwire.ChannelReestablish)
@@ -727,10 +725,6 @@
 
 	case <-l.quit:
 		return ErrLinkShuttingDown
-
-	case <-chanSyncDeadline:
-		return fmt.Errorf("didn't receive ChannelReestablish before " +
-			"deadline")
 	}
 
 	return nil
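Again for illustration only (not part of the series): with the deadline gone,
the reestablish wait in syncChanStates has exactly two exits, the expected
message or link shutdown. A reduced, self-contained sketch of that control
flow, using a string channel and a bare quit channel as stand-ins for the
lnwire message stream and the link's quit signal:

package main

import (
	"errors"
	"fmt"
)

// errLinkShuttingDown mirrors the only remaining early exit from the wait.
var errLinkShuttingDown = errors.New("link shutting down")

// waitForReestablish blocks until the expected message arrives on upstream or
// quit is closed. There is deliberately no timeout case anymore: while the
// wait is pending the link simply reports itself as ineligible to forward.
func waitForReestablish(upstream <-chan string, quit <-chan struct{}) (string, error) {
	select {
	case msg := <-upstream:
		return msg, nil
	case <-quit:
		return "", errLinkShuttingDown
	}
}

func main() {
	upstream := make(chan string, 1)
	quit := make(chan struct{})

	upstream <- "channel_reestablish"
	msg, err := waitForReestablish(upstream, quit)
	fmt.Println(msg, err) // channel_reestablish <nil>
}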