lntest: Rename structs with proper visibility so lnd_test runs.
parent 3cb0705b8e
commit 43e501feb9

lnd_test.go (108 changed lines)
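Why the rename is needed: Go exports an identifier to code outside its package only when the name starts with an upper-case letter. With the harness now living in the lntest package, lnd_test.go can no longer refer to the unexported networkHarness and lightningNode types, so everything the test file touches has to carry an exported name. A minimal sketch of the rule, reusing this commit's type names:

	package lntest

	// networkHarness is unexported: only code inside package lntest can
	// name this type, so lnd_test.go cannot hold or accept one.
	type networkHarness struct{}

	// NetworkHarness is exported: an importing file refers to it as
	// lntest.NetworkHarness, which is exactly what the hunks below do.
	type NetworkHarness struct{}

The same rule explains nodeID becoming NodeID, while Start, Stop, and Restart move the other way to the unexported start, stop, and restart, since callers outside lntest are now expected to drive nodes through NetworkHarness methods such as RestartNode.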
@@ -25,6 +25,7 @@ import (
 	"github.com/davecgh/go-spew/spew"
 	"github.com/go-errors/errors"
 	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lntest"
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/roasbeef/btcd/chaincfg"
 	"github.com/roasbeef/btcd/chaincfg/chainhash"
@@ -36,6 +37,10 @@ import (
 	"google.golang.org/grpc"
 )
 
+var (
+	harnessNetParams = &chaincfg.SimNetParams
+)
+
 // harnessTest wraps a regular testing.T providing enhanced error detection
 // and propagation. All error will be augmented with a full stack-trace in
 // order to aid in debugging. Additionally, any panics caused by active
@@ -70,7 +75,9 @@ func (h *harnessTest) Fatalf(format string, a ...interface{}) {
 
 // RunTestCase executes a harness test case. Any errors or panics will be
 // represented as fatal.
-func (h *harnessTest) RunTestCase(testCase *testCase, net *networkHarness) {
+func (h *harnessTest) RunTestCase(testCase *testCase,
+	net *lntest.NetworkHarness) {
+
 	h.testCase = testCase
 	defer func() {
 		h.testCase = nil
@@ -110,7 +117,9 @@ func assertTxInBlock(t *harnessTest, block *wire.MsgBlock, txid *chainhash.Hash)
 
 // mineBlocks mine 'num' of blocks and check that blocks are present in
 // node blockchain.
-func mineBlocks(t *harnessTest, net *networkHarness, num uint32) []*wire.MsgBlock {
+func mineBlocks(t *harnessTest, net *lntest.NetworkHarness, num uint32,
+) []*wire.MsgBlock {
+
 	blocks := make([]*wire.MsgBlock, num)
 
 	blockHashes, err := net.Miner.Node.Generate(num)
@@ -135,9 +144,9 @@ func mineBlocks(t *harnessTest, net *networkHarness, num uint32) []*wire.MsgBlock {
 // after the channel is considered open: the funding transaction should be
 // found within a block, and that Alice can report the status of the new
 // channel.
-func openChannelAndAssert(ctx context.Context, t *harnessTest, net *networkHarness,
-	alice, bob *lightningNode, fundingAmt btcutil.Amount,
-	pushAmt btcutil.Amount) *lnrpc.ChannelPoint {
+func openChannelAndAssert(ctx context.Context, t *harnessTest,
+	net *lntest.NetworkHarness, alice, bob *lntest.HarnessNode,
+	fundingAmt btcutil.Amount, pushAmt btcutil.Amount) *lnrpc.ChannelPoint {
 
 	chanOpenUpdate, err := net.OpenChannel(ctx, alice, bob, fundingAmt,
 		pushAmt)
@@ -182,8 +191,9 @@ func openChannelAndAssert(ctx context.Context, t *harnessTest, net *networkHarness,
 // via timeout from a base parent. Additionally, once the channel has been
 // detected as closed, an assertion checks that the transaction is found within
 // a block.
-func closeChannelAndAssert(ctx context.Context, t *harnessTest, net *networkHarness,
-	node *lightningNode, fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash {
+func closeChannelAndAssert(ctx context.Context, t *harnessTest,
+	net *lntest.NetworkHarness, node *lntest.HarnessNode,
+	fundingChanPoint *lnrpc.ChannelPoint, force bool) *chainhash.Hash {
 
 	closeUpdates, _, err := net.CloseChannel(ctx, node, fundingChanPoint, force)
 	if err != nil {
@@ -234,7 +244,7 @@ func closeChannelAndAssert(ctx context.Context, t *harnessTest, net *networkHarness,
 // numOpenChannelsPending sends an RPC request to a node to get a count of the
 // node's channels that are currently in a pending state (with a broadcast, but
 // not confirmed funding transaction).
-func numOpenChannelsPending(ctxt context.Context, node *lightningNode) (int, error) {
+func numOpenChannelsPending(ctxt context.Context, node *lntest.HarnessNode) (int, error) {
 	pendingChansRequest := &lnrpc.PendingChannelRequest{}
 	resp, err := node.PendingChannels(ctxt, pendingChansRequest)
 	if err != nil {
@@ -246,7 +256,7 @@ func numOpenChannelsPending(ctxt context.Context, node *lightningNode) (int, error) {
 // assertNumOpenChannelsPending asserts that a pair of nodes have the expected
 // number of pending channels between them.
 func assertNumOpenChannelsPending(ctxt context.Context, t *harnessTest,
-	alice, bob *lightningNode, expected int) {
+	alice, bob *lntest.HarnessNode, expected int) {
 
 	const nPolls = 10
 
@@ -257,12 +267,12 @@ func assertNumOpenChannelsPending(ctxt context.Context, t *harnessTest,
 		aliceNumChans, err := numOpenChannelsPending(ctxt, alice)
 		if err != nil {
 			t.Fatalf("error fetching alice's node (%v) pending channels %v",
-				alice.nodeID, err)
+				alice.NodeID, err)
 		}
 		bobNumChans, err := numOpenChannelsPending(ctxt, bob)
 		if err != nil {
 			t.Fatalf("error fetching bob's node (%v) pending channels %v",
-				bob.nodeID, err)
+				bob.NodeID, err)
 		}
 
 		isLastIteration := i == nPolls-1
@@ -290,7 +300,7 @@ func assertNumOpenChannelsPending(ctxt context.Context, t *harnessTest,
 
 // assertNumConnections asserts number current connections between two peers.
 func assertNumConnections(ctxt context.Context, t *harnessTest,
-	alice, bob *lightningNode, expected int) {
+	alice, bob *lntest.HarnessNode, expected int) {
 
 	const nPolls = 10
 
@@ -303,12 +313,12 @@ func assertNumConnections(ctxt context.Context, t *harnessTest,
 		aNumPeers, err := alice.ListPeers(ctxt, &lnrpc.ListPeersRequest{})
 		if err != nil {
 			t.Fatalf("unable to fetch alice's node (%v) list peers %v",
-				alice.nodeID, err)
+				alice.NodeID, err)
 		}
 		bNumPeers, err := bob.ListPeers(ctxt, &lnrpc.ListPeersRequest{})
 		if err != nil {
 			t.Fatalf("unable to fetch bob's node (%v) list peers %v",
-				bob.nodeID, err)
+				bob.NodeID, err)
 		}
 		if len(aNumPeers.Peers) != expected {
 			// Continue polling if this is not the final
@@ -401,7 +411,7 @@ func completePaymentRequests(ctx context.Context, client lnrpc.LightningClient,
 // Bob, then immediately closes the channel after asserting some expected post
 // conditions. Finally, the chain itself is checked to ensure the closing
 // transaction was mined.
-func testBasicChannelFunding(net *networkHarness, t *harnessTest) {
+func testBasicChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
 	timeout := time.Duration(time.Second * 5)
 	ctxb := context.Background()
 
@@ -458,7 +468,7 @@ func testBasicChannelFunding(net *networkHarness, t *harnessTest) {
 // testOpenChannelAfterReorg tests that in the case where we have an open
 // channel where the funding tx gets reorged out, the channel will no
 // longer be present in the node's routing table.
-func testOpenChannelAfterReorg(net *networkHarness, t *harnessTest) {
+func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
 	timeout := time.Duration(time.Second * 5)
 	ctxb := context.Background()
 
@@ -639,7 +649,7 @@ func testOpenChannelAfterReorg(net *networkHarness, t *harnessTest) {
 
 // testDisconnectingTargetPeer performs a test which
 // disconnects Alice-peer from Bob-peer and then re-connects them again
-func testDisconnectingTargetPeer(net *networkHarness, t *harnessTest) {
+func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) {
 
 	ctxb := context.Background()
 
@@ -768,7 +778,7 @@ func testDisconnectingTargetPeer(net *networkHarness, t *harnessTest) {
 // representation of channels if the system is restarted or disconnected.
 // testFundingPersistence mirrors testBasicChannelFunding, but adds restarts
 // and checks for the state of channels with unconfirmed funding transactions.
-func testChannelFundingPersistence(net *networkHarness, t *harnessTest) {
+func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 
 	chanAmt := maxFundingAmount
@@ -914,7 +924,7 @@ peersPoll:
 
 // testChannelBalance creates a new channel between Alice and Bob, then
 // checks channel balance to be equal amount specified while creation of channel.
-func testChannelBalance(net *networkHarness, t *harnessTest) {
+func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
 	timeout := time.Duration(time.Second * 5)
 
 	// Open a channel with 0.16 BTC between Alice and Bob, ensuring the
@@ -1069,8 +1079,7 @@ func assertPendingHtlcStageAndMaturity(t *harnessTest,
 // process.
 //
 // TODO(roasbeef): also add an unsettled HTLC before force closing.
-func testChannelForceClosure(net *networkHarness, t *harnessTest) {
-
+func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 	const (
 		timeout = time.Duration(time.Second * 10)
@@ -1714,7 +1723,7 @@ func testChannelForceClosure(net *networkHarness, t *harnessTest) {
 	}
 }
 
-func testSingleHopInvoice(net *networkHarness, t *harnessTest) {
+func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 	timeout := time.Duration(time.Second * 5)
 
@@ -1851,7 +1860,7 @@ func testSingleHopInvoice(net *networkHarness, t *harnessTest) {
 	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
 }
 
-func testListPayments(net *networkHarness, t *harnessTest) {
+func testListPayments(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 	timeout := time.Duration(time.Second * 5)
 
@@ -1986,7 +1995,7 @@ func testListPayments(net *networkHarness, t *harnessTest) {
 	closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false)
 }
 
-func testMultiHopPayments(net *networkHarness, t *harnessTest) {
+func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
 	const chanAmt = btcutil.Amount(100000)
 	ctxb := context.Background()
 	timeout := time.Duration(time.Second * 5)
@@ -2115,7 +2124,7 @@ func testMultiHopPayments(net *networkHarness, t *harnessTest) {
 	// creating the seed nodes in the network.
 	const baseFee = 1
 
-	assertAmountPaid := func(node *lightningNode, chanPoint wire.OutPoint,
+	assertAmountPaid := func(node *lntest.HarnessNode, chanPoint wire.OutPoint,
 		amountSent, amountReceived int64) {
 
 		channelName := ""
@@ -2220,7 +2229,7 @@ func testMultiHopPayments(net *networkHarness, t *harnessTest) {
 	}
 }
 
-func testInvoiceSubscriptions(net *networkHarness, t *harnessTest) {
+func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
 	const chanAmt = btcutil.Amount(500000)
 	ctxb := context.Background()
 	timeout := time.Duration(time.Second * 5)
@@ -2313,7 +2322,7 @@ func testInvoiceSubscriptions(net *networkHarness, t *harnessTest) {
 }
 
 // testBasicChannelCreation test multiple channel opening and closing.
-func testBasicChannelCreation(net *networkHarness, t *harnessTest) {
+func testBasicChannelCreation(net *lntest.NetworkHarness, t *harnessTest) {
 	const (
 		numChannels = 2
 		timeout     = time.Duration(time.Second * 5)
@@ -2340,7 +2349,7 @@ func testBasicChannelCreation(net *networkHarness, t *harnessTest) {
 // testMaxPendingChannels checks that error is returned from remote peer if
 // max pending channel number was exceeded and that '--maxpendingchannels' flag
 // exists and works properly.
-func testMaxPendingChannels(net *networkHarness, t *harnessTest) {
+func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) {
 	maxPendingChannels := defaultMaxPendingChannels + 1
 	amount := maxFundingAmount
 
@@ -2534,7 +2543,7 @@ func waitForNTxsInMempool(miner *rpcclient.Client, n int,
 // testRevokedCloseRetributinPostBreachConf tests that Alice is able carry out
 // retribution in the event that she fails immediately after detecting Bob's
 // breach txn in the mempool.
-func testRevokedCloseRetribution(net *networkHarness, t *harnessTest) {
+func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 	const (
 		timeout = time.Duration(time.Second * 10)
@@ -2630,8 +2639,7 @@ func testRevokedCloseRetribution(net *networkHarness, t *harnessTest) {
 	// With the temporary file created, copy Bob's current state into the
 	// temporary file we created above. Later after more updates, we'll
 	// restore this state.
-	bobDbPath := filepath.Join(net.Bob.cfg.DataDir, "simnet/bitcoin/channel.db")
-	if err := copyFile(bobTempDbFile, bobDbPath); err != nil {
+	if err := copyFile(bobTempDbFile, net.Bob.DBPath()); err != nil {
 		t.Fatalf("unable to copy database files: %v", err)
 	}
 
@@ -2654,7 +2662,7 @@ func testRevokedCloseRetribution(net *networkHarness, t *harnessTest) {
 	// state. With this, we essentially force Bob to travel back in time
 	// within the channel's history.
 	if err = net.RestartNode(net.Bob, func() error {
-		return os.Rename(bobTempDbFile, bobDbPath)
+		return os.Rename(bobTempDbFile, net.Bob.DBPath())
 	}); err != nil {
 		t.Fatalf("unable to restart node: %v", err)
 	}
@@ -2769,8 +2777,7 @@ func testRevokedCloseRetribution(net *networkHarness, t *harnessTest) {
 // testRevokedCloseRetributionZeroValueRemoteOutput tests that Alice is able
 // carry out retribution in the event that she fails in state where the remote
 // commitment output has zero-value.
-func testRevokedCloseRetributionZeroValueRemoteOutput(
-	net *networkHarness,
+func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness,
 	t *harnessTest) {
 
 	ctxb := context.Background()
@@ -2871,8 +2878,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(
 	// With the temporary file created, copy Carol's current state into the
 	// temporary file we created above. Later after more updates, we'll
 	// restore this state.
-	carolDbPath := filepath.Join(carol.cfg.DataDir, "simnet/bitcoin/channel.db")
-	if err := copyFile(carolTempDbFile, carolDbPath); err != nil {
+	if err := copyFile(carolTempDbFile, carol.DBPath()); err != nil {
 		t.Fatalf("unable to copy database files: %v", err)
 	}
 
@@ -2893,7 +2899,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(
 	// state. With this, we essentially force Carol to travel back in time
 	// within the channel's history.
 	if err = net.RestartNode(carol, func() error {
-		return os.Rename(carolTempDbFile, carolDbPath)
+		return os.Rename(carolTempDbFile, carol.DBPath())
 	}); err != nil {
 		t.Fatalf("unable to restart node: %v", err)
 	}
@@ -3000,8 +3006,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(
 // testRevokedCloseRetributionRemoteHodl tests that Alice properly responds to a
 // channel breach made by the remote party, specifically in the case that the
 // remote party breaches before settling extended HTLCs.
-func testRevokedCloseRetributionRemoteHodl(
-	net *networkHarness,
+func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
 	t *harnessTest) {
 
 	ctxb := context.Background()
@@ -3144,8 +3149,7 @@ func testRevokedCloseRetributionRemoteHodl(
 	// With the temporary file created, copy Carol's current state into the
 	// temporary file we created above. Later after more updates, we'll
 	// restore this state.
-	carolDbPath := filepath.Join(carol.cfg.DataDir, "simnet/bitcoin/channel.db")
-	if err := copyFile(carolTempDbFile, carolDbPath); err != nil {
+	if err := copyFile(carolTempDbFile, carol.DBPath()); err != nil {
 		t.Fatalf("unable to copy database files: %v", err)
 	}
 
@@ -3167,7 +3171,7 @@ func testRevokedCloseRetributionRemoteHodl(
 	// state. With this, we essentially force Carol to travel back in time
 	// within the channel's history.
 	if err = net.RestartNode(carol, func() error {
-		return os.Rename(carolTempDbFile, carolDbPath)
+		return os.Rename(carolTempDbFile, carol.DBPath())
 	}); err != nil {
 		t.Fatalf("unable to restart node: %v", err)
 	}
@@ -3296,7 +3300,7 @@ func testRevokedCloseRetributionRemoteHodl(
 	}
 }
 
-func testHtlcErrorPropagation(net *networkHarness, t *harnessTest) {
+func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) {
 	// In this test we wish to exercise the daemon's correct parsing,
 	// handling, and propagation of errors that occur while processing a
 	// multi-hop payment.
@@ -3582,7 +3586,7 @@ out:
 	}
 }
 
-func testGraphTopologyNotifications(net *networkHarness, t *harnessTest) {
+func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest) {
 	const chanAmt = maxFundingAmount
 	timeout := time.Duration(time.Second * 5)
 	ctxb := context.Background()
@@ -3808,7 +3812,7 @@ func testGraphTopologyNotifications(net *networkHarness, t *harnessTest) {
 // testNodeAnnouncement ensures that when a node is started with one or more
 // external IP addresses specified on the command line, that those addresses
 // announced to the network and reported in the network graph.
-func testNodeAnnouncement(net *networkHarness, t *harnessTest) {
+func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 
 	ipAddresses := map[string]bool{
@@ -3883,7 +3887,7 @@ func testNodeAnnouncement(net *networkHarness, t *harnessTest) {
 	}
 }
 
-func testNodeSignVerify(net *networkHarness, t *harnessTest) {
+func testNodeSignVerify(net *lntest.NetworkHarness, t *harnessTest) {
 	timeout := time.Duration(time.Second * 5)
 	ctxb := context.Background()
 
@@ -3961,12 +3965,12 @@ func testNodeSignVerify(net *networkHarness, t *harnessTest) {
 // testAsyncPayments tests the performance of the async payments, and also
 // checks that balances of both sides can't be become negative under stress
 // payment strikes.
-func testAsyncPayments(net *networkHarness, t *harnessTest) {
+func testAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 
 	// As we'll be querying the channels state frequently we'll
 	// create a closure helper function for the purpose.
-	getChanInfo := func(node *lightningNode) (*lnrpc.ActiveChannel, error) {
+	getChanInfo := func(node *lntest.HarnessNode) (*lnrpc.ActiveChannel, error) {
 		req := &lnrpc.ListChannelsRequest{}
 		channelInfo, err := node.ListChannels(ctxb, req)
 		if err != nil {
@@ -4139,12 +4143,12 @@ func testAsyncPayments(net *networkHarness, t *harnessTest) {
 
 // testBidirectionalAsyncPayments tests that nodes are able to send the
 // payments to each other in async manner without blocking.
-func testBidirectionalAsyncPayments(net *networkHarness, t *harnessTest) {
+func testBidirectionalAsyncPayments(net *lntest.NetworkHarness, t *harnessTest) {
 	ctxb := context.Background()
 
 	// As we'll be querying the channels state frequently we'll
 	// create a closure helper function for the purpose.
-	getChanInfo := func(node *lightningNode) (*lnrpc.ActiveChannel, error) {
+	getChanInfo := func(node *lntest.HarnessNode) (*lnrpc.ActiveChannel, error) {
 		req := &lnrpc.ListChannelsRequest{}
 		channelInfo, err := node.ListChannels(ctxb, req)
 		if err != nil {
@@ -4376,7 +4380,7 @@ func testBidirectionalAsyncPayments(net *networkHarness, t *harnessTest) {
 
 type testCase struct {
 	name string
-	test func(net *networkHarness, t *harnessTest)
+	test func(net *lntest.NetworkHarness, t *harnessTest)
 }
 
 var testsCases = []*testCase{
@@ -4476,7 +4480,7 @@ func TestLightningNetworkDaemon(t *testing.T) {
 
 	// First create the network harness to gain access to its
 	// 'OnTxAccepted' call back.
-	lndHarness, err := newNetworkHarness()
+	lndHarness, err := lntest.NewNetworkHarness()
 	if err != nil {
 		ht.Fatalf("unable to create lightning network harness: %v", err)
 	}
lntest/harness.go

@@ -1,19 +1,41 @@
 package lntest
 
-// networkHarness is an integration testing harness for the lightning network.
+import (
+	"fmt"
+	"io/ioutil"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/grpclog"
+
+	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/roasbeef/btcd/chaincfg"
+	"github.com/roasbeef/btcd/chaincfg/chainhash"
+	"github.com/roasbeef/btcd/integration/rpctest"
+	"github.com/roasbeef/btcd/rpcclient"
+	"github.com/roasbeef/btcd/txscript"
+	"github.com/roasbeef/btcd/wire"
+	"github.com/roasbeef/btcutil"
+)
+
+// NetworkHarness is an integration testing harness for the lightning network.
 // The harness by default is created with two active nodes on the network:
 // Alice and Bob.
-type networkHarness struct {
+type NetworkHarness struct {
 	rpcConfig rpcclient.ConnConfig
 	netParams *chaincfg.Params
-	Miner     *rpctest.Harness
 
-	activeNodes map[int]*lightningNode
+	// Miner is a reference to a running full node that can be used to create
+	// new blocks on the network.
+	Miner *rpctest.Harness
+
+	activeNodes map[int]*HarnessNode
 
 	// Alice and Bob are the initial seeder nodes that are automatically
 	// created to be the initial participants of the test network.
-	Alice *lightningNode
-	Bob   *lightningNode
+	Alice *HarnessNode
+	Bob   *HarnessNode
 
 	seenTxns             chan chainhash.Hash
 	bitcoinWatchRequests chan *txWatchRequest
@@ -27,13 +49,13 @@ type networkHarness struct {
 	sync.Mutex
 }
 
-// newNetworkHarness creates a new network test harness.
+// NewNetworkHarness creates a new network test harness.
 // TODO(roasbeef): add option to use golang's build library to a binary of the
 // current repo. This'll save developers from having to manually `go install`
 // within the repo each time before changes
-func newNetworkHarness() (*networkHarness, error) {
-	return &networkHarness{
-		activeNodes:          make(map[int]*lightningNode),
+func NewNetworkHarness() (*NetworkHarness, error) {
+	return &NetworkHarness{
+		activeNodes:          make(map[int]*HarnessNode),
 		seenTxns:             make(chan chainhash.Hash),
 		bitcoinWatchRequests: make(chan *txWatchRequest),
 		lndErrorChan:         make(chan error),
@@ -44,10 +66,10 @@ func newNetworkHarness() (*networkHarness, error) {
 // InitializeSeedNodes initializes alice and bob nodes given an already
 // running instance of btcd's rpctest harness and extra command line flags,
 // which should be formatted properly - "--arg=value".
-func (n *networkHarness) InitializeSeedNodes(r *rpctest.Harness, lndArgs []string) error {
+func (n *NetworkHarness) InitializeSeedNodes(r *rpctest.Harness, lndArgs []string) error {
 	n.netParams = r.ActiveNet
 	n.Miner = r
-	n.rpcConfig = nodeConfig
+	n.rpcConfig = r.RPCConfig()
 
 	config := nodeConfig{
 		RPCConfig: &n.rpcConfig,
@@ -56,17 +78,17 @@ func (n *networkHarness) InitializeSeedNodes(r *rpctest.Harness, lndArgs []string) error {
 	}
 
 	var err error
-	n.Alice, err = newLightningNode(config)
+	n.Alice, err = newNode(config)
 	if err != nil {
 		return err
 	}
-	n.Bob, err = newLightningNode(config)
+	n.Bob, err = newNode(config)
 	if err != nil {
 		return err
 	}
 
-	n.activeNodes[n.Alice.nodeID] = n.Alice
-	n.activeNodes[n.Bob.nodeID] = n.Bob
+	n.activeNodes[n.Alice.NodeID] = n.Alice
+	n.activeNodes[n.Bob.NodeID] = n.Bob
 
 	return err
 }
@@ -74,7 +96,7 @@ func (n *networkHarness) InitializeSeedNodes(r *rpctest.Harness, lndArgs []string) error {
 // ProcessErrors returns a channel used for reporting any fatal process errors.
 // If any of the active nodes within the harness' test network incur a fatal
 // error, that error is sent over this channel.
-func (n *networkHarness) ProcessErrors() <-chan error {
+func (n *NetworkHarness) ProcessErrors() <-chan error {
 	return n.lndErrorChan
 }
 
@@ -93,7 +115,7 @@ func (f *fakeLogger) Println(args ...interface{}) {}
 // node's wallets will be funded wallets with ten 1 BTC outputs each. Finally
 // rpc clients capable of communicating with the initial seeder nodes are
 // created.
-func (n *networkHarness) SetUp() error {
+func (n *NetworkHarness) SetUp() error {
 	// Swap out grpc's default logger with out fake logger which drops the
 	// statements on the floor.
 	grpclog.SetLogger(&fakeLogger{})
@@ -105,13 +127,13 @@ func (n *networkHarness) SetUp() error {
 	wg.Add(2)
 	go func() {
 		defer wg.Done()
-		if err := n.Alice.Start(n.lndErrorChan); err != nil {
+		if err := n.Alice.start(n.lndErrorChan); err != nil {
 			errChan <- err
 		}
 	}()
 	go func() {
 		defer wg.Done()
-		if err := n.Bob.Start(n.lndErrorChan); err != nil {
+		if err := n.Bob.start(n.lndErrorChan); err != nil {
 			errChan <- err
 		}
 	}()
@@ -200,7 +222,7 @@ out:
 }
 
 // TearDownAll tears down all active nodes within the test lightning network.
-func (n *networkHarness) TearDownAll() error {
+func (n *NetworkHarness) TearDownAll() error {
 	for _, node := range n.activeNodes {
 		if err := node.Shutdown(); err != nil {
 			return err
@@ -213,14 +235,14 @@ func (n *networkHarness) TearDownAll() error {
 	return nil
 }
 
-// NewNode fully initializes a returns a new lightningNode binded to the
+// NewNode fully initializes a returns a new HarnessNode binded to the
 // current instance of the network harness. The created node is running, but
 // not yet connected to other nodes within the network.
-func (n *networkHarness) NewNode(extraArgs []string) (*lightningNode, error) {
+func (n *NetworkHarness) NewNode(extraArgs []string) (*HarnessNode, error) {
 	n.Lock()
 	defer n.Unlock()
 
-	node, err := newLightningNode(nodeConfig{
+	node, err := newNode(nodeConfig{
 		RPCConfig: &n.rpcConfig,
 		NetParams: n.netParams,
 		ExtraArgs: extraArgs,
@@ -231,9 +253,9 @@ func (n *networkHarness) NewNode(extraArgs []string) (*lightningNode, error) {
 
 	// Put node in activeNodes to ensure Shutdown is called even if Start
 	// returns an error.
-	n.activeNodes[node.nodeID] = node
+	n.activeNodes[node.NodeID] = node
 
-	if err := node.Start(n.lndErrorChan); err != nil {
+	if err := node.start(n.lndErrorChan); err != nil {
 		return nil, err
 	}
 
@@ -246,7 +268,7 @@ func (n *networkHarness) NewNode(extraArgs []string) (*lightningNode, error) {
 //
 // NOTE: This function may block for up to 15-seconds as it will not return
 // until the new connection is detected as being known to both nodes.
-func (n *networkHarness) ConnectNodes(ctx context.Context, a, b *lightningNode) error {
+func (n *NetworkHarness) ConnectNodes(ctx context.Context, a, b *HarnessNode) error {
 	bobInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{})
 	if err != nil {
 		return err
@@ -288,7 +310,7 @@ func (n *networkHarness) ConnectNodes(ctx context.Context, a, b *lightningNode) error {
 
 // DisconnectNodes disconnects node a from node b by sending RPC message
 // from a node to b node
-func (n *networkHarness) DisconnectNodes(ctx context.Context, a, b *lightningNode) error {
+func (n *NetworkHarness) DisconnectNodes(ctx context.Context, a, b *HarnessNode) error {
 	bobInfo, err := b.GetInfo(ctx, &lnrpc.GetInfoRequest{})
 	if err != nil {
 		return err
@@ -315,8 +337,8 @@ func (n *networkHarness) DisconnectNodes(ctx context.Context, a, b *lightningNode) error {
 // This method can be useful when testing edge cases such as a node broadcast
 // and invalidated prior state, or persistent state recovery, simulating node
 // crashes, etc.
-func (n *networkHarness) RestartNode(node *lightningNode, callback func() error) error {
-	return node.Restart(n.lndErrorChan, callback)
+func (n *NetworkHarness) RestartNode(node *HarnessNode, callback func() error) error {
+	return node.restart(n.lndErrorChan, callback)
 }
 
 // TODO(roasbeef): add a WithChannel higher-order function?
@@ -335,7 +357,7 @@ type txWatchRequest struct {
 // bitcoinNetworkWatcher is a goroutine which accepts async notification
 // requests for the broadcast of a target transaction, and then dispatches the
 // transaction once its seen on the Bitcoin network.
-func (n *networkHarness) networkWatcher() {
+func (n *NetworkHarness) networkWatcher() {
 	seenTxns := make(map[chainhash.Hash]struct{})
 	clients := make(map[chainhash.Hash][]chan struct{})
 
@@ -381,7 +403,7 @@ func (n *networkHarness) networkWatcher() {
 
 // OnTxAccepted is a callback to be called each time a new transaction has been
 // broadcast on the network.
-func (n *networkHarness) OnTxAccepted(hash *chainhash.Hash, amt btcutil.Amount) {
+func (n *NetworkHarness) OnTxAccepted(hash *chainhash.Hash, amt btcutil.Amount) {
 	// Return immediately if harness has been torn down.
 	select {
 	case <-n.quit:
@@ -398,11 +420,11 @@ func (n *networkHarness) OnTxAccepted(hash *chainhash.Hash, amt btcutil.Amount) {
 // the transaction isn't seen within the network before the passed timeout,
 // then an error is returned.
 // TODO(roasbeef): add another method which creates queue of all seen transactions
-func (n *networkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.Hash) error {
+func (n *NetworkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.Hash) error {
 	// Return immediately if harness has been torn down.
 	select {
 	case <-n.quit:
-		return fmt.Errorf("networkHarness has been torn down")
+		return fmt.Errorf("NetworkHarness has been torn down")
 	default:
 	}
 
@@ -417,7 +439,7 @@ func (n *networkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.Hash) error {
 	case <-eventChan:
 		return nil
 	case <-n.quit:
-		return fmt.Errorf("networkHarness has been torn down")
+		return fmt.Errorf("NetworkHarness has been torn down")
 	case <-ctx.Done():
 		return fmt.Errorf("tx not seen before context timeout")
 	}
@@ -427,8 +449,8 @@ func (n *networkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.Hash) error {
 // passed channel funding parameters. If the passed context has a timeout, then
 // if the timeout is reached before the channel pending notification is
 // received, an error is returned.
-func (n *networkHarness) OpenChannel(ctx context.Context,
-	srcNode, destNode *lightningNode, amt btcutil.Amount,
+func (n *NetworkHarness) OpenChannel(ctx context.Context,
+	srcNode, destNode *HarnessNode, amt btcutil.Amount,
 	pushAmt btcutil.Amount) (lnrpc.Lightning_OpenChannelClient, error) {
 
 	// Wait until srcNode and destNode have the latest chain synced.
@@ -489,8 +511,8 @@ func (n *networkHarness) OpenChannel(ctx context.Context,
 // passed channel funding parameters. If the passed context has a timeout, then
 // if the timeout is reached before the channel pending notification is
 // received, an error is returned.
-func (n *networkHarness) OpenPendingChannel(ctx context.Context,
-	srcNode, destNode *lightningNode, amt btcutil.Amount,
+func (n *NetworkHarness) OpenPendingChannel(ctx context.Context,
+	srcNode, destNode *HarnessNode, amt btcutil.Amount,
 	pushAmt btcutil.Amount) (*lnrpc.PendingUpdate, error) {
 
 	// Wait until srcNode and destNode have blockchain synced
@@ -549,7 +571,7 @@ func (n *networkHarness) OpenPendingChannel(ctx context.Context,
 // consuming a message from the past open channel stream. If the passed context
 // has a timeout, then if the timeout is reached before the channel has been
 // opened, then an error is returned.
-func (n *networkHarness) WaitForChannelOpen(ctx context.Context,
+func (n *NetworkHarness) WaitForChannelOpen(ctx context.Context,
 	openChanStream lnrpc.Lightning_OpenChannelClient) (*lnrpc.ChannelPoint, error) {
 
 	errChan := make(chan error)
@@ -585,8 +607,8 @@ func (n *networkHarness) WaitForChannelOpen(ctx context.Context,
 // passed channel point, initiated by the passed lnNode. If the passed context
 // has a timeout, then if the timeout is reached before the channel close is
 // pending, then an error is returned.
-func (n *networkHarness) CloseChannel(ctx context.Context,
-	lnNode *lightningNode, cp *lnrpc.ChannelPoint,
+func (n *NetworkHarness) CloseChannel(ctx context.Context,
+	lnNode *HarnessNode, cp *lnrpc.ChannelPoint,
 	force bool) (lnrpc.Lightning_CloseChannelClient, *chainhash.Hash, error) {
 
 	// Create a channel outpoint that we can use to compare to channels
@@ -684,7 +706,7 @@ CheckActive:
 // stream that the node has deemed the channel has been fully closed. If the
 // passed context has a timeout, then if the timeout is reached before the
 // notification is received then an error is returned.
-func (n *networkHarness) WaitForChannelClose(ctx context.Context,
+func (n *NetworkHarness) WaitForChannelClose(ctx context.Context,
 	closeChanStream lnrpc.Lightning_CloseChannelClient) (*chainhash.Hash, error) {
 
 	errChan := make(chan error)
@@ -720,8 +742,8 @@ func (n *networkHarness) WaitForChannelClose(ctx context.Context,
 
 // AssertChannelExists asserts that an active channel identified by
 // channelPoint is known to exist from the point-of-view of node..
-func (n *networkHarness) AssertChannelExists(ctx context.Context,
-	node *lightningNode, chanPoint *wire.OutPoint) error {
+func (n *NetworkHarness) AssertChannelExists(ctx context.Context,
+	node *HarnessNode, chanPoint *wire.OutPoint) error {
 
 	req := &lnrpc.ListChannelsRequest{}
 	resp, err := node.ListChannels(ctx, req)
@@ -743,7 +765,7 @@ func (n *networkHarness) AssertChannelExists(ctx context.Context,
 // of a particular node in the case of a test failure.
 // Logs from lightning node being generated with delay - you should
 // add time.Sleep() in order to get all logs.
-func (n *networkHarness) DumpLogs(node *lightningNode) (string, error) {
+func (n *NetworkHarness) DumpLogs(node *HarnessNode) (string, error) {
 	logFile := fmt.Sprintf("%v/simnet/lnd.log", node.cfg.LogDir)
 
 	buf, err := ioutil.ReadFile(logFile)
@@ -756,8 +778,8 @@ func (n *networkHarness) DumpLogs(node *lightningNode) (string, error) {
 
 // SendCoins attempts to send amt satoshis from the internal mining node to the
 // targeted lightning node.
-func (n *networkHarness) SendCoins(ctx context.Context, amt btcutil.Amount,
-	target *lightningNode) error {
+func (n *NetworkHarness) SendCoins(ctx context.Context, amt btcutil.Amount,
+	target *HarnessNode) error {
 
 	balReq := &lnrpc.WalletBalanceRequest{}
 	initialBalance, err := target.WalletBalance(ctx, balReq)
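Taken together, the exported surface above is what lnd_test.go now drives. A rough sketch of the intended call order, condensed from the hunks in this commit (the helper name sketchHarnessUsage and the already-running btcdHarness parameter are illustrative, not part of the commit; the real TestLightningNetworkDaemon also wires the miner's OnTxAccepted callback and runs the testCase table):

	package itest

	import (
		"testing"

		"github.com/lightningnetwork/lnd/lntest"
		"github.com/roasbeef/btcd/integration/rpctest"
	)

	func sketchHarnessUsage(t *testing.T, btcdHarness *rpctest.Harness) {
		// Create the harness first to gain access to its OnTxAccepted callback.
		lndHarness, err := lntest.NewNetworkHarness()
		if err != nil {
			t.Fatalf("unable to create lightning network harness: %v", err)
		}
		defer lndHarness.TearDownAll()

		// Point Alice and Bob at the running btcd instance, then launch them.
		if err := lndHarness.InitializeSeedNodes(btcdHarness, nil); err != nil {
			t.Fatalf("unable to initialize seed nodes: %v", err)
		}
		if err := lndHarness.SetUp(); err != nil {
			t.Fatalf("unable to set up test network: %v", err)
		}

		// Nodes beyond Alice and Bob come from the exported NewNode.
		carol, err := lndHarness.NewNode(nil)
		if err != nil {
			t.Fatalf("unable to create new node: %v", err)
		}
		_ = carol
	}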
lntest/node.go (201 changed lines)
@@ -1,5 +1,34 @@
 package lntest
 
+import (
+	"bytes"
+	"encoding/hex"
+	"flag"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	macaroon "gopkg.in/macaroon.v1"
+
+	"github.com/go-errors/errors"
+	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/macaroons"
+	"github.com/roasbeef/btcd/chaincfg"
+	"github.com/roasbeef/btcd/chaincfg/chainhash"
+	"github.com/roasbeef/btcd/rpcclient"
+	"github.com/roasbeef/btcd/wire"
+)
+
 var (
 	// numActiveNodes is the number of active nodes within the test network.
 	numActiveNodes = 0
@@ -18,8 +47,6 @@ var (
 	// as such: defaultP2pPort + (2 * harness.nodeNum).
 	defaultClientPort = 19556
 
-	harnessNetParams = &chaincfg.SimNetParams
-
 	// logOutput is a flag that can be set to append the output from the
 	// seed nodes to log files.
 	logOutput = flag.Bool("logoutput", false,
@@ -119,13 +146,14 @@ func (cfg nodeConfig) genArgs() []string {
 	return args
 }
 
-// lightningNode represents an instance of lnd running within our test network
-// harness. Each lightningNode instance also fully embedds an RPC client in
+// HarnessNode represents an instance of lnd running within our test network
+// harness. Each HarnessNode instance also fully embedds an RPC client in
 // order to pragmatically drive the node.
-type lightningNode struct {
+type HarnessNode struct {
 	cfg *nodeConfig
 
-	nodeID int
+	// NodeID is a unique identifier for the node within a NetworkHarness.
+	NodeID int
 
 	// PubKey is the serialized compressed identity public key of the node.
 	// This field will only be populated once the node itself has been
@@ -137,7 +165,7 @@ type lightningNode struct {
 	pidFile string
 
 	// processExit is a channel that's closed once it's detected that the
-	// process this instance of lightningNode is bound to has exited.
+	// process this instance of HarnessNode is bound to has exited.
 	processExit chan struct{}
 
 	chanWatchRequests chan *chanWatchRequest
@@ -148,9 +176,11 @@ type lightningNode struct {
 	lnrpc.LightningClient
 }
 
-// newLightningNode creates a new test lightning node instance from the passed
-// rpc config and slice of extra arguments.
-func newLightningNode(cfg nodeConfig) (*lightningNode, error) {
+// Assert *HarnessNode implements the lnrpc.LightningClient interface.
+var _ lnrpc.LightningClient = (*HarnessNode)(nil)
+
+// newNode creates a new test lightning node instance from the passed config.
+func newNode(cfg nodeConfig) (*HarnessNode, error) {
 	if cfg.BaseDir == "" {
 		var err error
 		cfg.BaseDir, err = ioutil.TempDir("", "lndtest-node")
@@ -170,30 +200,35 @@ func newLightningNode(cfg nodeConfig) (*lightningNode, error) {
 	nodeNum := numActiveNodes
 	numActiveNodes++
 
-	return &lightningNode{
+	return &HarnessNode{
 		cfg:               &cfg,
-		nodeID:            nodeNum,
+		NodeID:            nodeNum,
 		chanWatchRequests: make(chan *chanWatchRequest),
 		processExit:       make(chan struct{}),
 		quit:              make(chan struct{}),
 	}, nil
 }
 
+// DBPath returns the filepath to the channeldb database file for this node.
+func (hn *HarnessNode) DBPath() string {
+	return hn.cfg.DBPath()
+}
+
 // Start launches a new process running lnd. Additionally, the PID of the
 // launched process is saved in order to possibly kill the process forcibly
 // later.
-func (l *lightningNode) Start(lndError chan<- error) error {
-	args := l.cfg.genArgs()
-	l.cmd = exec.Command("lnd", args...)
+func (hn *HarnessNode) start(lndError chan<- error) error {
+	args := hn.cfg.genArgs()
+	hn.cmd = exec.Command("lnd", args...)
 
 	// Redirect stderr output to buffer
 	var errb bytes.Buffer
-	l.cmd.Stderr = &errb
+	hn.cmd.Stderr = &errb
 
 	// If the logoutput flag is passed, redirect output from the nodes to
 	// log files.
 	if *logOutput {
-		logFile := fmt.Sprintf("output%d.log", l.nodeID)
+		logFile := fmt.Sprintf("output%d.log", hn.NodeID)
 
 		// Create file if not exists, otherwise append.
 		file, err := os.OpenFile(logFile,
@@ -204,69 +239,70 @@ func (l *lightningNode) Start(lndError chan<- error) error {
 
 		// Pass node's stderr to both errb and the file.
 		w := io.MultiWriter(&errb, file)
-		l.cmd.Stderr = w
+		hn.cmd.Stderr = w
 
 		// Pass the node's stdout only to the file.
-		l.cmd.Stdout = file
+		hn.cmd.Stdout = file
 	}
 
-	if err := l.cmd.Start(); err != nil {
+	if err := hn.cmd.Start(); err != nil {
 		return err
 	}
 
 	// Launch a new goroutine which that bubbles up any potential fatal
 	// process errors to the goroutine running the tests.
 	go func() {
-		err := l.cmd.Wait()
+		err := hn.cmd.Wait()
 
 		if err != nil {
			lndError <- errors.Errorf("%v\n%v\n", err, errb.String())
 		}
 
 		// Signal any onlookers that this process has exited.
-		close(l.processExit)
+		close(hn.processExit)
 	}()
 
 	// Write process ID to a file.
-	if err := l.writePidFile(); err != nil {
-		l.cmd.Process.Kill()
+	if err := hn.writePidFile(); err != nil {
+		hn.cmd.Process.Kill()
 		return err
 	}
 
 	// Since Stop uses the LightningClient to stop the node, if we fail to get a
 	// connected client, we have to kill the process.
-	conn, err := l.connectRPC()
+	conn, err := hn.connectRPC()
 	if err != nil {
-		l.cmd.Process.Kill()
+		hn.cmd.Process.Kill()
 		return err
 	}
-	l.LightningClient = lnrpc.NewLightningClient(conn)
+	hn.LightningClient = lnrpc.NewLightningClient(conn)
 
 	// Obtain the lnid of this node for quick identification purposes.
 	ctxb := context.Background()
-	info, err := l.GetInfo(ctxb, &lnrpc.GetInfoRequest{})
+	info, err := hn.GetInfo(ctxb, &lnrpc.GetInfoRequest{})
 	if err != nil {
 		return err
 	}
 
-	l.PubKeyStr = info.IdentityPubkey
+	hn.PubKeyStr = info.IdentityPubkey
 
 	pubkey, err := hex.DecodeString(info.IdentityPubkey)
 	if err != nil {
 		return err
 	}
-	copy(l.PubKey[:], pubkey)
+	copy(hn.PubKey[:], pubkey)
 
 	// Launch the watcher that'll hook into graph related topology change
 	// from the PoV of this node.
-	l.wg.Add(1)
-	go l.lightningNetworkWatcher()
+	hn.wg.Add(1)
+	go hn.lightningNetworkWatcher()
 
 	return nil
 }
 
 // writePidFile writes the process ID of the running lnd process to a .pid file.
-func (l *lightningNode) writePidFile() error {
-	filePath := filepath.Join(l.cfg.BaseDir, fmt.Sprintf("%v.pid", l.nodeID))
+func (hn *HarnessNode) writePidFile() error {
+	filePath := filepath.Join(hn.cfg.BaseDir, fmt.Sprintf("%v.pid", hn.NodeID))
 
 	pid, err := os.Create(filePath)
 	if err != nil {
@@ -274,22 +310,22 @@ func (l *lightningNode) writePidFile() error {
 	}
 	defer pid.Close()
 
-	_, err = fmt.Fprintf(pid, "%v\n", l.cmd.Process.Pid)
+	_, err = fmt.Fprintf(pid, "%v\n", hn.cmd.Process.Pid)
 	if err != nil {
 		return err
 	}
 
-	l.pidFile = filePath
+	hn.pidFile = filePath
 	return nil
 }
 
 // connectRPC uses the TLS certificate and admin macaroon files written by the
 // lnd node to create a gRPC client connection.
-func (l *lightningNode) connectRPC() (*grpc.ClientConn, error) {
+func (hn *HarnessNode) connectRPC() (*grpc.ClientConn, error) {
 	// Wait until TLS certificate and admin macaroon are created before
 	// using them, up to 20 sec.
 	tlsTimeout := time.After(30 * time.Second)
-	for !fileExists(l.cfg.TLSCertPath) || !fileExists(l.cfg.AdminMacPath) {
+	for !fileExists(hn.cfg.TLSCertPath) || !fileExists(hn.cfg.AdminMacPath) {
 		select {
 		case <-tlsTimeout:
 			return nil, fmt.Errorf("timeout waiting for TLS cert file " +
@@ -299,11 +335,11 @@ func (l *lightningNode) connectRPC() (*grpc.ClientConn, error) {
 		}
 	}
 
-	tlsCreds, err := credentials.NewClientTLSFromFile(l.cfg.TLSCertPath, "")
+	tlsCreds, err := credentials.NewClientTLSFromFile(hn.cfg.TLSCertPath, "")
 	if err != nil {
 		return nil, err
 	}
-	macBytes, err := ioutil.ReadFile(l.cfg.AdminMacPath)
+	macBytes, err := ioutil.ReadFile(hn.cfg.AdminMacPath)
 	if err != nil {
 		return nil, err
 	}
@@ -317,26 +353,26 @@ func (l *lightningNode) connectRPC() (*grpc.ClientConn, error) {
 		grpc.WithBlock(),
 		grpc.WithTimeout(time.Second * 20),
 	}
-	return grpc.Dial(l.rpcAddr, opts...)
+	return grpc.Dial(hn.cfg.RPCAddr(), opts...)
 }
 
 // cleanup cleans up all the temporary files created by the node's process.
-func (l *lightningNode) cleanup() error {
-	return os.RemoveAll(l.cfg.BaseDir)
+func (hn *HarnessNode) cleanup() error {
+	return os.RemoveAll(hn.cfg.BaseDir)
 }
 
 // Stop attempts to stop the active lnd process.
-func (l *lightningNode) Stop() error {
+func (hn *HarnessNode) stop() error {
 	// Do nothing if the process never started successfully.
-	if l.LightningClient == nil {
+	if hn.LightningClient == nil {
 		return nil
 	}
 
 	// Do nothing if the process already finished.
 	select {
-	case <-l.quit:
+	case <-hn.quit:
 		return nil
-	case <-l.processExit:
+	case <-hn.processExit:
 		return nil
 	default:
 	}
@@ -345,10 +381,10 @@ func (l *lightningNode) Stop() error {
 	// closed before a response is returned.
 	req := lnrpc.StopRequest{}
 	ctx := context.Background()
-	l.LightningClient.StopDaemon(ctx, &req)
+	hn.LightningClient.StopDaemon(ctx, &req)
 
-	close(l.quit)
-	l.wg.Wait()
+	close(hn.quit)
+	hn.wg.Wait()
 	return nil
 }
 
@@ -358,17 +394,17 @@ func (l *lightningNode) Stop() error {
 // connection attempt is successful. Additionally, if a callback is passed, the
 // closure will be executed after the node has been shutdown, but before the
 // process has been started up again.
-func (l *lightningNode) Restart(errChan chan error, callback func() error) error {
-	if err := l.Stop(); err != nil {
+func (hn *HarnessNode) restart(errChan chan error, callback func() error) error {
+	if err := hn.stop(); err != nil {
 		return err
 	}
 
-	<-l.processExit
+	<-hn.processExit
 
-	l.LightningClient = nil
-	l.processExit = make(chan struct{})
-	l.quit = make(chan struct{})
-	l.wg = sync.WaitGroup{}
+	hn.LightningClient = nil
+	hn.processExit = make(chan struct{})
+	hn.quit = make(chan struct{})
+	hn.wg = sync.WaitGroup{}
 
 	if callback != nil {
 		if err := callback(); err != nil {
@@ -376,16 +412,16 @@ func (l *lightningNode) Restart(errChan chan error, callback func() error) error {
 		}
 	}
 
-	return l.Start(errChan)
+	return hn.start(errChan)
}
 
 // Shutdown stops the active lnd process and clean up any temporary directories
 // created along the way.
-func (l *lightningNode) Shutdown() error {
-	if err := l.Stop(); err != nil {
+func (hn *HarnessNode) Shutdown() error {
+	if err := hn.stop(); err != nil {
 		return err
 	}
-	if err := l.cleanup(); err != nil {
+	if err := hn.cleanup(); err != nil {
 		return err
 	}
 	return nil
@@ -407,17 +443,17 @@ type chanWatchRequest struct {
 // closed or opened within the network. In order to dispatch these
 // notifications, the GraphTopologySubscription client exposed as part of the
 // gRPC interface is used.
-func (l *lightningNode) lightningNetworkWatcher() {
-	defer l.wg.Done()
+func (hn *HarnessNode) lightningNetworkWatcher() {
+	defer hn.wg.Done()
 
 	graphUpdates := make(chan *lnrpc.GraphTopologyUpdate)
-	l.wg.Add(1)
+	hn.wg.Add(1)
 	go func() {
-		defer l.wg.Done()
+		defer hn.wg.Done()
 
 		ctxb := context.Background()
 		req := &lnrpc.GraphTopologySubscription{}
-		topologyClient, err := l.SubscribeChannelGraph(ctxb, req)
+		topologyClient, err := hn.SubscribeChannelGraph(ctxb, req)
 		if err != nil {
 			// We panic here in case of an error as failure to
 			// create the topology client will cause all subsequent
@@ -436,7 +472,7 @@ func (l *lightningNode) lightningNetworkWatcher() {
 
 			select {
 			case graphUpdates <- update:
-			case <-l.quit:
+			case <-hn.quit:
 				return
 			}
 		}
@@ -506,7 +542,7 @@ func (l *lightningNode) lightningNetworkWatcher() {
 		// A new watch request, has just arrived. We'll either be able
 		// to dispatch immediately, or need to add the client for
 		// processing later.
-		case watchRequest := <-l.chanWatchRequests:
+		case watchRequest := <-hn.chanWatchRequests:
 			targetChan := watchRequest.chanPoint
 
 			// TODO(roasbeef): add update type also, checks for
@@ -540,7 +576,7 @@ func (l *lightningNode) lightningNetworkWatcher() {
 			closeClients[targetChan] = append(closeClients[targetChan],
 				watchRequest.eventChan)
 
-		case <-l.quit:
+		case <-hn.quit:
 			return
 		}
 	}
@@ -550,7 +586,7 @@ func (l *lightningNode) lightningNetworkWatcher() {
 // outpoint is seen as being fully advertised within the network. A channel is
 // considered "fully advertised" once both of its directional edges has been
 // advertised within the test Lightning Network.
-func (l *lightningNode) WaitForNetworkChannelOpen(ctx context.Context,
+func (hn *HarnessNode) WaitForNetworkChannelOpen(ctx context.Context,
 	op *lnrpc.ChannelPoint) error {
 
 	eventChan := make(chan struct{})
@@ -560,7 +596,7 @@ func (l *lightningNode) WaitForNetworkChannelOpen(ctx context.Context,
 		return err
 	}
 
-	l.chanWatchRequests <- &chanWatchRequest{
+	hn.chanWatchRequests <- &chanWatchRequest{
 		chanPoint: wire.OutPoint{
 			Hash:  *txid,
 			Index: op.OutputIndex,
@@ -581,7 +617,7 @@ func (l *lightningNode) WaitForNetworkChannelOpen(ctx context.Context,
 // outpoint is seen as closed within the network. A channel is considered
 // closed once a transaction spending the funding outpoint is seen within a
 // confirmed block.
-func (l *lightningNode) WaitForNetworkChannelClose(ctx context.Context,
+func (hn *HarnessNode) WaitForNetworkChannelClose(ctx context.Context,
 	op *lnrpc.ChannelPoint) error {
 
 	eventChan := make(chan struct{})
@@ -591,7 +627,7 @@ func (l *lightningNode) WaitForNetworkChannelClose(ctx context.Context,
 		return err
 	}
 
-	l.chanWatchRequests <- &chanWatchRequest{
+	hn.chanWatchRequests <- &chanWatchRequest{
 		chanPoint: wire.OutPoint{
 			Hash:  *txid,
 			Index: op.OutputIndex,
@@ -613,7 +649,7 @@ func (l *lightningNode) WaitForNetworkChannelClose(ctx context.Context,
 // timeout, then the goroutine will continually poll until the timeout has
 // elapsed. In the case that the chain isn't synced before the timeout is up,
 // then this function will return an error.
-func (l *lightningNode) WaitForBlockchainSync(ctx context.Context) error {
+func (hn *HarnessNode) WaitForBlockchainSync(ctx context.Context) error {
 	errChan := make(chan error, 1)
 	retryDelay := time.Millisecond * 100
 
@@ -621,13 +657,13 @@ func (l *lightningNode) WaitForBlockchainSync(ctx context.Context) error {
 		for {
 			select {
 			case <-ctx.Done():
-			case <-l.quit:
+			case <-hn.quit:
 				return
 			default:
 			}
 
 			getInfoReq := &lnrpc.GetInfoRequest{}
-			getInfoResp, err := l.GetInfo(ctx, getInfoReq)
+			getInfoResp, err := hn.GetInfo(ctx, getInfoReq)
 			if err != nil {
 				errChan <- err
 				return
@@ -646,7 +682,7 @@ func (l *lightningNode) WaitForBlockchainSync(ctx context.Context) error {
 	}()
 
 	select {
-	case <-l.quit:
+	case <-hn.quit:
 		return nil
 	case err := <-errChan:
 		return err
@@ -654,3 +690,14 @@ func (l *lightningNode) WaitForBlockchainSync(ctx context.Context) error {
 		return fmt.Errorf("Timeout while waiting for blockchain sync")
 	}
 }
+
+// fileExists reports whether the named file or directory exists.
+// This function is taken from https://github.com/btcsuite/btcd
+func fileExists(name string) bool {
+	if _, err := os.Stat(name); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	}
+	return true
+}
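One hunk above deserves a note: the old doc comment on newLightningNode is replaced by a compile-time assertion, var _ lnrpc.LightningClient = (*HarnessNode)(nil). The blank-identifier declaration costs nothing at runtime, and because HarnessNode embeds lnrpc.LightningClient it reads mainly as machine-checked documentation that a *HarnessNode can be passed wherever that client interface is expected. A self-contained illustration of the idiom, with hypothetical types rather than lnd's:

	package main

	import "fmt"

	// Stringer stands in for a small interface contract.
	type Stringer interface {
		String() string
	}

	// node satisfies Stringer via a pointer receiver.
	type node struct {
		id int
	}

	func (n *node) String() string {
		return fmt.Sprintf("node-%d", n.id)
	}

	// Compile-time check: if *node ever loses String(), the build breaks
	// here instead of at some distant call site.
	var _ Stringer = (*node)(nil)

	func main() {
		fmt.Println(&node{id: 1})
	}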