test: introduce a few networkHarness helper methods
This commit adds some new networkHarness helper methods which are meant to reduce the verbosity of the previous basic tests, and also to enable developers to write tests more easily via a higher-level interface.
This commit is contained in:
parent
0511273b47
commit
83bf0be2cc
121
lnd_test.go
121
lnd_test.go
@ -4,12 +4,10 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/roasbeef/btcd/rpctest"
|
||||
"github.com/roasbeef/btcd/wire"
|
||||
"github.com/roasbeef/btcrpcclient"
|
||||
@ -33,36 +31,20 @@ func assertTxInBlock(block *btcutil.Block, txid *wire.ShaHash, t *testing.T) {
|
||||
// Bob, then immediately closes the channel after asserting some expected post
|
||||
// conditions. Finally, the chain itself is checked to ensure the closing
|
||||
// transaction was mined.
|
||||
// TODO(roasbeef): abstract blocking calls to async events to methods within
|
||||
// the networkHarness.
|
||||
func testBasicChannelFunding(net *networkHarness, t *testing.T) {
|
||||
// First establish a channel between Alice and Bob.
|
||||
ctxb := context.Background()
|
||||
openReq := &lnrpc.OpenChannelRequest{
|
||||
// TODO(roasbeef): should pass actual id instead, will fail if
|
||||
// more connections added for Alice.
|
||||
TargetPeerId: 1,
|
||||
LocalFundingAmount: btcutil.SatoshiPerBitcoin / 2,
|
||||
RemoteFundingAmount: 0,
|
||||
NumConfs: 1,
|
||||
}
|
||||
respStream, err := net.AliceClient.OpenChannel(ctxb, openReq)
|
||||
|
||||
// First establish a channel with a capacity of 0.5 BTC between
|
||||
// Alice and Bob.
|
||||
chanAmt := btcutil.Amount(btcutil.SatoshiPerBitcoin / 2)
|
||||
chanOpenUpdate, err := net.OpenChannel(ctxb, net.AliceClient, net.BobClient, chanAmt, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open channel between alice and bob: %v", err)
|
||||
t.Fatalf("unable to open channel: %v", err)
|
||||
}
|
||||
|
||||
// Consume the "channel pending" update. This allows us to synchronize
|
||||
// the node's state with the actions below.
|
||||
resp, err := respStream.Recv()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to read rpc resp: %v", err)
|
||||
}
|
||||
if _, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending); !ok {
|
||||
t.Fatalf("expected channel pending update, instead got %v", resp)
|
||||
}
|
||||
|
||||
// Mine a block, the funding txid should be included, and both nodes should
|
||||
// be aware of the channel.
|
||||
// Mine a block, then wait for Alice's node to notify us that the
|
||||
// channel has been opened. The funding transaction should be found
|
||||
// within the newly mined block.
|
||||
blockHash, err := net.Miner.Node.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
@ -71,81 +53,30 @@ func testBasicChannelFunding(net *networkHarness, t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get block: %v", err)
|
||||
}
|
||||
if len(block.Transactions()) < 2 {
|
||||
t.Fatalf("funding transaction not included")
|
||||
}
|
||||
|
||||
// Next, consume the "channel open" update to reveal the proper
|
||||
// outpoint for the final funding transaction.
|
||||
resp, err = respStream.Recv()
|
||||
fundingChanPoint, err := net.WaitForChannelOpen(chanOpenUpdate)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to read rpc resp: %v", err)
|
||||
t.Fatalf("error while waiting for channel open: %v", err)
|
||||
}
|
||||
fundingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
|
||||
if !ok {
|
||||
t.Fatalf("expected channel open update, instead got %v", resp)
|
||||
fundingTxID, err := wire.NewShaHash(fundingChanPoint.FundingTxid)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create sha hash: %v", err)
|
||||
}
|
||||
fundingChanPoint := fundingResp.ChanOpen.ChannelPoint
|
||||
fundingTxID, _ := wire.NewShaHash(fundingChanPoint.FundingTxid)
|
||||
assertTxInBlock(block, fundingTxID, t)
|
||||
|
||||
// TODO(roasbeef): remove and use "listchannels" command after
|
||||
// implemented.
|
||||
req := &lnrpc.ListPeersRequest{}
|
||||
alicePeerInfo, err := net.AliceClient.ListPeers(ctxb, req)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to list alice peers: %v", err)
|
||||
}
|
||||
bobPeerInfo, err := net.BobClient.ListPeers(ctxb, req)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to list bob peers: %v", err)
|
||||
}
|
||||
|
||||
// The channel should be listed in the peer information returned by
|
||||
// both peers.
|
||||
aliceChannels := alicePeerInfo.Peers[0].Channels
|
||||
if len(aliceChannels) < 1 {
|
||||
t.Fatalf("alice should have an active channel, instead have %v",
|
||||
len(aliceChannels))
|
||||
}
|
||||
bobChannels := bobPeerInfo.Peers[0].Channels
|
||||
if len(bobChannels) < 1 {
|
||||
t.Fatalf("bob should have an active channel, instead have %v",
|
||||
len(bobChannels))
|
||||
}
|
||||
aliceTxID := alicePeerInfo.Peers[0].Channels[0].ChannelPoint
|
||||
bobTxID := bobPeerInfo.Peers[0].Channels[0].ChannelPoint
|
||||
fundingTxIDStr := fundingTxID.String()
|
||||
if !strings.Contains(bobTxID, fundingTxIDStr) {
|
||||
t.Fatalf("alice's channel not found")
|
||||
}
|
||||
if !strings.Contains(aliceTxID, fundingTxIDStr) {
|
||||
t.Fatalf("bob's channel not found")
|
||||
err = net.AssertChannelExists(ctxb, net.AliceClient, net.BobClient,
|
||||
fundingChanPoint)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to assert channel existence: %v", err)
|
||||
}
|
||||
|
||||
// Initiate a close from Alice's side.
|
||||
closeReq := &lnrpc.CloseChannelRequest{
|
||||
ChannelPoint: fundingChanPoint,
|
||||
}
|
||||
closeRespStream, err := net.AliceClient.CloseChannel(ctxb, closeReq)
|
||||
closeUpdates, err := net.CloseChannel(ctxb, net.AliceClient, fundingChanPoint, false)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to close channel: %v", err)
|
||||
t.Fatalf("unable to close channel: %v", err)
|
||||
}
|
||||
|
||||
// Consume the "channel close" update in order to wait for the closing
|
||||
// transaction to be broadcast, then wait for the closing tx to be seen
|
||||
// within the network.
|
||||
closeResp, err := closeRespStream.Recv()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to read rpc resp: %v", err)
|
||||
}
|
||||
pendingClose, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
|
||||
if !ok {
|
||||
t.Fatalf("expected close pending update, got %v", pendingClose)
|
||||
}
|
||||
closeTxid, _ := wire.NewShaHash(pendingClose.ClosePending.Txid)
|
||||
net.WaitForTxBroadcast(*closeTxid)
|
||||
|
||||
// Finally, generate a single block, wait for the final close status
|
||||
// update, then ensure that the closing transaction was included in the
|
||||
// block.
|
||||
@ -157,16 +88,12 @@ func testBasicChannelFunding(net *networkHarness, t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get block: %v", err)
|
||||
}
|
||||
closeResp, err = closeRespStream.Recv()
|
||||
|
||||
closingTxid, err := net.WaitForChannelClose(closeUpdates)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to read rpc resp: %v", err)
|
||||
t.Fatalf("error while waiting for channel close: %v", err)
|
||||
}
|
||||
closeFin, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ChanClose)
|
||||
if !ok {
|
||||
t.Fatalf("expected channel open update, instead got %v", resp)
|
||||
}
|
||||
closingTxID, _ := wire.NewShaHash(closeFin.ChanClose.ClosingTxid)
|
||||
assertTxInBlock(block, closingTxID, t)
|
||||
assertTxInBlock(block, closingTxid, t)
|
||||
}
|
||||
|
||||
var lndTestCases = map[string]lndTestCase{
|
||||
|
163
networktest.go
163
networktest.go
@ -11,6 +11,7 @@ import (
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -402,11 +403,24 @@ out:
|
||||
}
|
||||
}
|
||||
|
||||
// Now that the initial test network has been initialized, launch the
|
||||
// network watcher.
|
||||
go n.networkWatcher()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TearDownAll tears down all active nodes within the test lightning network.
|
||||
func (n *networkHarness) TearDownAll() error {
|
||||
for _, node := range n.activeNodes {
|
||||
if err := node.shutdown(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// watchRequest encapsulates a request to the harness' network watcher to
|
||||
// dispatch a notification once a transaction with the target txid is seen
|
||||
// within the test network.
|
||||
@ -454,7 +468,9 @@ func (n *networkHarness) networkWatcher() {
|
||||
}
|
||||
}
|
||||
|
||||
func (n *networkHarness) OnTxAccepted(hash *wire.ShaHash, amount btcutil.Amount) {
|
||||
// OnTxAccepted is a callback to be called each time a new transaction has been
|
||||
// broadcast on the network.
|
||||
func (n *networkHarness) OnTxAccepted(hash *wire.ShaHash, amt btcutil.Amount) {
|
||||
go func() {
|
||||
n.seenTxns <- *hash
|
||||
}()
|
||||
@ -469,12 +485,145 @@ func (n *networkHarness) WaitForTxBroadcast(txid wire.ShaHash) {
|
||||
<-eventChan
|
||||
}
|
||||
|
||||
// TearDownAll tears down all active nodes within the test lightning network.
|
||||
func (n *networkHarness) TearDownAll() error {
|
||||
for _, node := range n.activeNodes {
|
||||
if err := node.shutdown(); err != nil {
|
||||
return err
|
||||
}
|
||||
// OpenChannel attempts to open a channel between srcNode and destNode with the
|
||||
// passed channel funding parameters.
|
||||
func (n *networkHarness) OpenChannel(ctx context.Context,
|
||||
srcNode, destNode lnrpc.LightningClient, amt btcutil.Amount,
|
||||
numConfs uint32) (lnrpc.Lightning_OpenChannelClient, error) {
|
||||
|
||||
// TODO(roasbeef): should pass actual id instead, will fail if more
|
||||
// connections added for Alice.
|
||||
openReq := &lnrpc.OpenChannelRequest{
|
||||
TargetPeerId: 1,
|
||||
LocalFundingAmount: int64(amt),
|
||||
RemoteFundingAmount: 0,
|
||||
NumConfs: 1,
|
||||
}
|
||||
respStream, err := srcNode.OpenChannel(ctx, openReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to open channel between "+
|
||||
"alice and bob: %v", err)
|
||||
}
|
||||
|
||||
// Consume the "channel pending" update. This waits until the node
|
||||
// notifies us that the final message in the channel funding workflow
|
||||
// has been sent to the remote node.
|
||||
resp, err := respStream.Recv()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read rpc resp: %v", err)
|
||||
}
|
||||
if _, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending); !ok {
|
||||
return nil, fmt.Errorf("expected channel pending update, "+
|
||||
"instead got %v", resp)
|
||||
}
|
||||
|
||||
return respStream, nil
|
||||
}
|
||||
|
||||
// WaitForChannelOpen waits for a notification that a channel is open by
|
||||
// consuming a message from the passed open channel stream.
|
||||
func (n *networkHarness) WaitForChannelOpen(openChanStream lnrpc.Lightning_OpenChannelClient) (*lnrpc.ChannelPoint, error) {
|
||||
resp, err := openChanStream.Recv()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read rpc resp: %v", err)
|
||||
}
|
||||
fundingResp, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected channel open update, instead got %v", resp)
|
||||
}
|
||||
|
||||
return fundingResp.ChanOpen.ChannelPoint, nil
|
||||
}
|
||||
|
||||
// CloseChannel attempts to close the channel indicated by the
|
||||
// passed channel point, initiated by the passed lnNode.
|
||||
func (n *networkHarness) CloseChannel(ctx context.Context,
|
||||
lnNode lnrpc.LightningClient, cp *lnrpc.ChannelPoint,
|
||||
force bool) (lnrpc.Lightning_CloseChannelClient, error) {
|
||||
|
||||
closeReq := &lnrpc.CloseChannelRequest{
|
||||
ChannelPoint: cp,
|
||||
AllowForceClose: force,
|
||||
}
|
||||
closeRespStream, err := lnNode.CloseChannel(ctx, closeReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to close channel: %v", err)
|
||||
}
|
||||
|
||||
// Consume the "channel close" update in order to wait for the closing
|
||||
// transaction to be broadcast, then wait for the closing tx to be seen
|
||||
// within the network.
|
||||
closeResp, err := closeRespStream.Recv()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read rpc resp: %v", err)
|
||||
}
|
||||
pendingClose, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected close pending update, got %v", pendingClose)
|
||||
}
|
||||
closeTxid, _ := wire.NewShaHash(pendingClose.ClosePending.Txid)
|
||||
n.WaitForTxBroadcast(*closeTxid)
|
||||
|
||||
return closeRespStream, nil
|
||||
}
|
||||
|
||||
// WaitForChannelClose waits for a notification from the passed channel close
|
||||
// stream that the node has deemed the channel has been fully closed.
|
||||
func (n *networkHarness) WaitForChannelClose(closeChanStream lnrpc.Lightning_CloseChannelClient) (*wire.ShaHash, error) {
|
||||
// TODO(roasbeef): use passed ctx to set a deadline on amount of time to
|
||||
// wait.
|
||||
closeResp, err := closeChanStream.Recv()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to read rpc resp: %v", err)
|
||||
}
|
||||
closeFin, ok := closeResp.Update.(*lnrpc.CloseStatusUpdate_ChanClose)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("expected channel close update, instead got %v", closeFin)
|
||||
}
|
||||
|
||||
return wire.NewShaHash(closeFin.ChanClose.ClosingTxid)
|
||||
}
|
||||
|
||||
// AssertChannelExists asserts that an active channel identified by
|
||||
// channelPoint exists between nodeA and nodeB.
|
||||
func (n *networkHarness) AssertChannelExists(ctx context.Context, nodeA, nodeB lnrpc.LightningClient,
|
||||
channelPoint *lnrpc.ChannelPoint) error {
|
||||
|
||||
// TODO(roasbeef): remove and use "listchannels" command after
|
||||
// implemented. Also make logic below more generic after addition of
|
||||
// the RPC.
|
||||
req := &lnrpc.ListPeersRequest{}
|
||||
alicePeerInfo, err := nodeA.ListPeers(ctx, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list nodeA peers: %v", err)
|
||||
}
|
||||
bobPeerInfo, err := nodeB.ListPeers(ctx, req)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to list nodeB peers: %v", err)
|
||||
}
|
||||
aliceChannels := alicePeerInfo.Peers[0].Channels
|
||||
if len(aliceChannels) < 1 {
|
||||
return fmt.Errorf("alice should have an active channel, instead have %v",
|
||||
len(aliceChannels))
|
||||
}
|
||||
bobChannels := bobPeerInfo.Peers[0].Channels
|
||||
if len(bobChannels) < 1 {
|
||||
return fmt.Errorf("bob should have an active channel, instead have %v",
|
||||
len(bobChannels))
|
||||
}
|
||||
|
||||
txid, err := wire.NewShaHash(channelPoint.FundingTxid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
aliceTxID := alicePeerInfo.Peers[0].Channels[0].ChannelPoint
|
||||
bobTxID := bobPeerInfo.Peers[0].Channels[0].ChannelPoint
|
||||
if !strings.Contains(bobTxID, txid.String()) {
|
||||
return fmt.Errorf("alice's channel not found")
|
||||
}
|
||||
if !strings.Contains(aliceTxID, txid.String()) {
|
||||
return fmt.Errorf("bob's channel not found")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
1
networktest_test.go
Normal file
1
networktest_test.go
Normal file
@ -0,0 +1 @@
|
||||
package main
|
Loading…
Reference in New Issue
Block a user