diff --git a/networktest.go b/networktest.go index a0ced7c2..a335fffe 100644 --- a/networktest.go +++ b/networktest.go @@ -91,6 +91,10 @@ type lightningNode struct { cmd *exec.Cmd pidFile string + // processExit is a channel that's closed once it's detected that the + // process this instance of lightningNode is bound to has exited. + processExit chan struct{} + extraArgs []string lnrpc.LightningClient @@ -121,13 +125,13 @@ func newLightningNode(rpcConfig *btcrpcclient.ConnConfig, lndArgs []string) (*li numActiveNodes++ - return &lightningNode{ - cfg: cfg, - p2pAddr: net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.PeerPort)), - rpcAddr: net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RPCPort)), - rpcCert: rpcConfig.Certificates, - nodeId: nodeNum, - extraArgs: lndArgs, + return &lightningNode{cfg: cfg, + p2pAddr: net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.PeerPort)), + rpcAddr: net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RPCPort)), + rpcCert: rpcConfig.Certificates, + nodeId: nodeNum, + processExit: make(chan struct{}), + extraArgs: lndArgs, }, nil } @@ -176,6 +180,9 @@ func (l *lightningNode) start(lndError chan error) error { if err := l.cmd.Wait(); err != nil { lndError <- errors.New(errb.String()) } + + // Signal any onlookers that this process has exited. + close(l.processExit) }() pid, err := os.Create(filepath.Join(l.cfg.DataDir, @@ -255,6 +262,22 @@ func (l *lightningNode) stop() error { return l.cmd.Process.Signal(os.Interrupt) } +// restart attempts to restart a lightning node by shutting it down cleanly, +// then restarting the process. This function is fully blocking. Upon restart, +// the RPC connection to the node will be re-attempted, continuing iff the +// connection attempt is successful. 
+func (l *lightningNode) restart(errChan chan error) error { + if err := l.stop(); err != nil { + return err + } + + <-l.processExit + + l.processExit = make(chan struct{}) + + return l.start(errChan) +} + // shutdown stops the active lnd process and clean up any temporary directories // created along the way. func (l *lightningNode) shutdown() error { @@ -513,6 +536,18 @@ func (n *networkHarness) ConnectNodes(ctx context.Context, a, b *lightningNode) return nil } +// RestartNode attempts to restart a lightning node by shutting it down +// cleanly, then restarting the process. This function is fully blocking. Upon +// restart, the RPC connection to the node will be re-attempted, continuing iff +// the connection attempt is successful. +// +// This method can be useful when testing edge cases such as a node broadcast +// and invalidated prior state, or persistent state recovery, simulating node +// crashes, etc. +func (n *networkHarness) RestartNode(node *lightningNode) error { + return node.restart(n.lndErrorChan) +} + // TODO(roasbeef): add a WithChannel higher-order function? // * python-like context manager w.r.t using a channel within a test // * possibly adds more funds to the target wallet if the funds are not diff --git a/networktest_test.go b/networktest_test.go index 06ab7d0f..b5285f46 100644 --- a/networktest_test.go +++ b/networktest_test.go @@ -1 +1,75 @@ package main + +import ( + "context" + "testing" + + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/roasbeef/btcd/rpctest" + "github.com/roasbeef/btcrpcclient" +) + +// TODO(roasbeef): randomize ports so can start multiple instance with each +// other, will need to catch up to rpctest upstream. + +func TestNodeRestart(t *testing.T) { + // Create a new instance of the network harness in order to initialize the + // necessary state. We'll also need to create a temporary instance of + // rpctest due to initialization dependencies. 
+ lndHarness, err := newNetworkHarness() + if err != nil { + t.Fatalf("unable to create lightning network harness: %v", err) + } + defer lndHarness.TearDownAll() + + handlers := &btcrpcclient.NotificationHandlers{ + OnTxAccepted: lndHarness.OnTxAccepted, + } + + btcdHarness, err := rpctest.New(harnessNetParams, handlers, nil) + if err != nil { + t.Fatalf("unable to create mining node: %v", err) + } + defer btcdHarness.TearDown() + if err := btcdHarness.SetUp(true, 50); err != nil { + t.Fatalf("unable to set up mining node: %v", err) + } + if err := btcdHarness.Node.NotifyNewTransactions(false); err != nil { + t.Fatalf("unable to request transaction notifications: %v", err) + } + + if err := lndHarness.InitializeSeedNodes(btcdHarness, nil); err != nil { + t.Fatalf("unable to initialize seed nodes: %v", err) + } + if err = lndHarness.SetUp(); err != nil { + t.Fatalf("unable to set up test lightning network: %v", err) + } + + // With the harness set up, we can test the node restart method, + // asserting all data is properly persisted and recovered. + ctx := context.Background() + + // First, we'll test restarting one of the initial seed nodes: Alice. + alice := lndHarness.Alice + aliceInfo, err := alice.GetInfo(ctx, &lnrpc.GetInfoRequest{}) + if err != nil { + t.Fatalf("unable to query for alice's info: %v", err) + } + + // With alice's node information stored, attempt to restart the node. + if err := lndHarness.RestartNode(alice); err != nil { + t.Fatalf("unable to restart node: %v", err) + } + + // Query for alice's current information, it should be identical to + // what we received above. + aliceInfoRestart, err := alice.GetInfo(ctx, &lnrpc.GetInfoRequest{}) + if err != nil { + t.Fatalf("unable to query for alice's info: %v", err) + } + + if aliceInfo.IdentityPubkey != aliceInfoRestart.IdentityPubkey { + t.Fatalf("node info after restart doesn't match: %v vs %v", + aliceInfo.IdentityPubkey, aliceInfoRestart.IdentityPubkey) + }
}