test: add ability to networkHarness to cause an arbitrary node to restart
This commit adds a new feature to the network harness: test writers can now select an arbitrary node and cause it to restart. This functionality will be useful for testing scenarios such as persisting data across restarts, re-syncing after re-connections, reacting to the counterparty broadcasting revoked states, etc.
parent 75ea05aef6
commit 39c279b639
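For orientation, here is a minimal sketch (not part of the diff below) of what the new capability looks like from a test writer's point of view. It assumes a test body in which the harness (lndHarness), a node handle (alice), a context (ctx) and the testing.T value (t) are already set up, as in the test added by this commit:

	// Restart the selected node; RestartNode blocks until the old process
	// has exited and the RPC connection to the new process is live again.
	if err := lndHarness.RestartNode(alice); err != nil {
		t.Fatalf("unable to restart node: %v", err)
	}

	// The node is immediately usable over RPC once the restart returns.
	info, err := alice.GetInfo(ctx, &lnrpc.GetInfoRequest{})
	if err != nil {
		t.Fatalf("unable to query node info: %v", err)
	}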
@@ -91,6 +91,10 @@ type lightningNode struct {
 	cmd *exec.Cmd
 	pidFile string
 
+	// processExit is a channel that's closed once it's detected that the
+	// process this instance of lightningNode is bound to has exited.
+	processExit chan struct{}
+
 	extraArgs []string
 
 	lnrpc.LightningClient
@@ -121,13 +125,13 @@ func newLightningNode(rpcConfig *btcrpcclient.ConnConfig, lndArgs []string) (*li
 	numActiveNodes++
 
-	return &lightningNode{
-		cfg: cfg,
+	return &lightningNode{cfg: cfg,
 		p2pAddr: net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.PeerPort)),
 		rpcAddr: net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RPCPort)),
 		rpcCert: rpcConfig.Certificates,
 		nodeId: nodeNum,
+		processExit: make(chan struct{}),
 		extraArgs: lndArgs,
 	}, nil
 }
 
@@ -176,6 +180,9 @@ func (l *lightningNode) start(lndError chan error) error {
 		if err := l.cmd.Wait(); err != nil {
 			lndError <- errors.New(errb.String())
 		}
+
+		// Signal any onlookers that this process has exited.
+		close(l.processExit)
 	}()
 
 	pid, err := os.Create(filepath.Join(l.cfg.DataDir,
@@ -255,6 +262,22 @@ func (l *lightningNode) stop() error {
 	return l.cmd.Process.Signal(os.Interrupt)
 }
 
+// restart attempts to restart a lightning node by shutting it down cleanly,
+// then restarting the process. This function is fully blocking. Upon restart,
+// the RPC connection to the node will be re-attempted, continuing iff the
+// connection attempt is successful.
+func (l *lightningNode) restart(errChan chan error) error {
+	if err := l.stop(); err != nil {
+		return nil
+	}
+
+	<-l.processExit
+
+	l.processExit = make(chan struct{})
+
+	return l.start(errChan)
+}
+
 // shutdown stops the active lnd process and clean up any temporary directories
 // created along the way.
 func (l *lightningNode) shutdown() error {
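The restart method above leans on a common Go synchronization idiom: the goroutine started in start broadcasts "the process has exited" by closing processExit, a waiter blocks on that close, and the channel has to be re-made before the next run because a closed channel cannot be reused. A minimal, self-contained sketch of just that pattern (plain standard library, not lnd code, with hypothetical names):

	package main

	import (
		"fmt"
		"time"
	)

	type proc struct {
		exit chan struct{}
	}

	func (p *proc) run() {
		go func() {
			// Stand-in for cmd.Wait(): the "process" runs, then terminates.
			time.Sleep(10 * time.Millisecond)
			// Closing the channel is a broadcast: every receiver unblocks.
			close(p.exit)
		}()
	}

	func main() {
		p := &proc{exit: make(chan struct{})}

		p.run()
		<-p.exit // block until the first "process" has exited

		// A closed channel stays closed, so a fresh channel is needed before
		// the next run can signal its own exit (mirrors restart re-making
		// l.processExit before calling start again).
		p.exit = make(chan struct{})

		p.run()
		<-p.exit
		fmt.Println("restarted and observed the second exit")
	}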
@@ -513,6 +536,18 @@ func (n *networkHarness) ConnectNodes(ctx context.Context, a, b *lightningNode)
 	return nil
 }
 
+// RestartNode attempts to restart a lightning node by shutting it down
+// cleanly, then restarting the process. This function is fully blocking. Upon
+// restart, the RPC connection to the node will be re-attempted, continuing iff
+// the connection attempt is successful.
+//
+// This method can be useful when testing edge cases such as a node broadcast
+// and invalidated prior state, or persistent state recovery, simulating node
+// crashes, etc.
+func (n *networkHarness) RestartNode(node *lightningNode) error {
+	return node.restart(n.lndErrorChan)
+}
+
 // TODO(roasbeef): add a WithChannel higher-order function?
 //  * python-like context manager w.r.t using a channel within a test
 //  * possibly adds more funds to the target wallet if the funds are not
@@ -1 +1,75 @@
 package main
+
+import (
+	"context"
+	"testing"
+
+	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/roasbeef/btcd/rpctest"
+	"github.com/roasbeef/btcrpcclient"
+)
+
+// TODO(roasbeef): randomize ports so can start multiple instance with each
+// other, will need to catch up to rpctest upstream.
+
+func TestNodeRestart(t *testing.T) {
+	// Create a new instance of the network harness in order to initialize the
+	// necessary state. We'll also need to create a temporary instance of
+	// rpctest due to initialization dependancies.
+	lndHarness, err := newNetworkHarness()
+	if err != nil {
+		t.Fatalf("unable to create lightning network harness: %v", err)
+	}
+	defer lndHarness.TearDownAll()
+
+	handlers := &btcrpcclient.NotificationHandlers{
+		OnTxAccepted: lndHarness.OnTxAccepted,
+	}
+
+	btcdHarness, err := rpctest.New(harnessNetParams, handlers, nil)
+	if err != nil {
+		t.Fatalf("unable to create mining node: %v", err)
+	}
+	defer btcdHarness.TearDown()
+	if err := btcdHarness.SetUp(true, 50); err != nil {
+		t.Fatalf("unable to set up mining node: %v", err)
+	}
+	if err := btcdHarness.Node.NotifyNewTransactions(false); err != nil {
+		t.Fatalf("unable to request transaction notifications: %v", err)
+	}
+
+	if err := lndHarness.InitializeSeedNodes(btcdHarness, nil); err != nil {
+		t.Fatalf("unable to initialize seed nodes: %v", err)
+	}
+	if err = lndHarness.SetUp(); err != nil {
+		t.Fatalf("unable to set up test lightning network: %v", err)
+	}
+
+	// With the harness set up, we can test the node restart method,
+	// asserting all data is properly persisted and recovered.
+	ctx := context.Background()
+
+	// First, we'll test restarting one of the initial seed nodes: Alice.
+	alice := lndHarness.Alice
+	aliceInfo, err := alice.GetInfo(ctx, &lnrpc.GetInfoRequest{})
+	if err != nil {
+		t.Fatalf("unable to query for alice's info: %v", err)
+	}
+
+	// With alice's node information stored, attempt to restart the node.
+	if lndHarness.RestartNode(alice); err != nil {
+		t.Fatalf("unable to resart node: %v", err)
+	}
+
+	// Query for alice's current information, it should be identical to
+	// what we received above.
+	aliceInfoRestart, err := alice.GetInfo(ctx, &lnrpc.GetInfoRequest{})
+	if err != nil {
+		t.Fatalf("unable to query for alice's info: %v", err)
+	}
+
+	if aliceInfo.IdentityPubkey != aliceInfoRestart.IdentityPubkey {
+		t.Fatalf("node info after restart doesn't match: %v vs %v",
+			aliceInfo.IdentityPubkey, aliceInfoRestart.IdentityPubkey)
+	}
+}
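For reference, the new test can be run in isolation with the standard Go tooling from the repository root:

	go test -v -run TestNodeRestart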