lntest: CloseChannel will wait for both nodes to show chan as active if not force
In this commit, we modify CloseChannel to wait for both nodes to detect the channel as active before we attempt to close it. This should reduce many of the flakes we've been seeing on Travis, which were caused by node A detecting the channel as active but node B not, leading to a test flake under certain timing conditions. The new function uses the recently added WaitPredicate method.
This commit is contained in:
parent
c3f84b1a47
commit
6992822865
@ -642,32 +642,63 @@ func (n *NetworkHarness) CloseChannel(ctx context.Context,
|
|||||||
Index: cp.OutputIndex,
|
Index: cp.OutputIndex,
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we are not force closing the channel, wait for channel to become
|
// We'll wait for *both* nodes to read the channel as active if we're
|
||||||
// active before attempting to close it.
|
// performing a cooperative channel closure.
|
||||||
numTries := 10
|
if !force {
|
||||||
CheckActive:
|
timeout := time.Second * 15
|
||||||
for i := 0; !force && i < numTries; i++ {
|
|
||||||
listReq := &lnrpc.ListChannelsRequest{}
|
listReq := &lnrpc.ListChannelsRequest{}
|
||||||
listResp, err := lnNode.ListChannels(ctx, listReq)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("unable fetch node's "+
|
|
||||||
"channels: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, c := range listResp.Channels {
|
// We define two helper functions, one to locate a particular
|
||||||
if c.ChannelPoint == chanPoint.String() && c.Active {
|
// channel, and the other to check if a channel is active or
|
||||||
break CheckActive
|
// not.
|
||||||
|
filterChannel := func(node *HarnessNode,
|
||||||
|
op wire.OutPoint) (*lnrpc.ActiveChannel, error) {
|
||||||
|
listResp, err := node.ListChannels(ctx, listReq)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, c := range listResp.Channels {
|
||||||
|
if c.ChannelPoint == op.String() {
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, fmt.Errorf("unable to find channel")
|
||||||
|
}
|
||||||
|
activeChanPredicate := func(node *HarnessNode) func() bool {
|
||||||
|
return func() bool {
|
||||||
|
channel, err := filterChannel(node, chanPoint)
|
||||||
|
if err != nil {
|
||||||
|
}
|
||||||
|
|
||||||
|
return channel.Active
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if i == numTries-1 {
|
// Next, we'll fetch the target channel in order to get the
|
||||||
// Last iteration, and channel is still not active.
|
// harness node that'll be receiving the channel close request.
|
||||||
return nil, nil, fmt.Errorf("channel did not become " +
|
targetChan, err := filterChannel(lnNode, chanPoint)
|
||||||
"active")
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
receivingNode, err := n.LookUpNodeByPub(targetChan.RemotePubkey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sleep, and try again.
|
// Before proceeding, we'll ensure that the channel is active
|
||||||
time.Sleep(300 * time.Millisecond)
|
// for both nodes.
|
||||||
|
err = WaitPredicate(activeChanPredicate(lnNode), timeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("channel of closing " +
|
||||||
|
"node not active in time")
|
||||||
|
}
|
||||||
|
err = WaitPredicate(activeChanPredicate(receivingNode), timeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("channel of receiving " +
|
||||||
|
"node not active in time")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
closeReq := &lnrpc.CloseChannelRequest{
|
closeReq := &lnrpc.CloseChannelRequest{
|
||||||
|
Loading…
Reference in New Issue
Block a user