lntest: use nextAvailablePort for miner and btcd backend

With the new btcd version we can specify our own listen address
generator function for any btcd nodes. This should reduce flakiness as
the previous way of getting a free port was based on just picking a
random number, which led to conflicts.
We also double the default values for connection retries and timeouts,
effectively waiting up to 4 seconds in total now.
This commit is contained in:
Oliver Gugger 2020-12-03 11:30:23 +01:00
parent 2edc3cf98f
commit 36756c012d
No known key found for this signature in database
GPG Key ID: 8E4256593F177720
3 changed files with 42 additions and 22 deletions

View File

@ -93,6 +93,14 @@ func NewBackend(miner string, netParams *chaincfg.Params) (
return nil, nil, fmt.Errorf("unable to create btcd node: %v", err)
}
// We want to overwrite some of the connection settings to make the
// tests more robust. We might need to restart the backend while there
// are already blocks present, which will take a bit longer than the
// 1 second the default settings amount to. Doubling both values will
// give us retries up to 4 seconds.
chainBackend.MaxConnRetries = rpctest.DefaultMaxConnectionRetries * 2
chainBackend.ConnectionRetryTimeout = rpctest.DefaultConnectionRetryTimeout * 2
if err := chainBackend.SetUp(false, 0); err != nil {
return nil, nil, fmt.Errorf("unable to set up btcd backend: %v", err)
}

View File

@ -14241,7 +14241,10 @@ func TestLightningNetworkDaemon(t *testing.T) {
testCases, trancheIndex, trancheOffset := getTestCaseSplitTranche()
lntest.ApplyPortOffset(uint32(trancheIndex) * 1000)
ht := newHarnessTest(t, nil)
// Before we start any node, we need to make sure that any btcd node
// that is started through the RPC harness uses a unique port as well to
// avoid any port collisions.
rpctest.ListenAddressGenerator = lntest.GenerateBtcdListenerAddresses
// Declare the network harness here to gain access to its
// 'OnTxAccepted' call back.
@ -14270,29 +14273,27 @@ func TestLightningNetworkDaemon(t *testing.T) {
chainBackend, cleanUp, err := lntest.NewBackend(
miner.P2PAddress(), harnessNetParams,
)
if err != nil {
ht.Fatalf("unable to start backend: %v", err)
}
require.NoError(t, err, "new backend")
defer func() {
require.NoError(
t, cleanUp(), "failed to clean up chain backend",
)
require.NoError(t, cleanUp(), "cleanup")
}()
if err := miner.SetUp(true, 50); err != nil {
ht.Fatalf("unable to set up mining node: %v", err)
}
if err := miner.Node.NotifyNewTransactions(false); err != nil {
ht.Fatalf("unable to request transaction notifications: %v", err)
}
// Before we start anything, we want to overwrite some of the connection
// settings to make the tests more robust. We might need to restart the
// miner while there are already blocks present, which will take a bit
// longer than the 1 second the default settings amount to. Doubling
// both values will give us retries up to 4 seconds.
miner.MaxConnRetries = rpctest.DefaultMaxConnectionRetries * 2
miner.ConnectionRetryTimeout = rpctest.DefaultConnectionRetryTimeout * 2
// Connect chainbackend to miner.
require.NoError(
t, chainBackend.ConnectMiner(), "failed to connect to miner",
)
// Set up miner and connect chain backend to it.
require.NoError(t, miner.SetUp(true, 50))
require.NoError(t, miner.Node.NotifyNewTransactions(false))
require.NoError(t, chainBackend.ConnectMiner(), "connect miner")
// Now we can set up our test harness (LND instance), with the chain
// backend we just created.
ht := newHarnessTest(t, nil)
binary := ht.getLndBinary()
lndHarness, err = lntest.NewNetworkHarness(
miner, chainBackend, binary, *useEtcd,

View File

@ -12,7 +12,6 @@ import (
"os"
"os/exec"
"path/filepath"
"strconv"
"sync"
"sync/atomic"
"time"
@ -54,6 +53,10 @@ const (
// trickleDelay is the amount of time in milliseconds between each
// release of announcements by AuthenticatedGossiper to the network.
trickleDelay = 50
// listenerFormat is the format string that is used to generate local
// listener addresses.
listenerFormat = "127.0.0.1:%d"
)
var (
@ -98,7 +101,7 @@ func nextAvailablePort() int {
// the harness node, in practice in CI servers this seems much
// less likely than simply some other process already being
// bound at the start of the tests.
addr := fmt.Sprintf("127.0.0.1:%d", port)
addr := fmt.Sprintf(listenerFormat, port)
l, err := net.Listen("tcp4", addr)
if err == nil {
err := l.Close()
@ -138,6 +141,14 @@ func GetBtcdBinary() string {
return ""
}
// GenerateBtcdListenerAddresses returns two local listener addresses, each
// bound to a distinct free port. It is intended to replace rpctest's default
// address generator, which picks ports in a way that is prone to collisions.
func GenerateBtcdListenerAddresses() (string, string) {
	firstAddr := fmt.Sprintf(listenerFormat, nextAvailablePort())
	secondAddr := fmt.Sprintf(listenerFormat, nextAvailablePort())
	return firstAddr, secondAddr
}
// generateListeningPorts returns four ints representing ports to listen on
// designated for the current lightning network test. This returns the next
// available ports for the p2p, rpc, rest and profiling services.
@ -203,15 +214,15 @@ type NodeConfig struct {
}
func (cfg NodeConfig) P2PAddr() string {
return net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.P2PPort))
return fmt.Sprintf(listenerFormat, cfg.P2PPort)
}
func (cfg NodeConfig) RPCAddr() string {
return net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RPCPort))
return fmt.Sprintf(listenerFormat, cfg.RPCPort)
}
func (cfg NodeConfig) RESTAddr() string {
return net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RESTPort))
return fmt.Sprintf(listenerFormat, cfg.RESTPort)
}
// DBDir returns the holding directory path of the graph database.