Merge pull request #4655 from guggero/itest-parallel
Travis: run itests in parallel, get ~60% speedup overall
Commit 0552ba93c8
.gitignore (vendored), 2 lines changed

@@ -36,6 +36,8 @@ lntest/itest/output*.log
 lntest/itest/pprof*.log
 lntest/itest/.backendlogs
 lntest/itest/.minerlogs
 lntest/itest/lnd-itest
+lntest/itest/.logs-*
+
 cmd/cmd
 *.key
.travis.yml, 21 lines changed

@@ -50,32 +50,30 @@ jobs:
     - stage: Integration Test
       name: Btcd Integration
       script:
-        - make itest
+        - make itest-parallel

     - name: Bitcoind Integration (txindex enabled)
      script:
         - bash ./scripts/install_bitcoind.sh
-        - make itest backend=bitcoind
+        - make itest-parallel backend=bitcoind

     - name: Bitcoind Integration (txindex disabled)
       script:
         - bash ./scripts/install_bitcoind.sh
-        - make itest backend="bitcoind notxindex"
+        - make itest-parallel backend="bitcoind notxindex"

     - name: Neutrino Integration
       script:
-        - make itest backend=neutrino
+        - make itest-parallel backend=neutrino

     - name: Btcd Integration ARM
       script:
-        - GOARM=7 GOARCH=arm GOOS=linux CGO_ENABLED=0 make btcd build-itest
-        - file lnd-itest
-        - GOARM=7 GOARCH=arm GOOS=linux CGO_ENABLED=0 make itest-only
+        - GOARM=7 GOARCH=arm GOOS=linux make itest-parallel
       arch: arm64

     - name: Btcd Integration Windows
       script:
-        - make itest-windows
+        - make itest-parallel-windows
       os: windows
       before_install:
         - choco upgrade --no-progress -y make netcat curl findutils

@@ -85,7 +83,8 @@ jobs:
       case $TRAVIS_OS_NAME in
         windows)
           echo "Uploading to termbin.com..."
-          for f in ./lntest/itest/*.log; do cat $f | nc termbin.com 9999 | xargs -r0 printf "$f"' uploaded to %s'; done
+          LOG_FILES=$(find ./lntest/itest -name '*.log')
+          for f in $LOG_FILES; do echo -n $f; cat $f | nc termbin.com 9999 | xargs -r0 printf ' uploaded to %s'; done
           ;;
       esac

@@ -97,8 +96,8 @@ after_failure:
           ;;

         *)
-          LOG_FILES=./lntest/itest/*.log
-          echo "Uploading to termbin.com..." && find $LOG_FILES | xargs -I{} sh -c "cat {} | nc termbin.com 9999 | xargs -r0 printf '{} uploaded to %s'"
+          LOG_FILES=$(find ./lntest/itest -name '*.log')
+          echo "Uploading to termbin.com..." && for f in $LOG_FILES; do echo -n $f; cat $f | nc termbin.com 9999 | xargs -r0 printf ' uploaded to %s'; done
           echo "Uploading to file.io..." && tar -zcvO $LOG_FILES | curl -s -F 'file=@-;filename=logs.tar.gz' https://file.io | xargs -r0 printf 'logs.tar.gz uploaded to %s\n'
           ;;
       esac
Makefile, 21 lines changed

@@ -175,6 +175,27 @@ itest-only:

 itest: btcd build-itest itest-only

+itest-parallel: btcd
+    @$(call print, "Building lnd binary")
+    CGO_ENABLED=0 $(GOBUILD) -tags="$(ITEST_TAGS)" -o lntest/itest/lnd-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lnd
+
+    @$(call print, "Building itest binary for $(backend) backend")
+    CGO_ENABLED=0 $(GOTEST) -v ./lntest/itest -tags="$(DEV_TAGS) $(RPC_TAGS) rpctest $(backend)" -logoutput -goroutinedump -c -o lntest/itest/itest.test
+
+    @$(call print, "Running tests")
+    rm -rf lntest/itest/*.log lntest/itest/.logs-*
+    echo -n "$$(seq 0 $$(expr $(NUM_ITEST_TRANCHES) - 1))" | xargs -P $(NUM_ITEST_TRANCHES) -n 1 -I {} scripts/itest_part.sh {} $(NUM_ITEST_TRANCHES) $(TEST_FLAGS)
+
+itest-parallel-windows: btcd
+    @$(call print, "Building lnd binary")
+    CGO_ENABLED=0 $(GOBUILD) -tags="$(ITEST_TAGS)" -o lntest/itest/lnd-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lnd
+
+    @$(call print, "Building itest binary for $(backend) backend")
+    CGO_ENABLED=0 $(GOTEST) -v ./lntest/itest -tags="$(DEV_TAGS) $(RPC_TAGS) rpctest $(backend)" -logoutput -goroutinedump -c -o lntest/itest/itest.test.exe
+
+    @$(call print, "Running tests")
+    EXEC_SUFFIX=".exe" echo -n "$$(seq 0 $$(expr $(NUM_ITEST_TRANCHES) - 1))" | xargs -P $(NUM_ITEST_TRANCHES) -n 1 -I {} scripts/itest_part.sh {} $(NUM_ITEST_TRANCHES) $(TEST_FLAGS)
+
 itest-windows: btcd build-itest-windows itest-only

 unit: btcd
@@ -6,7 +6,6 @@ import (
     "errors"
     "fmt"
     "io/ioutil"
-    "math/rand"
     "os"
     "os/exec"
     "path/filepath"
@@ -16,8 +15,8 @@ import (
     "github.com/btcsuite/btcd/rpcclient"
 )

-// logDir is the name of the temporary log directory.
-const logDir = "./.backendlogs"
+// logDirPattern is the pattern of the name of the temporary log directory.
+const logDirPattern = "%s/.backendlogs"

 // BitcoindBackendConfig is an implementation of the BackendConfig interface
 // backed by a Bitcoind node.
@@ -74,15 +73,16 @@ func (b BitcoindBackendConfig) Name() string {
 func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string) (
     *BitcoindBackendConfig, func() error, error) {

+    baseLogDir := fmt.Sprintf(logDirPattern, GetLogDir())
     if netParams != &chaincfg.RegressionNetParams {
         return nil, nil, fmt.Errorf("only regtest supported")
     }

-    if err := os.MkdirAll(logDir, 0700); err != nil {
+    if err := os.MkdirAll(baseLogDir, 0700); err != nil {
         return nil, nil, err
     }

-    logFile, err := filepath.Abs(logDir + "/bitcoind.log")
+    logFile, err := filepath.Abs(baseLogDir + "/bitcoind.log")
     if err != nil {
         return nil, nil, err
     }
@@ -93,10 +93,10 @@ func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string) (
         fmt.Errorf("unable to create temp directory: %v", err)
     }

-    zmqBlockPath := "ipc:///" + tempBitcoindDir + "/blocks.socket"
-    zmqTxPath := "ipc:///" + tempBitcoindDir + "/txs.socket"
-    rpcPort := rand.Int()%(65536-1024) + 1024
-    p2pPort := rand.Int()%(65536-1024) + 1024
+    zmqBlockAddr := fmt.Sprintf("tcp://127.0.0.1:%d", nextAvailablePort())
+    zmqTxAddr := fmt.Sprintf("tcp://127.0.0.1:%d", nextAvailablePort())
+    rpcPort := nextAvailablePort()
+    p2pPort := nextAvailablePort()

     cmdArgs := []string{
         "-datadir=" + tempBitcoindDir,
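The hunk above replaces the rand-based RPC/P2P/ZMQ port selection with calls to nextAvailablePort(), the lntest helper that only hands out ports which are actually free, so tranches started in parallel cannot race each other onto the same port. The helper itself is not part of this diff; the following is only a minimal, self-contained sketch of the idea, assuming an atomically advanced counter named lastPort (the name appears in the ApplyPortOffset hunk further down) whose candidates are probed with net.Listen before being returned.

// Sketch only, not lnd's actual implementation.
package main

import (
    "fmt"
    "net"
    "sync/atomic"
)

// lastPort mirrors the name used in the diff; 5555 matches the new
// defaultNodePort value further down, but the concrete number is not
// important for the sketch.
var lastPort uint32 = 5555

func nextAvailablePortSketch() int {
    port := atomic.AddUint32(&lastPort, 1)
    for port < 65535 {
        // Only return ports we can actually bind right now; the listener
        // is closed again immediately so the caller can use the port.
        addr := fmt.Sprintf("127.0.0.1:%d", port)
        if l, err := net.Listen("tcp4", addr); err == nil {
            _ = l.Close()
            return int(port)
        }
        port = atomic.AddUint32(&lastPort, 1)
    }
    panic("no ports available for listening")
}

func main() {
    fmt.Println(nextAvailablePortSketch(), nextAvailablePortSketch())
}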
@@ -106,8 +106,8 @@ func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string) (
             "220110063096c221be9933c82d38e1",
         fmt.Sprintf("-rpcport=%d", rpcPort),
         fmt.Sprintf("-port=%d", p2pPort),
-        "-zmqpubrawblock=" + zmqBlockPath,
-        "-zmqpubrawtx=" + zmqTxPath,
+        "-zmqpubrawblock=" + zmqBlockAddr,
+        "-zmqpubrawtx=" + zmqTxAddr,
         "-debuglogfile=" + logFile,
     }
     cmdArgs = append(cmdArgs, extraArgs...)
@@ -129,13 +129,16 @@ func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string) (
         var errStr string
         // After shutting down the chain backend, we'll make a copy of
         // the log file before deleting the temporary log dir.
-        err := CopyFile("./output_bitcoind_chainbackend.log", logFile)
+        logDestination := fmt.Sprintf(
+            "%s/output_bitcoind_chainbackend.log", GetLogDir(),
+        )
+        err := CopyFile(logDestination, logFile)
         if err != nil {
             errStr += fmt.Sprintf("unable to copy file: %v\n", err)
         }
-        if err = os.RemoveAll(logDir); err != nil {
+        if err = os.RemoveAll(baseLogDir); err != nil {
             errStr += fmt.Sprintf(
-                "cannot remove dir %s: %v\n", logDir, err,
+                "cannot remove dir %s: %v\n", baseLogDir, err,
             )
         }
         if err := os.RemoveAll(tempBitcoindDir); err != nil {
@@ -178,8 +181,8 @@ func newBackend(miner string, netParams *chaincfg.Params, extraArgs []string) (
         rpcHost:      rpcHost,
         rpcUser:      rpcUser,
         rpcPass:      rpcPass,
-        zmqBlockPath: zmqBlockPath,
-        zmqTxPath:    zmqTxPath,
+        zmqBlockPath: zmqBlockAddr,
+        zmqTxPath:    zmqTxAddr,
         p2pPort:      p2pPort,
         rpcClient:    client,
         minerAddr:    miner,
@@ -14,8 +14,8 @@ import (
     "github.com/btcsuite/btcd/rpcclient"
 )

-// logDir is the name of the temporary log directory.
-const logDir = "./.backendlogs"
+// logDirPattern is the pattern of the name of the temporary log directory.
+const logDirPattern = "%s/.backendlogs"

 // temp is used to signal we want to establish a temporary connection using the
 // btcd Node API.
@@ -75,12 +75,13 @@ func (b BtcdBackendConfig) Name() string {
 func NewBackend(miner string, netParams *chaincfg.Params) (
     *BtcdBackendConfig, func() error, error) {

+    baseLogDir := fmt.Sprintf(logDirPattern, GetLogDir())
     args := []string{
         "--rejectnonstd",
         "--txindex",
         "--trickleinterval=100ms",
         "--debuglevel=debug",
-        "--logdir=" + logDir,
+        "--logdir=" + baseLogDir,
         "--nowinservice",
         // The miner will get banned and disconnected from the node if
         // its requested data are not found. We add a nobanning flag to
@@ -110,14 +111,17 @@ func NewBackend(miner string, netParams *chaincfg.Params) (

         // After shutting down the chain backend, we'll make a copy of
         // the log file before deleting the temporary log dir.
-        logFile := logDir + "/" + netParams.Name + "/btcd.log"
-        err := CopyFile("./output_btcd_chainbackend.log", logFile)
+        logFile := baseLogDir + "/" + netParams.Name + "/btcd.log"
+        logDestination := fmt.Sprintf(
+            "%s/output_btcd_chainbackend.log", GetLogDir(),
+        )
+        err := CopyFile(logDestination, logFile)
         if err != nil {
             errStr += fmt.Sprintf("unable to copy file: %v\n", err)
         }
-        if err = os.RemoveAll(logDir); err != nil {
+        if err = os.RemoveAll(baseLogDir); err != nil {
             errStr += fmt.Sprintf(
-                "cannot remove dir %s: %v\n", logDir, err,
+                "cannot remove dir %s: %v\n", baseLogDir, err,
             )
         }
         if errStr != "" {
@@ -16,9 +16,6 @@ const (
     // is returned. Requests for higher confirmation targets will fall back
     // to this.
     feeServiceTarget = 2
-
-    // feeServicePort is the tcp port on which the service runs.
-    feeServicePort = 16534
 )

 // feeService runs a web service that provides fee estimation information.
@@ -40,16 +37,15 @@ type feeEstimates struct {

 // startFeeService spins up a go-routine to serve fee estimates.
 func startFeeService() *feeService {
+    port := nextAvailablePort()
     f := feeService{
-        url: fmt.Sprintf(
-            "http://localhost:%v/fee-estimates.json", feeServicePort,
-        ),
+        url: fmt.Sprintf("http://localhost:%v/fee-estimates.json", port),
     }

     // Initialize default fee estimate.
     f.Fees = map[uint32]uint32{feeServiceTarget: 50000}

-    listenAddr := fmt.Sprintf(":%v", feeServicePort)
+    listenAddr := fmt.Sprintf(":%v", port)
     f.srv = &http.Server{
         Addr: listenAddr,
     }
@@ -1012,6 +1012,10 @@ func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness,
     require.Contains(t.t, err.Error(), "cannot close channel with state: ")
     require.Contains(t.t, err.Error(), "ChanStatusRestored")

+    // Increase the fee estimate so that the following force close tx will
+    // be cpfp'ed in case of anchor commitments.
+    net.SetFeeEstimate(30000)
+
     // Now that we have ensured that the channels restored by the backup are
     // in the correct state even without the remote peer telling us so,
     // let's start up Carol again.
@@ -101,15 +101,17 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest,
     // bob will attempt to redeem his anchor commitment (if the channel
     // type is of that type).
     if c == commitTypeAnchors {
-        _, err = waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout)
+        _, err = waitForNTxsInMempool(
+            net.Miner.Node, 1, minerMempoolTimeout,
+        )
         if err != nil {
-            t.Fatalf("unable to find bob's anchor commit sweep: %v", err)
+            t.Fatalf("unable to find bob's anchor commit sweep: %v",
+                err)
         }
     }

     // Mine enough blocks for Alice to sweep her funds from the force
-    // closed channel. closeCHannelAndAssertType() already mined a block
+    // closed channel. closeChannelAndAssertType() already mined a block
     // containing the commitment tx and the commit sweep tx will be
     // broadcast immediately before it can be included in a block, so mine
     // one less than defaultCSV in order to perform mempool assertions.
@@ -6,6 +6,7 @@ import (
     "crypto/rand"
     "crypto/sha256"
     "encoding/hex"
+    "flag"
     "fmt"
     "io"
     "io/ioutil"
@@ -13,7 +14,6 @@ import (
     "os"
     "path/filepath"
     "reflect"
-    "runtime"
     "strings"
     "sync"
     "sync/atomic"
@@ -53,6 +53,60 @@ import (
     "github.com/stretchr/testify/require"
 )

+const (
+    // defaultSplitTranches is the default number of tranches we split the
+    // test cases into.
+    defaultSplitTranches uint = 1
+
+    // defaultRunTranche is the default index of the test cases tranche that
+    // we run.
+    defaultRunTranche uint = 0
+)
+
+var (
+    // testCasesSplitParts is the number of tranches the test cases should
+    // be split into. By default this is set to 1, so no splitting happens.
+    // If this value is increased, then the -runtranche flag must be
+    // specified as well to indicate which part should be run in the current
+    // invocation.
+    testCasesSplitTranches = flag.Uint(
+        "splittranches", defaultSplitTranches, "split the test cases "+
+            "in this many tranches and run the tranche at "+
+            "0-based index specified by the -runtranche flag",
+    )
+
+    // testCasesRunTranche is the 0-based index of the split test cases
+    // tranche to run in the current invocation.
+    testCasesRunTranche = flag.Uint(
+        "runtranche", defaultRunTranche, "run the tranche of the "+
+            "split test cases with the given (0-based) index",
+    )
+)
+
+// getTestCaseSplitTranche returns the sub slice of the test cases that should
+// be run as the current split tranche as well as the index and slice offset of
+// the tranche.
+func getTestCaseSplitTranche() ([]*testCase, uint, uint) {
+    numTranches := defaultSplitTranches
+    if testCasesSplitTranches != nil {
+        numTranches = *testCasesSplitTranches
+    }
+    runTranche := defaultRunTranche
+    if testCasesRunTranche != nil {
+        runTranche = *testCasesRunTranche
+    }
+
+    numCases := uint(len(allTestCases))
+    testsPerTranche := numCases / numTranches
+    trancheOffset := runTranche * testsPerTranche
+    trancheEnd := trancheOffset + testsPerTranche
+    if trancheEnd > numCases || runTranche == numTranches-1 {
+        trancheEnd = numCases
+    }
+
+    return allTestCases[trancheOffset:trancheEnd], runTranche, trancheOffset
+}
+
 func rpcPointToWirePoint(t *harnessTest, chanPoint *lnrpc.ChannelPoint) wire.OutPoint {
     txid, err := lnd.GetChanPointFundingTxid(chanPoint)
     if err != nil {
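The added getTestCaseSplitTranche splits the single ordered test list into N roughly equal tranches, with the last tranche absorbing the remainder of the integer division so no case is silently dropped. To make that concrete, here is a hedged, standalone sketch of the same arithmetic; the case count (83) and tranche count (6) are made up for illustration, only the formula mirrors the hunk above.

package main

import "fmt"

// trancheBounds reproduces the offset/end computation from the diff.
func trancheBounds(numCases, numTranches, runTranche uint) (uint, uint) {
    testsPerTranche := numCases / numTranches
    trancheOffset := runTranche * testsPerTranche
    trancheEnd := trancheOffset + testsPerTranche
    // The last tranche also picks up the remainder of the integer division.
    if trancheEnd > numCases || runTranche == numTranches-1 {
        trancheEnd = numCases
    }
    return trancheOffset, trancheEnd
}

func main() {
    for tranche := uint(0); tranche < 6; tranche++ {
        start, end := trancheBounds(83, 6, tranche)
        fmt.Printf("tranche %d runs cases [%d, %d)\n", tranche, start, end)
    }
    // Tranches 0-4 each run 13 cases; tranche 5 runs the remaining 18
    // (indices 65 through 82).
}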
@@ -2380,7 +2434,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
     )

     // Set up a new miner that we can use to cause a reorg.
-    tempLogDir := "./.tempminerlogs"
+    tempLogDir := fmt.Sprintf("%s/.tempminerlogs", lntest.GetLogDir())
     logFilename := "output-open_channel_reorg-temp_miner.log"
     tempMiner, tempMinerCleanUp, err := lntest.NewMiner(
         tempLogDir, logFilename,
@@ -14098,10 +14152,16 @@ func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (
 // programmatically driven network of lnd nodes.
 func TestLightningNetworkDaemon(t *testing.T) {
     // If no tests are registered, then we can exit early.
-    if len(testsCases) == 0 {
+    if len(allTestCases) == 0 {
         t.Skip("integration tests not selected with flag 'rpctest'")
     }

+    // Parse testing flags that influence our test execution.
+    logDir := lntest.GetLogDir()
+    require.NoError(t, os.MkdirAll(logDir, 0700))
+    testCases, trancheIndex, trancheOffset := getTestCaseSplitTranche()
+    lntest.ApplyPortOffset(uint32(trancheIndex) * 1000)
+
     ht := newHarnessTest(t, nil)

     // Declare the network harness here to gain access to its
@@ -14117,7 +14177,7 @@ func TestLightningNetworkDaemon(t *testing.T) {
     // guarantees of getting included in to blocks.
     //
     // We will also connect it to our chain backend.
-    minerLogDir := "./.minerlogs"
+    minerLogDir := fmt.Sprintf("%s/.minerlogs", logDir)
     miner, minerCleanUp, err := lntest.NewMiner(
         minerLogDir, "output_btcd_miner.log",
         harnessNetParams, &rpcclient.NotificationHandlers{},
@@ -14149,27 +14209,12 @@ func TestLightningNetworkDaemon(t *testing.T) {
     // Connect chainbackend to miner.
     require.NoError(
-        t, chainBackend.ConnectMiner(),
-        "failed to connect to miner",
+        t, chainBackend.ConnectMiner(), "failed to connect to miner",
     )

-    binary := itestLndBinary
-    if runtime.GOOS == "windows" {
-        // Windows (even in a bash like environment like git bash as on
-        // Travis) doesn't seem to like relative paths to exe files...
-        currentDir, err := os.Getwd()
-        if err != nil {
-            ht.Fatalf("unable to get working directory: %v", err)
-        }
-        targetPath := filepath.Join(currentDir, "../../lnd-itest.exe")
-        binary, err = filepath.Abs(targetPath)
-        if err != nil {
-            ht.Fatalf("unable to get absolute path: %v", err)
-        }
-    }
-
     // Now we can set up our test harness (LND instance), with the chain
     // backend we just created.
+    binary := ht.getLndBinary()
     lndHarness, err = lntest.NewNetworkHarness(miner, chainBackend, binary)
     if err != nil {
         ht.Fatalf("unable to create lightning network harness: %v", err)
@@ -14187,7 +14232,8 @@ func TestLightningNetworkDaemon(t *testing.T) {
                 if !more {
                     return
                 }
-                ht.Logf("lnd finished with error (stderr):\n%v", err)
+                ht.Logf("lnd finished with error (stderr):\n%v",
+                    err)
             }
         }
     }()
@@ -14210,8 +14256,9 @@ func TestLightningNetworkDaemon(t *testing.T) {
         ht.Fatalf("unable to set up test lightning network: %v", err)
     }

-    t.Logf("Running %v integration tests", len(testsCases))
-    for _, testCase := range testsCases {
+    // Run the subset of the test cases selected in this tranche.
+    for idx, testCase := range testCases {
         testCase := testCase
         logLine := fmt.Sprintf("STARTING ============ %v ============\n",
             testCase.name)
@@ -14232,7 +14279,10 @@ func TestLightningNetworkDaemon(t *testing.T) {
         // Start every test with the default static fee estimate.
         lndHarness.SetFeeEstimate(12500)

-        success := t.Run(testCase.name, func(t1 *testing.T) {
+        name := fmt.Sprintf("%02d-of-%d/%s/%s",
+            trancheOffset+uint(idx)+1, len(allTestCases),
+            chainBackend.Name(), testCase.name)
+        success := t.Run(name, func(t1 *testing.T) {
             ht := newHarnessTest(t1, lndHarness)
             ht.RunTestCase(testCase)
         })
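Because the cases are now spread across tranches, the plain test-case name alone would no longer tell you which global case number or backend a log line belongs to, so the subtest name embeds both. A small sketch of the resulting name; all concrete values here are hypothetical, and the reworked icase filter further down is written to match this shape.

package main

import "fmt"

func main() {
    // Hypothetical tranche offset, in-tranche index, totals and backend.
    trancheOffset, idx := uint(65), 3
    allCases, backend, caseName := 83, "btcd", "sweep coins"
    name := fmt.Sprintf("%02d-of-%d/%s/%s",
        trancheOffset+uint(idx)+1, allCases, backend, caseName)
    fmt.Println(name) // prints: 69-of-83/btcd/sweep coins
    // A filter such as
    //   -test.run="TestLightningNetworkDaemon/.*-of-.*/.*/sweep coins"
    // (see the icase change in the make flags hunk further down) still
    // selects the case regardless of which tranche it landed in.
}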
@@ -14242,8 +14292,9 @@ func TestLightningNetworkDaemon(t *testing.T) {
         if !success {
             // Log failure time to help relate the lnd logs to the
             // failure.
-            t.Logf("Failure time: %v",
-                time.Now().Format("2006-01-02 15:04:05.000"))
+            t.Logf("Failure time: %v", time.Now().Format(
+                "2006-01-02 15:04:05.000",
+            ))
             break
         }
     }
@@ -2,4 +2,4 @@

 package itest

-var testsCases = []*testCase{}
+var allTestCases = []*testCase{}
@@ -2,7 +2,11 @@

 package itest

-var testsCases = []*testCase{
+var allTestCases = []*testCase{
+    {
+        name: "test multi-hop htlc",
+        test: testMultiHopHtlcClaims,
+    },
     {
         name: "sweep coins",
         test: testSweepAllCoins,
@@ -144,10 +148,6 @@ var testsCases = []*testCase{
         name: "async bidirectional payments",
         test: testBidirectionalAsyncPayments,
     },
-    {
-        name: "test multi-hop htlc",
-        test: testMultiHopHtlcClaims,
-    },
     {
         name: "switch circuit persistence",
         test: testSwitchCircuitPersistence,
@@ -3,8 +3,12 @@ package itest

 import (
     "bytes"
     "context"
+    "flag"
     "fmt"
     "math"
+    "os"
+    "path/filepath"
+    "runtime"
     "testing"
     "time"
@@ -20,6 +24,11 @@ import (

 var (
     harnessNetParams = &chaincfg.RegressionNetParams
+
+    // lndExecutable is the full path to the lnd binary.
+    lndExecutable = flag.String(
+        "lndexec", itestLndBinary, "full path to lnd binary",
+    )
 )

 const (
@@ -111,6 +120,31 @@ func (h *harnessTest) Log(args ...interface{}) {
     h.t.Log(args...)
 }

+func (h *harnessTest) getLndBinary() string {
+    binary := itestLndBinary
+    lndExec := ""
+    if lndExecutable != nil && *lndExecutable != "" {
+        lndExec = *lndExecutable
+    }
+    if lndExec == "" && runtime.GOOS == "windows" {
+        // Windows (even in a bash like environment like git bash as on
+        // Travis) doesn't seem to like relative paths to exe files...
+        currentDir, err := os.Getwd()
+        if err != nil {
+            h.Fatalf("unable to get working directory: %v", err)
+        }
+        targetPath := filepath.Join(currentDir, "../../lnd-itest.exe")
+        binary, err = filepath.Abs(targetPath)
+        if err != nil {
+            h.Fatalf("unable to get absolute path: %v", err)
+        }
+    } else if lndExec != "" {
+        binary = lndExec
+    }
+
+    return binary
+}
+
 type testCase struct {
     name string
     test func(net *lntest.NetworkHarness, t *harnessTest)
@@ -43,7 +43,7 @@ const (
     // defaultNodePort is the start of the range for listening ports of
     // harness nodes. Ports are monotonically increasing starting from this
     // number and are determined by the results of nextAvailablePort().
-    defaultNodePort = 19555
+    defaultNodePort = 5555

     // logPubKeyBytes is the number of bytes of the node's PubKey that will
     // be appended to the log file name. The whole PubKey is too long and
@@ -70,6 +70,10 @@ var (
     logOutput = flag.Bool("logoutput", false,
         "log output from node n to file output-n.log")

+    // logSubDir is the default directory where the logs are written to if
+    // logOutput is true.
+    logSubDir = flag.String("logdir", ".", "default dir to write logs to")
+
     // goroutineDump is a flag that can be set to dump the active
     // goroutines of test nodes on failure.
     goroutineDump = flag.Bool("goroutinedump", false,
@@ -104,6 +108,21 @@ func nextAvailablePort() int {
     panic("no ports available for listening")
 }

+// ApplyPortOffset adds the given offset to the lastPort variable, making it
+// possible to run the tests in parallel without colliding on the same ports.
+func ApplyPortOffset(offset uint32) {
+    _ = atomic.AddUint32(&lastPort, offset)
+}
+
+// GetLogDir returns the passed --logdir flag or the default value if it wasn't
+// set.
+func GetLogDir() string {
+    if logSubDir != nil && *logSubDir != "" {
+        return *logSubDir
+    }
+    return "."
+}
+
 // generateListeningPorts returns four ints representing ports to listen on
 // designated for the current lightning network test. This returns the next
 // available ports for the p2p, rpc, rest and profiling services.
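ApplyPortOffset is what keeps the parallel tranches from fighting over ports: before allocating anything, each tranche bumps the shared counter by trancheIndex * 1000 (see the TestLightningNetworkDaemon hunk above), so every tranche draws from its own window above defaultNodePort. A tiny illustrative sketch of that spacing, assuming six tranches; the 5555 base and the 1000 spacing come from this diff, everything else is made up.

package main

import "fmt"

const defaultNodePort = 5555

func main() {
    for trancheIndex := uint32(0); trancheIndex < 6; trancheIndex++ {
        // Mirrors lntest.ApplyPortOffset(trancheIndex * 1000): each tranche
        // starts handing out ports 1000 above the previous one, so up to
        // 1000 ports per tranche never collide across parallel runs.
        start := defaultNodePort + trancheIndex*1000
        fmt.Printf("tranche %d allocates ports starting at %d\n",
            trancheIndex, start)
    }
}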
@@ -386,11 +405,9 @@ func NewMiner(logDir, logFilename string, netParams *chaincfg.Params,

         // After shutting down the miner, we'll make a copy of the log
         // file before deleting the temporary log dir.
-        logFile := fmt.Sprintf(
-            "%s/%s/btcd.log", logDir, netParams.Name,
-        )
-        copyPath := fmt.Sprintf("./%s", logFilename)
-        err := CopyFile(copyPath, logFile)
+        logFile := fmt.Sprintf("%s/%s/btcd.log", logDir, netParams.Name)
+        copyPath := fmt.Sprintf("%s/../%s", logDir, logFilename)
+        err := CopyFile(filepath.Clean(copyPath), logFile)
         if err != nil {
             return fmt.Errorf("unable to copy file: %v", err)
         }
@@ -475,24 +492,28 @@ func (hn *HarnessNode) start(lndBinary string, lndError chan<- error) error {
     // If the logoutput flag is passed, redirect output from the nodes to
     // log files.
     if *logOutput {
-        fileName := fmt.Sprintf("output-%d-%s-%s.log", hn.NodeID,
+        dir := GetLogDir()
+        fileName := fmt.Sprintf("%s/output-%d-%s-%s.log", dir, hn.NodeID,
             hn.Cfg.Name, hex.EncodeToString(hn.PubKey[:logPubKeyBytes]))

         // If the node's PubKey is not yet initialized, create a temporary
         // file name. Later, after the PubKey has been initialized, the
         // file can be moved to its final name with the PubKey included.
         if bytes.Equal(hn.PubKey[:4], []byte{0, 0, 0, 0}) {
-            fileName = fmt.Sprintf("output-%d-%s-tmp__.log", hn.NodeID,
-                hn.Cfg.Name)
+            fileName = fmt.Sprintf("%s/output-%d-%s-tmp__.log",
+                dir, hn.NodeID, hn.Cfg.Name)

             // Once the node has done its work, the log file can be renamed.
             finalizeLogfile = func() {
                 if hn.logFile != nil {
                     hn.logFile.Close()

-                    newFileName := fmt.Sprintf("output-%d-%s-%s.log",
-                        hn.NodeID, hn.Cfg.Name,
-                        hex.EncodeToString(hn.PubKey[:logPubKeyBytes]))
+                    pubKeyHex := hex.EncodeToString(
+                        hn.PubKey[:logPubKeyBytes],
+                    )
+                    newFileName := fmt.Sprintf("%s/output"+
+                        "-%d-%s-%s.log", dir, hn.NodeID,
+                        hn.Cfg.Name, pubKeyHex)
                     err := os.Rename(fileName, newFileName)
                     if err != nil {
                         fmt.Printf("could not rename "+
@@ -3,12 +3,18 @@ RPC_TAGS = autopilotrpc chainrpc invoicesrpc routerrpc signrpc verrpc walletrpc
 LOG_TAGS =
 TEST_FLAGS =
 COVER_PKG = $$(go list -deps ./... | grep '$(PKG)' | grep -v lnrpc)
+NUM_ITEST_TRANCHES = 6

 # If rpc option is set also add all extra RPC tags to DEV_TAGS
 ifneq ($(with-rpc),)
 DEV_TAGS += $(RPC_TAGS)
 endif

+# Scale the number of parallel running itest tranches.
+ifneq ($(tranches),)
+NUM_ITEST_TRANCHES = $(tranches)
+endif
+
 # If specific package is being unit tested, construct the full name of the
 # subpackage.
 ifneq ($(pkg),)
@@ -25,7 +31,7 @@ endif

 # Define the integration test.run filter if the icase argument was provided.
 ifneq ($(icase),)
-TEST_FLAGS += -test.run=TestLightningNetworkDaemon/$(icase)
+TEST_FLAGS += -test.run="TestLightningNetworkDaemon/.*-of-.*/.*/$(icase)"
 endif

 ifneq ($(tags),)
scripts/itest_part.sh (new executable file, 23 lines)

@@ -0,0 +1,23 @@
+#!/bin/bash
+
+# Let's work with absolute paths only, we run in the itest directory itself.
+WORKDIR=$(pwd)/lntest/itest
+
+TRANCHE=$1
+NUM_TRANCHES=$2
+
+# Shift the passed parameters by two, giving us all remaining testing flags in
+# the $@ special variable.
+shift
+shift
+
+# Windows insists on having the .exe suffix for an executable, we need to add
+# that here if necessary.
+EXEC="$WORKDIR"/itest.test"$EXEC_SUFFIX"
+LND_EXEC="$WORKDIR"/lnd-itest"$EXEC_SUFFIX"
+echo $EXEC -test.v "$@" -logoutput -goroutinedump -logdir=.logs-tranche$TRANCHE -lndexec=$LND_EXEC -splittranches=$NUM_TRANCHES -runtranche=$TRANCHE
+
+# Exit code 255 causes the parallel jobs to abort, so if one part fails the
+# other is aborted too.
+cd "$WORKDIR" || exit 255
+$EXEC -test.v "$@" -logoutput -goroutinedump -logdir=.logs-tranche$TRANCHE -lndexec=$LND_EXEC -splittranches=$NUM_TRANCHES -runtranche=$TRANCHE || exit 255