Merge pull request #4402 from bhandras/etcd_itest

itests: option to run our integration tests on etcd + boltdb (remote/local)

commit a8b6966017
@@ -30,6 +30,8 @@ type BoltConfig struct {
 // EtcdConfig holds etcd configuration.
 type EtcdConfig struct {
+    Embedded bool `long:"embedded" description:"Use embedded etcd instance instead of the external one."`
+
     Host string `long:"host" description:"Etcd database host."`

     User string `long:"user" description:"Etcd database user."`
lncfg/db.go (10 changed lines)
@@ -38,7 +38,7 @@ func (db *DB) Validate() error {
     case BoltBackend:

     case EtcdBackend:
-        if db.Etcd.Host == "" {
+        if !db.Etcd.Embedded && db.Etcd.Host == "" {
             return fmt.Errorf("etcd host must be set")
         }

@@ -76,8 +76,12 @@ func (db *DB) GetBackends(ctx context.Context, dbPath string,
     )

     if db.Backend == EtcdBackend {
-        // Prefix will separate key/values in the db.
-        remoteDB, err = kvdb.GetEtcdBackend(ctx, networkName, db.Etcd)
+        if db.Etcd.Embedded {
+            remoteDB, _, err = kvdb.GetEtcdTestBackend(dbPath, dbName)
+        } else {
+            // Prefix will separate key/values in the db.
+            remoteDB, err = kvdb.GetEtcdBackend(ctx, networkName, db.Etcd)
+        }
         if err != nil {
             return nil, err
         }
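
When the embedded option is set, the backend is provisioned in-process via kvdb.GetEtcdTestBackend (its second return value is discarded here) instead of dialing an external etcd cluster; this is what lets the integration tests below exercise the etcd code path without any external infrastructure.
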
@@ -9,6 +9,7 @@ import (
     "io/ioutil"
     "net/http"
     "os"
+    "path/filepath"
     "strings"
     "sync"
     "time"
@@ -60,6 +61,10 @@ type NetworkHarness struct {
     Alice *HarnessNode
     Bob   *HarnessNode

+    // useEtcd is set to true if new nodes are to be created with an
+    // embedded etcd backend instead of just bbolt.
+    useEtcd bool
+
     // Channel for transmitting stderr output from failed lightning node
     // to main process.
     lndErrorChan chan error
@@ -77,8 +82,8 @@ type NetworkHarness struct {
 // TODO(roasbeef): add option to use golang's build library to a binary of the
 // current repo. This will save developers from having to manually `go install`
 // within the repo each time before changes
-func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) (
-    *NetworkHarness, error) {
+func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string,
+    useEtcd bool) (*NetworkHarness, error) {

     feeService := startFeeService()

@@ -92,6 +97,7 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) (
         feeService: feeService,
         quit:       make(chan struct{}),
         lndBinary:  lndBinary,
+        useEtcd:    useEtcd,
     }
     return &n, nil
 }
@@ -376,6 +382,7 @@ func (n *NetworkHarness) newNode(name string, extraArgs []string, hasSeed bool,
         NetParams: n.netParams,
         ExtraArgs: extraArgs,
         FeeURL:    n.feeService.url,
+        Etcd:      n.useEtcd,
     })
     if err != nil {
         return nil, err
@@ -1397,3 +1404,47 @@ func CopyFile(dest, src string) error {

     return d.Close()
 }
+
+// FileExists returns true if the file at path exists.
+func FileExists(path string) bool {
+    if _, err := os.Stat(path); os.IsNotExist(err) {
+        return false
+    }
+
+    return true
+}
+
+// CopyAll copies all files and directories from srcDir to dstDir recursively.
+// Note that this function does not support links.
+func CopyAll(dstDir, srcDir string) error {
+    entries, err := ioutil.ReadDir(srcDir)
+    if err != nil {
+        return err
+    }
+
+    for _, entry := range entries {
+        srcPath := filepath.Join(srcDir, entry.Name())
+        dstPath := filepath.Join(dstDir, entry.Name())
+
+        info, err := os.Stat(srcPath)
+        if err != nil {
+            return err
+        }
+
+        if info.IsDir() {
+            err := os.Mkdir(dstPath, info.Mode())
+            if err != nil && !os.IsExist(err) {
+                return err
+            }
+
+            err = CopyAll(dstPath, srcPath)
+            if err != nil {
+                return err
+            }
+        } else if err := CopyFile(dstPath, srcPath); err != nil {
+            return err
+        }
+    }
+
+    return nil
+}
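
The revocation and data-loss itests further down use these helpers to snapshot and later restore a node's entire database directory (instead of copying only channel.db), which keeps working when the node runs with an embedded etcd backend. A minimal sketch of that pattern, assuming the usual io/ioutil, os, and lntest imports; the function and variable names are illustrative, not from the commit:

    // Sketch (not part of the commit): roll a node's database directory back
    // to an earlier snapshot, the pattern the itests below switch to.
    func rollBackNodeState(net *lntest.NetworkHarness, node *lntest.HarnessNode) error {
        // Scratch directory that holds the snapshot.
        tempDbPath, err := ioutil.TempDir("", "past-state")
        if err != nil {
            return err
        }
        defer os.RemoveAll(tempDbPath)

        // Snapshot: copy every file under the node's DB directory. This
        // covers bbolt's channel.db as well as an embedded etcd's on-disk state.
        if err := lntest.CopyAll(tempDbPath, node.DBDir()); err != nil {
            return err
        }

        // ... a real test generates further channel updates here ...

        // Restore: copy the snapshot back while the node restarts, forcing it
        // back to the earlier state.
        return net.RestartNode(node, func() error {
            return lntest.CopyAll(node.DBDir(), tempDbPath)
        })
    }
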
@@ -12,7 +12,6 @@ import (
     "io/ioutil"
     "math"
     "os"
-    "path/filepath"
     "reflect"
     "strings"
     "sync"
@@ -81,6 +80,9 @@ var (
         "runtranche", defaultRunTranche, "run the tranche of the "+
             "split test cases with the given (0-based) index",
     )
+
+    // useEtcd test LND nodes use (embedded) etcd as remote db.
+    useEtcd = flag.Bool("etcd", false, "Use etcd backend for lnd.")
 )

 // getTestCaseSplitTranche returns the sub slice of the test cases that should
@@ -8238,13 +8240,12 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
     if err != nil {
         t.Fatalf("unable to create temp db folder: %v", err)
     }
-    bobTempDbFile := filepath.Join(bobTempDbPath, "channel.db")
     defer os.Remove(bobTempDbPath)

     // With the temporary file created, copy Bob's current state into the
     // temporary file we created above. Later after more updates, we'll
     // restore this state.
-    if err := lntest.CopyFile(bobTempDbFile, net.Bob.DBPath()); err != nil {
+    if err := lntest.CopyAll(bobTempDbPath, net.Bob.DBDir()); err != nil {
         t.Fatalf("unable to copy database files: %v", err)
     }

@@ -8270,7 +8271,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
     // state. With this, we essentially force Bob to travel back in time
     // within the channel's history.
     if err = net.RestartNode(net.Bob, func() error {
-        return os.Rename(bobTempDbFile, net.Bob.DBPath())
+        return lntest.CopyAll(net.Bob.DBDir(), bobTempDbPath)
     }); err != nil {
         t.Fatalf("unable to restart node: %v", err)
     }
@@ -8493,13 +8494,12 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
     if err != nil {
         t.Fatalf("unable to create temp db folder: %v", err)
     }
-    carolTempDbFile := filepath.Join(carolTempDbPath, "channel.db")
     defer os.Remove(carolTempDbPath)

     // With the temporary file created, copy Carol's current state into the
     // temporary file we created above. Later after more updates, we'll
     // restore this state.
-    if err := lntest.CopyFile(carolTempDbFile, carol.DBPath()); err != nil {
+    if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil {
         t.Fatalf("unable to copy database files: %v", err)
     }

@@ -8524,7 +8524,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
     // state. With this, we essentially force Carol to travel back in time
    // within the channel's history.
     if err = net.RestartNode(carol, func() error {
-        return os.Rename(carolTempDbFile, carol.DBPath())
+        return lntest.CopyAll(carol.DBDir(), carolTempDbPath)
     }); err != nil {
         t.Fatalf("unable to restart node: %v", err)
     }
@@ -8817,13 +8817,12 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
     if err != nil {
         t.Fatalf("unable to create temp db folder: %v", err)
     }
-    carolTempDbFile := filepath.Join(carolTempDbPath, "channel.db")
     defer os.Remove(carolTempDbPath)

     // With the temporary file created, copy Carol's current state into the
     // temporary file we created above. Later after more updates, we'll
     // restore this state.
-    if err := lntest.CopyFile(carolTempDbFile, carol.DBPath()); err != nil {
+    if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil {
         t.Fatalf("unable to copy database files: %v", err)
     }

@@ -8857,7 +8856,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
     // state. With this, we essentially force Carol to travel back in time
     // within the channel's history.
     if err = net.RestartNode(carol, func() error {
-        return os.Rename(carolTempDbFile, carol.DBPath())
+        return lntest.CopyAll(carol.DBDir(), carolTempDbPath)
     }); err != nil {
         t.Fatalf("unable to restart node: %v", err)
     }
@@ -9219,13 +9218,12 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness,
     if err != nil {
         t.Fatalf("unable to create temp db folder: %v", err)
     }
-    carolTempDbFile := filepath.Join(carolTempDbPath, "channel.db")
     defer os.Remove(carolTempDbPath)

     // With the temporary file created, copy Carol's current state into the
     // temporary file we created above. Later after more updates, we'll
     // restore this state.
-    if err := lntest.CopyFile(carolTempDbFile, carol.DBPath()); err != nil {
+    if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil {
         t.Fatalf("unable to copy database files: %v", err)
     }

@@ -9282,7 +9280,7 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness,
     // state. With this, we essentially force Carol to travel back in time
     // within the channel's history.
     if err = net.RestartNode(carol, func() error {
-        return os.Rename(carolTempDbFile, carol.DBPath())
+        return lntest.CopyAll(carol.DBDir(), carolTempDbPath)
     }); err != nil {
         t.Fatalf("unable to restart node: %v", err)
     }
@@ -9766,13 +9764,12 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
     if err != nil {
         t.Fatalf("unable to create temp db folder: %v", err)
     }
-    tempDbFile := filepath.Join(tempDbPath, "channel.db")
     defer os.Remove(tempDbPath)

     // With the temporary file created, copy the current state into
     // the temporary file we created above. Later after more
     // updates, we'll restore this state.
-    if err := lntest.CopyFile(tempDbFile, node.DBPath()); err != nil {
+    if err := lntest.CopyAll(tempDbPath, node.DBDir()); err != nil {
         t.Fatalf("unable to copy database files: %v", err)
     }

@@ -9799,7 +9796,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
     // force the node to travel back in time within the channel's
     // history.
     if err = net.RestartNode(node, func() error {
-        return os.Rename(tempDbFile, node.DBPath())
+        return lntest.CopyAll(node.DBDir(), tempDbPath)
     }); err != nil {
         t.Fatalf("unable to restart node: %v", err)
     }
@@ -11607,10 +11604,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
     var predErr error
     err = wait.Predicate(func() bool {
         predErr = assertNumActiveHtlcs(nodes, numPayments)
-        if predErr != nil {
-            return false
-        }
-        return true
+        return predErr == nil
     }, time.Second*15)
     if err != nil {
         t.Fatalf("htlc mismatch: %v", predErr)
@@ -14250,7 +14244,9 @@ func TestLightningNetworkDaemon(t *testing.T) {
     // Now we can set up our test harness (LND instance), with the chain
     // backend we just created.
     binary := ht.getLndBinary()
-    lndHarness, err = lntest.NewNetworkHarness(miner, chainBackend, binary)
+    lndHarness, err = lntest.NewNetworkHarness(
+        miner, chainBackend, binary, *useEtcd,
+    )
     if err != nil {
         ht.Fatalf("unable to create lightning network harness: %v", err)
     }
@@ -183,6 +183,8 @@ type NodeConfig struct {
     AcceptKeySend bool

     FeeURL string
+
+    Etcd bool
 }

 func (cfg NodeConfig) P2PAddr() string {
@@ -197,9 +199,13 @@ func (cfg NodeConfig) RESTAddr() string {
     return net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RESTPort))
 }

+// DBDir returns the holding directory path of the graph database.
+func (cfg NodeConfig) DBDir() string {
+    return filepath.Join(cfg.DataDir, "graph", cfg.NetParams.Name)
+}
+
 func (cfg NodeConfig) DBPath() string {
-    return filepath.Join(cfg.DataDir, "graph",
-        fmt.Sprintf("%v/channel.db", cfg.NetParams.Name))
+    return filepath.Join(cfg.DBDir(), "channel.db")
 }

 func (cfg NodeConfig) ChanBackupPath() string {
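
For illustration (values not taken from the commit): with DataDir set to /tmp/lndtest-node1 and the simnet parameters, DBDir() resolves to /tmp/lndtest-node1/graph/simnet and DBPath() to /tmp/lndtest-node1/graph/simnet/channel.db. Expressing DBPath in terms of DBDir keeps the two in sync and gives the itests a single directory they can copy wholesale with CopyAll.
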
@@ -261,6 +267,11 @@ func (cfg NodeConfig) genArgs() []string {
     args = append(args, "--accept-keysend")
     }

+    if cfg.Etcd {
+        args = append(args, "--db.backend=etcd")
+        args = append(args, "--db.etcd.embedded")
+    }
+
     if cfg.FeeURL != "" {
         args = append(args, "--feeurl="+cfg.FeeURL)
     }
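
These two arguments map onto the config options added earlier in this commit: --db.backend=etcd selects the etcd kvdb backend, and --db.etcd.embedded enables the embedded instance, which is also why Validate() above no longer insists on db.etcd.host in the embedded case.
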
@@ -433,6 +444,11 @@ func (hn *HarnessNode) DBPath() string {
     return hn.Cfg.DBPath()
 }

+// DBDir returns the path for the directory holding channeldb file(s).
+func (hn *HarnessNode) DBDir() string {
+    return hn.Cfg.DBDir()
+}
+
 // Name returns the name of this node set during initialization.
 func (hn *HarnessNode) Name() string {
     return hn.Cfg.Name
@@ -1,4 +1,4 @@
-// +build !darwin
+// +build !darwin, !kvdb_etcd

 package lntest

lntest/timeouts_etcd.go (new file, 27 lines)
@@ -0,0 +1,27 @@
+// +build !darwin, kvdb_etcd
+
+package lntest
+
+import "time"
+
+const (
+    // MinerMempoolTimeout is the max time we will wait for a transaction
+    // to propagate to the mining node's mempool.
+    MinerMempoolTimeout = time.Minute
+
+    // ChannelOpenTimeout is the max time we will wait before a channel to
+    // be considered opened.
+    ChannelOpenTimeout = time.Second * 30
+
+    // ChannelCloseTimeout is the max time we will wait before a channel is
+    // considered closed.
+    ChannelCloseTimeout = time.Second * 120
+
+    // DefaultTimeout is a timeout that will be used for various wait
+    // scenarios where no custom timeout value is defined.
+    DefaultTimeout = time.Second * 30
+
+    // AsyncBenchmarkTimeout is the timeout used when running the async
+    // payments benchmark.
+    AsyncBenchmarkTimeout = 2 * time.Minute
+)
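
Together with the retagged default timeouts file above (now +build !darwin, !kvdb_etcd), the intent is that on non-darwin builds exactly one of the two files is compiled into lntest, so builds made with the kvdb_etcd tag get their own set of harness timeout constants.
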
@@ -2,6 +2,7 @@ DEV_TAGS = dev
 RPC_TAGS = autopilotrpc chainrpc invoicesrpc routerrpc signrpc verrpc walletrpc watchtowerrpc wtclientrpc
 LOG_TAGS =
 TEST_FLAGS =
+ITEST_FLAGS =
 COVER_PKG = $$(go list -deps ./... | grep '$(PKG)' | grep -v lnrpc)
 NUM_ITEST_TRANCHES = 6
 ITEST_PARALLELISM = $(NUM_ITEST_TRANCHES)
@@ -41,6 +42,12 @@ ifneq ($(icase),)
 TEST_FLAGS += -test.run="TestLightningNetworkDaemon/.*-of-.*/.*/$(icase)"
 endif

+# Run itests with etcd backend.
+ifeq ($(etcd),1)
+ITEST_FLAGS += -etcd
+DEV_TAGS += kvdb_etcd
+endif
+
 ifneq ($(tags),)
 DEV_TAGS += ${tags}
 endif
@@ -89,4 +96,4 @@ endif
 # Construct the integration test command with the added build flags.
 ITEST_TAGS := $(DEV_TAGS) $(RPC_TAGS) rpctest $(backend)

-ITEST := rm lntest/itest/*.log; date; $(GOTEST) -v ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) -logoutput -goroutinedump
+ITEST := rm -f lntest/itest/*.log; date; $(GOTEST) -v ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) $(ITEST_FLAGS) -logoutput -goroutinedump
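
Putting the Makefile pieces together, an invocation along the lines of make itest etcd=1 (the exact target name is not shown in this diff) adds kvdb_etcd to the build tags and passes -etcd through ITEST_FLAGS to the itest binary, so every test node is started with --db.backend=etcd --db.etcd.embedded.
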
@@ -952,6 +952,10 @@ litecoin.node=ltcd
 ; Whether to collect etcd commit stats.
 ; db.etcd.collect_stats=true

+; If set LND will use an embedded etcd instance instead of the external one.
+; Useful for testing.
+; db.etcd.embedded=false
+
 [bolt]
 ; If true, prevents the database from syncing its freelist to disk.
 ; db.bolt.nofreelistsync=1