Merge pull request #4402 from bhandras/etcd_itest

itests: option to run our integration tests on etcd + boltdb (remote/local)
Johan T. Halseth 2020-11-18 19:48:03 +01:00 committed by GitHub
commit a8b6966017
9 changed files with 137 additions and 30 deletions


@@ -30,6 +30,8 @@ type BoltConfig struct {
// EtcdConfig holds etcd configuration.
type EtcdConfig struct {
Embedded bool `long:"embedded" description:"Use embedded etcd instance instead of the external one."`
Host string `long:"host" description:"Etcd database host."`
User string `long:"user" description:"Etcd database user."`


@@ -38,7 +38,7 @@ func (db *DB) Validate() error {
case BoltBackend:
case EtcdBackend:
if db.Etcd.Host == "" {
if !db.Etcd.Embedded && db.Etcd.Host == "" {
return fmt.Errorf("etcd host must be set")
}
@@ -76,8 +76,12 @@ func (db *DB) GetBackends(ctx context.Context, dbPath string,
)
if db.Backend == EtcdBackend {
// Prefix will separate key/values in the db.
remoteDB, err = kvdb.GetEtcdBackend(ctx, networkName, db.Etcd)
if db.Etcd.Embedded {
remoteDB, _, err = kvdb.GetEtcdTestBackend(dbPath, dbName)
} else {
// Prefix will separate key/values in the db.
remoteDB, err = kvdb.GetEtcdBackend(ctx, networkName, db.Etcd)
}
if err != nil {
return nil, err
}


@@ -9,6 +9,7 @@ import (
"io/ioutil"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
@@ -60,6 +61,10 @@ type NetworkHarness struct {
Alice *HarnessNode
Bob *HarnessNode
// useEtcd is set to true if new nodes are to be created with an
// embedded etcd backend instead of just bbolt.
useEtcd bool
// Channel for transmitting stderr output from failed lightning node
// to main process.
lndErrorChan chan error
@@ -77,8 +82,8 @@ type NetworkHarness struct {
// TODO(roasbeef): add option to use golang's build library to a binary of the
// current repo. This will save developers from having to manually `go install`
// within the repo each time before changes
func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) (
*NetworkHarness, error) {
func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string,
useEtcd bool) (*NetworkHarness, error) {
feeService := startFeeService()
@@ -92,6 +97,7 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) (
feeService: feeService,
quit: make(chan struct{}),
lndBinary: lndBinary,
useEtcd: useEtcd,
}
return &n, nil
}
@@ -376,6 +382,7 @@ func (n *NetworkHarness) newNode(name string, extraArgs []string, hasSeed bool,
NetParams: n.netParams,
ExtraArgs: extraArgs,
FeeURL: n.feeService.url,
Etcd: n.useEtcd,
})
if err != nil {
return nil, err
@@ -1397,3 +1404,47 @@ func CopyFile(dest, src string) error {
return d.Close()
}
// FileExists returns true if the file at path exists.
func FileExists(path string) bool {
if _, err := os.Stat(path); os.IsNotExist(err) {
return false
}
return true
}
// CopyAll copies all files and directories from srcDir to dstDir recursively.
// Note that this function does not support links.
func CopyAll(dstDir, srcDir string) error {
entries, err := ioutil.ReadDir(srcDir)
if err != nil {
return err
}
for _, entry := range entries {
srcPath := filepath.Join(srcDir, entry.Name())
dstPath := filepath.Join(dstDir, entry.Name())
info, err := os.Stat(srcPath)
if err != nil {
return err
}
if info.IsDir() {
err := os.Mkdir(dstPath, info.Mode())
if err != nil && !os.IsExist(err) {
return err
}
err = CopyAll(dstPath, srcPath)
if err != nil {
return err
}
} else if err := CopyFile(dstPath, srcPath); err != nil {
return err
}
}
return nil
}
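For orientation, the sketch below is not part of this diff: the helper name and callback are illustrative, and it assumes code living in the lntest/itest package next to the tests. It shows the snapshot/restore pattern the test changes below switch to: instead of copying the single channel.db file, the whole database directory is copied with CopyAll, so the same code works for both bbolt and the embedded etcd backend.

// snapshotAndRestoreDB is an illustrative sketch of the pattern used by the
// revocation and data loss tests below; this helper does not exist in the PR.
func snapshotAndRestoreDB(net *lntest.NetworkHarness, t *harnessTest,
	node *lntest.HarnessNode, advanceState func()) {

	// Scratch directory that will hold the snapshot.
	tempDbPath, err := ioutil.TempDir("", "db-snapshot")
	if err != nil {
		t.Fatalf("unable to create temp db folder: %v", err)
	}
	defer os.RemoveAll(tempDbPath)

	// Copy the node's entire DB directory rather than a single channel.db
	// file, so the snapshot also captures an embedded etcd backend's files.
	if err := lntest.CopyAll(tempDbPath, node.DBDir()); err != nil {
		t.Fatalf("unable to copy database files: %v", err)
	}

	// Move the channel state forward.
	advanceState()

	// Restart the node and, while it is down, overwrite its DB directory
	// with the earlier snapshot, forcing it back to the stale state.
	if err := net.RestartNode(node, func() error {
		return lntest.CopyAll(node.DBDir(), tempDbPath)
	}); err != nil {
		t.Fatalf("unable to restart node: %v", err)
	}
}

The test changes below apply this same pattern inline in each scenario rather than through a shared helper.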


@@ -12,7 +12,6 @@ import (
"io/ioutil"
"math"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
@@ -81,6 +80,9 @@ var (
"runtranche", defaultRunTranche, "run the tranche of the "+
"split test cases with the given (0-based) index",
)
// useEtcd makes the test LND nodes use (embedded) etcd as the remote db.
useEtcd = flag.Bool("etcd", false, "Use etcd backend for lnd.")
)
// getTestCaseSplitTranche returns the sub slice of the test cases that should
@@ -8238,13 +8240,12 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
if err != nil {
t.Fatalf("unable to create temp db folder: %v", err)
}
bobTempDbFile := filepath.Join(bobTempDbPath, "channel.db")
defer os.Remove(bobTempDbPath)
// With the temporary file created, copy Bob's current state into the
// temporary file we created above. Later after more updates, we'll
// restore this state.
if err := lntest.CopyFile(bobTempDbFile, net.Bob.DBPath()); err != nil {
if err := lntest.CopyAll(bobTempDbPath, net.Bob.DBDir()); err != nil {
t.Fatalf("unable to copy database files: %v", err)
}
@@ -8270,7 +8271,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
// state. With this, we essentially force Bob to travel back in time
// within the channel's history.
if err = net.RestartNode(net.Bob, func() error {
return os.Rename(bobTempDbFile, net.Bob.DBPath())
return lntest.CopyAll(net.Bob.DBDir(), bobTempDbPath)
}); err != nil {
t.Fatalf("unable to restart node: %v", err)
}
@@ -8493,13 +8494,12 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
if err != nil {
t.Fatalf("unable to create temp db folder: %v", err)
}
carolTempDbFile := filepath.Join(carolTempDbPath, "channel.db")
defer os.Remove(carolTempDbPath)
// With the temporary file created, copy Carol's current state into the
// temporary file we created above. Later after more updates, we'll
// restore this state.
if err := lntest.CopyFile(carolTempDbFile, carol.DBPath()); err != nil {
if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil {
t.Fatalf("unable to copy database files: %v", err)
}
@@ -8524,7 +8524,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
// state. With this, we essentially force Carol to travel back in time
// within the channel's history.
if err = net.RestartNode(carol, func() error {
return os.Rename(carolTempDbFile, carol.DBPath())
return lntest.CopyAll(carol.DBDir(), carolTempDbPath)
}); err != nil {
t.Fatalf("unable to restart node: %v", err)
}
@@ -8817,13 +8817,12 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
if err != nil {
t.Fatalf("unable to create temp db folder: %v", err)
}
carolTempDbFile := filepath.Join(carolTempDbPath, "channel.db")
defer os.Remove(carolTempDbPath)
// With the temporary file created, copy Carol's current state into the
// temporary file we created above. Later after more updates, we'll
// restore this state.
if err := lntest.CopyFile(carolTempDbFile, carol.DBPath()); err != nil {
if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil {
t.Fatalf("unable to copy database files: %v", err)
}
@@ -8857,7 +8856,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
// state. With this, we essentially force Carol to travel back in time
// within the channel's history.
if err = net.RestartNode(carol, func() error {
return os.Rename(carolTempDbFile, carol.DBPath())
return lntest.CopyAll(carol.DBDir(), carolTempDbPath)
}); err != nil {
t.Fatalf("unable to restart node: %v", err)
}
@@ -9219,13 +9218,12 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness,
if err != nil {
t.Fatalf("unable to create temp db folder: %v", err)
}
carolTempDbFile := filepath.Join(carolTempDbPath, "channel.db")
defer os.Remove(carolTempDbPath)
// With the temporary file created, copy Carol's current state into the
// temporary file we created above. Later after more updates, we'll
// restore this state.
if err := lntest.CopyFile(carolTempDbFile, carol.DBPath()); err != nil {
if err := lntest.CopyAll(carolTempDbPath, carol.DBDir()); err != nil {
t.Fatalf("unable to copy database files: %v", err)
}
@@ -9282,7 +9280,7 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness,
// state. With this, we essentially force Carol to travel back in time
// within the channel's history.
if err = net.RestartNode(carol, func() error {
return os.Rename(carolTempDbFile, carol.DBPath())
return lntest.CopyAll(carol.DBDir(), carolTempDbPath)
}); err != nil {
t.Fatalf("unable to restart node: %v", err)
}
@@ -9766,13 +9764,12 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
if err != nil {
t.Fatalf("unable to create temp db folder: %v", err)
}
tempDbFile := filepath.Join(tempDbPath, "channel.db")
defer os.Remove(tempDbPath)
// With the temporary file created, copy the current state into
// the temporary file we created above. Later after more
// updates, we'll restore this state.
if err := lntest.CopyFile(tempDbFile, node.DBPath()); err != nil {
if err := lntest.CopyAll(tempDbPath, node.DBDir()); err != nil {
t.Fatalf("unable to copy database files: %v", err)
}
@@ -9799,7 +9796,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
// force the node to travel back in time within the channel's
// history.
if err = net.RestartNode(node, func() error {
return os.Rename(tempDbFile, node.DBPath())
return lntest.CopyAll(node.DBDir(), tempDbPath)
}); err != nil {
t.Fatalf("unable to restart node: %v", err)
}
@@ -11607,10 +11604,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
var predErr error
err = wait.Predicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, numPayments)
if predErr != nil {
return false
}
return true
return predErr == nil
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
@@ -14250,7 +14244,9 @@ func TestLightningNetworkDaemon(t *testing.T) {
// Now we can set up our test harness (LND instance), with the chain
// backend we just created.
binary := ht.getLndBinary()
lndHarness, err = lntest.NewNetworkHarness(miner, chainBackend, binary)
lndHarness, err = lntest.NewNetworkHarness(
miner, chainBackend, binary, *useEtcd,
)
if err != nil {
ht.Fatalf("unable to create lightning network harness: %v", err)
}


@@ -183,6 +183,8 @@ type NodeConfig struct {
AcceptKeySend bool
FeeURL string
Etcd bool
}
func (cfg NodeConfig) P2PAddr() string {
@@ -197,9 +199,13 @@ func (cfg NodeConfig) RESTAddr() string {
return net.JoinHostPort("127.0.0.1", strconv.Itoa(cfg.RESTPort))
}
// DBDir returns the holding directory path of the graph database.
func (cfg NodeConfig) DBDir() string {
return filepath.Join(cfg.DataDir, "graph", cfg.NetParams.Name)
}
func (cfg NodeConfig) DBPath() string {
return filepath.Join(cfg.DataDir, "graph",
fmt.Sprintf("%v/channel.db", cfg.NetParams.Name))
return filepath.Join(cfg.DBDir(), "channel.db")
}
func (cfg NodeConfig) ChanBackupPath() string {
@@ -261,6 +267,11 @@ func (cfg NodeConfig) genArgs() []string {
args = append(args, "--accept-keysend")
}
if cfg.Etcd {
args = append(args, "--db.backend=etcd")
args = append(args, "--db.etcd.embedded")
}
if cfg.FeeURL != "" {
args = append(args, "--feeurl="+cfg.FeeURL)
}
@@ -433,6 +444,11 @@ func (hn *HarnessNode) DBPath() string {
return hn.Cfg.DBPath()
}
// DBDir returns the path for the directory holding channeldb file(s).
func (hn *HarnessNode) DBDir() string {
return hn.Cfg.DBDir()
}
// Name returns the name of this node set during initialization.
func (hn *HarnessNode) Name() string {
return hn.Cfg.Name
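As a hedged illustration of how the pieces above fit together: the helper below does not exist in the PR, and it assumes code compiled inside the lntest package with the btcd chaincfg import available. It shows what the new Etcd option and the DBDir helper do.

// newEmbeddedEtcdNodeConfig is an illustrative sketch only; it shows which
// knobs this PR adds, not a helper that exists in the repo.
func newEmbeddedEtcdNodeConfig(dataDir string,
	params *chaincfg.Params) NodeConfig {

	cfg := NodeConfig{
		DataDir:   dataDir,
		NetParams: params,
		// With Etcd set, genArgs() appends "--db.backend=etcd" and
		// "--db.etcd.embedded", so the node starts with an embedded
		// etcd instance instead of bbolt.
		Etcd: true,
	}

	// DBDir() resolves to <dataDir>/graph/<network>, while DBPath() still
	// points at the bbolt channel.db inside that directory. Tests that
	// snapshot state therefore copy the whole directory now.
	_ = cfg.DBDir()
	_ = cfg.DBPath()

	return cfg
}

The sample-lnd.conf hunk at the end of the diff exposes the same switch to users via db.backend=etcd and db.etcd.embedded.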


@@ -1,4 +1,4 @@
// +build !darwin
// +build !darwin, !kvdb_etcd
package lntest

lntest/timeouts_etcd.go (new file, 27 lines)

@@ -0,0 +1,27 @@
// +build !darwin, kvdb_etcd
package lntest
import "time"
const (
// MinerMempoolTimeout is the max time we will wait for a transaction
// to propagate to the mining node's mempool.
MinerMempoolTimeout = time.Minute
// ChannelOpenTimeout is the max time we will wait before a channel is
// considered opened.
ChannelOpenTimeout = time.Second * 30
// ChannelCloseTimeout is the max time we will wait before a channel is
// considered closed.
ChannelCloseTimeout = time.Second * 120
// DefaultTimeout is a timeout that will be used for various wait
// scenarios where no custom timeout value is defined.
DefaultTimeout = time.Second * 30
// AsyncBenchmarkTimeout is the timeout used when running the async
// payments benchmark.
AsyncBenchmarkTimeout = 2 * time.Minute
)


@@ -2,6 +2,7 @@ DEV_TAGS = dev
RPC_TAGS = autopilotrpc chainrpc invoicesrpc routerrpc signrpc verrpc walletrpc watchtowerrpc wtclientrpc
LOG_TAGS =
TEST_FLAGS =
ITEST_FLAGS =
COVER_PKG = $$(go list -deps ./... | grep '$(PKG)' | grep -v lnrpc)
NUM_ITEST_TRANCHES = 6
ITEST_PARALLELISM = $(NUM_ITEST_TRANCHES)
@@ -41,6 +42,12 @@ ifneq ($(icase),)
TEST_FLAGS += -test.run="TestLightningNetworkDaemon/.*-of-.*/.*/$(icase)"
endif
# Run itests with etcd backend.
ifeq ($(etcd),1)
ITEST_FLAGS += -etcd
DEV_TAGS += kvdb_etcd
endif
ifneq ($(tags),)
DEV_TAGS += ${tags}
endif
@@ -89,4 +96,4 @@ endif
# Construct the integration test command with the added build flags.
ITEST_TAGS := $(DEV_TAGS) $(RPC_TAGS) rpctest $(backend)
ITEST := rm lntest/itest/*.log; date; $(GOTEST) -v ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) -logoutput -goroutinedump
ITEST := rm -f lntest/itest/*.log; date; $(GOTEST) -v ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) $(ITEST_FLAGS) -logoutput -goroutinedump


@@ -952,6 +952,10 @@ litecoin.node=ltcd
; Whether to collect etcd commit stats.
; db.etcd.collect_stats=true
; If set LND will use an embedded etcd instance instead of the external one.
; Useful for testing.
; db.etcd.embedded=false
[bolt]
; If true, prevents the database from syncing its freelist to disk.
; db.bolt.nofreelistsync=1