Merge pull request #4475 from guggero/travis-windows

travis: add integration test on Windows
Olaoluwa Osuntokun 2020-08-06 18:15:02 -07:00 committed by GitHub
commit fcb41dae37
9 changed files with 294 additions and 50 deletions

@@ -68,7 +68,32 @@ jobs:
- GOARM=7 GOARCH=arm GOOS=linux CGO_ENABLED=0 make itest-only
arch: arm64
- name: Btcd Integration Windows
script:
- make itest-windows
os: windows
before_install:
- choco upgrade --no-progress -y make netcat curl findutils
- export MAKE=mingw32-make
after_script:
- |-
case $TRAVIS_OS_NAME in
windows)
echo "Uploading to termbin.com..."
for f in ./lntest/itest/*.log; do cat $f | nc termbin.com 9999 | xargs -r0 printf "$f"' uploaded to %s'; done
;;
esac
after_script:
- LOG_FILES=./lntest/itest/*.log
- echo "Uploading to termbin.com..." && find $LOG_FILES | xargs -I{} sh -c "cat {} | nc termbin.com 9999 | xargs -r0 printf '{} uploaded to %s'"
- echo "Uploading to file.io..." && tar -zcvO $LOG_FILES | curl -s -F 'file=@-;filename=logs.tar.gz' https://file.io | xargs -r0 printf 'logs.tar.gz uploaded to %s\n'
- |-
case $TRAVIS_OS_NAME in
windows)
# Needs other commands, see after_script of the Windows build
;;
*)
LOG_FILES=./lntest/itest/*.log
echo "Uploading to termbin.com..." && find $LOG_FILES | xargs -I{} sh -c "cat {} | nc termbin.com 9999 | xargs -r0 printf '{} uploaded to %s'"
echo "Uploading to file.io..." && tar -zcvO $LOG_FILES | curl -s -F 'file=@-;filename=logs.tar.gz' https://file.io | xargs -r0 printf 'logs.tar.gz uploaded to %s\n'
;;
esac

@@ -132,6 +132,11 @@ build-itest:
$(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lnd
$(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lncli
build-itest-windows:
@$(call print, "Building itest lnd and lncli.")
$(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lnd
$(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lncli
install:
@$(call print, "Installing lnd and lncli.")
$(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)/cmd/lnd
@@ -158,6 +163,8 @@ itest-only:
itest: btcd build-itest itest-only
itest-windows: btcd build-itest-windows itest-only
unit: btcd
@$(call print, "Running unit tests.")
$(UNIT)

@@ -6,6 +6,7 @@ import (
"sort"
"time"
"github.com/btcsuite/btcwallet/walletdb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire"
)
@@ -104,10 +105,9 @@ func decodeForwardingEvent(r io.Reader, f *ForwardingEvent) error {
func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error {
// Before we create the database transaction, we'll ensure that the set
// of forwarding events is properly sorted according to their
// timestamp.
sort.Slice(events, func(i, j int) bool {
return events[i].Timestamp.Before(events[j].Timestamp)
})
// timestamp and that no duplicate timestamps exist, to avoid collisions
// in the keys we are going to store the events under.
makeUniqueTimestamps(events)
var timestamp [8]byte
@@ -124,22 +124,7 @@ func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error {
// With the bucket obtained, we can now begin to write out the
// series of events.
for _, event := range events {
var eventBytes [forwardingEventSize]byte
eventBuf := bytes.NewBuffer(eventBytes[0:0:forwardingEventSize])
// First, we'll serialize this timestamp into our
// timestamp buffer.
byteOrder.PutUint64(
timestamp[:], uint64(event.Timestamp.UnixNano()),
)
// With the key encoded, we'll then encode the event
// into our buffer, then write it out to disk.
err := encodeForwardingEvent(eventBuf, &event)
if err != nil {
return err
}
err = logBucket.Put(timestamp[:], eventBuf.Bytes())
err := storeEvent(logBucket, event, timestamp[:])
if err != nil {
return err
}
@@ -149,6 +134,55 @@ func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error {
})
}
// storeEvent attempts to store a forwarding event in the given bucket while
// avoiding key collisions. If a key for the event timestamp already exists in
// the database, the timestamp is incremented in one-nanosecond steps until a
// "free" slot is found.
func storeEvent(bucket walletdb.ReadWriteBucket, event ForwardingEvent,
timestampScratchSpace []byte) error {
// First, we'll serialize this timestamp into our
// timestamp buffer.
byteOrder.PutUint64(
timestampScratchSpace, uint64(event.Timestamp.UnixNano()),
)
// Next, we'll loop until we find a "free" slot in the bucket to store
// the event under. This should almost never happen unless we're running
// on a system with a clock so coarse that it doesn't properly resolve
// to nanosecond scale. We try up to 100 times, which amounts to a
// maximum shift of 0.1 microseconds and is acceptable for most use
// cases. If we don't find a free slot, we just give up and let the
// collision happen; something must be wrong with the data in that case,
// since even on a very fast machine forwarding a payment _will_ take at
// least a few microseconds, so we should find a free nanosecond slot
// somewhere.
const maxTries = 100
tries := 0
for tries < maxTries {
val := bucket.Get(timestampScratchSpace)
if val == nil {
break
}
// Collision, try the next nanosecond timestamp.
nextNano := event.Timestamp.UnixNano() + 1
event.Timestamp = time.Unix(0, nextNano)
byteOrder.PutUint64(timestampScratchSpace, uint64(nextNano))
tries++
}
// With the key encoded, we'll then encode the event
// into our buffer, then write it out to disk.
var eventBytes [forwardingEventSize]byte
eventBuf := bytes.NewBuffer(eventBytes[0:0:forwardingEventSize])
err := encodeForwardingEvent(eventBuf, &event)
if err != nil {
return err
}
return bucket.Put(timestampScratchSpace, eventBuf.Bytes())
}
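For context, the bucket key is simply the 8-byte encoding of the event's UnixNano timestamp (big-endian, assuming channeldb's package-level byteOrder; big-endian keys make lexicographic order match chronological order, which the range query below relies on). Bumping the timestamp by one nanosecond therefore yields the lexicographically next key. A minimal standalone sketch of the key derivation, with hypothetical values, not part of the patch:

package main

import (
	"encoding/binary"
	"fmt"
	"time"
)

func main() {
	// Two events collide on the same nanosecond timestamp.
	ts := time.Unix(0, 1001)

	var key [8]byte
	binary.BigEndian.PutUint64(key[:], uint64(ts.UnixNano()))
	fmt.Printf("first key:  %x\n", key) // 00000000000003e9

	// The second event is bumped by one nanosecond, which produces
	// the lexicographically next key in the bucket.
	bumped := time.Unix(0, ts.UnixNano()+1)
	binary.BigEndian.PutUint64(key[:], uint64(bumped.UnixNano()))
	fmt.Printf("second key: %x\n", key) // 00000000000003ea
}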
// ForwardingEventQuery represents a query to the forwarding log payment
// circuit time series database. The query allows a caller to retrieve all
// records for a particular time slice, offset in that time slice, limiting the
@@ -272,3 +306,34 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e
return resp, nil
}
// makeUniqueTimestamps takes a slice of forwarding events, sorts it by the
// event timestamps and then makes sure there are no duplicates in the
// timestamps. If duplicates are found, some of the timestamps are increased on
// the nanosecond scale until only unique values remain. This is a fix to
// address the problem that in some environments (looking at you, Windows) the
// system clock has such a bad resolution that two serial invocations of
// time.Now() might return the same timestamp, even if some time has elapsed
// between the calls.
func makeUniqueTimestamps(events []ForwardingEvent) {
sort.Slice(events, func(i, j int) bool {
return events[i].Timestamp.Before(events[j].Timestamp)
})
// Now that we know the events are sorted by timestamp, we can go
// through the list and fix all duplicates until only unique values
// remain.
for outer := 0; outer < len(events)-1; outer++ {
current := events[outer].Timestamp.UnixNano()
next := events[outer+1].Timestamp.UnixNano()
// We initially sorted the slice, so if the current timestamp is
// now greater than or equal to the next one, it's either a
// duplicate or we increased the current one in the last
// iteration.
if current >= next {
next = current + 1
events[outer+1].Timestamp = time.Unix(0, next)
}
}
}
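Note the cascading behavior here: bumping one duplicate can create a new collision with the following entry, which the next loop iteration then resolves. A small hypothetical example (written as if inside package channeldb; the values are invented, and the table-driven test below checks the same semantics more thoroughly):

in := []ForwardingEvent{
	{Timestamp: time.Unix(0, 100)},
	{Timestamp: time.Unix(0, 100)},
	{Timestamp: time.Unix(0, 101)},
}
makeUniqueTimestamps(in)
// After sorting, the second 100 is bumped to 101; that now collides
// with the original 101, which the next iteration bumps to 102. The
// resulting timestamps are 100, 101, 102.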

@@ -4,11 +4,11 @@ import (
"math/rand"
"reflect"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnwire"
"time"
"github.com/stretchr/testify/assert"
)
// TestForwardingLogBasicStorageAndQuery tests that we're able to store and
@@ -20,10 +20,11 @@ func TestForwardingLogBasicStorageAndQuery(t *testing.T) {
// forwarding event log that we'll be using for the duration of the
// test.
db, cleanUp, err := MakeTestDB()
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
defer cleanUp()
log := ForwardingLog{
db: db,
}
@@ -92,10 +93,11 @@ func TestForwardingLogQueryOptions(t *testing.T) {
// forwarding event log that we'll be using for the duration of the
// test.
db, cleanUp, err := MakeTestDB()
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
defer cleanUp()
log := ForwardingLog{
db: db,
}
@@ -197,10 +199,11 @@ func TestForwardingLogQueryLimit(t *testing.T) {
// forwarding event log that we'll be using for the duration of the
// test.
db, cleanUp, err := MakeTestDB()
defer cleanUp()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
defer cleanUp()
log := ForwardingLog{
db: db,
}
@@ -263,3 +266,118 @@ func TestForwardingLogQueryLimit(t *testing.T) {
timeSlice.LastIndexOffset)
}
}
// TestForwardingLogMakeUniqueTimestamps makes sure the function that creates
// unique timestamps does its job correctly.
func TestForwardingLogMakeUniqueTimestamps(t *testing.T) {
t.Parallel()
// Create a list of events where some of the timestamps collide. We
// expect no existing timestamp to be overwritten, instead the "gaps"
// between them should be filled.
inputSlice := []ForwardingEvent{
{Timestamp: time.Unix(0, 1001)},
{Timestamp: time.Unix(0, 2001)},
{Timestamp: time.Unix(0, 1001)},
{Timestamp: time.Unix(0, 1002)},
{Timestamp: time.Unix(0, 1004)},
{Timestamp: time.Unix(0, 1004)},
{Timestamp: time.Unix(0, 1007)},
{Timestamp: time.Unix(0, 1001)},
}
expectedSlice := []ForwardingEvent{
{Timestamp: time.Unix(0, 1001)},
{Timestamp: time.Unix(0, 1002)},
{Timestamp: time.Unix(0, 1003)},
{Timestamp: time.Unix(0, 1004)},
{Timestamp: time.Unix(0, 1005)},
{Timestamp: time.Unix(0, 1006)},
{Timestamp: time.Unix(0, 1007)},
{Timestamp: time.Unix(0, 2001)},
}
makeUniqueTimestamps(inputSlice)
for idx, in := range inputSlice {
expect := expectedSlice[idx]
assert.Equal(
t, expect.Timestamp.UnixNano(), in.Timestamp.UnixNano(),
)
}
}
// TestForwardingLogStoreEvent makes sure forwarding events are stored without
// colliding on duplicate timestamps.
func TestForwardingLogStoreEvent(t *testing.T) {
t.Parallel()
// First, we'll set up a test database, and use that to instantiate the
// forwarding event log that we'll be using for the duration of the
// test.
db, cleanUp, err := MakeTestDB()
if err != nil {
t.Fatalf("unable to make test db: %v", err)
}
defer cleanUp()
log := ForwardingLog{
db: db,
}
// We'll create 20 random events, with each event's timestamp spaced
// just one nanosecond apart.
numEvents := 20
events := make([]ForwardingEvent, numEvents)
ts := time.Now().UnixNano()
for i := 0; i < numEvents; i++ {
events[i] = ForwardingEvent{
Timestamp: time.Unix(0, ts+int64(i)),
IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())),
AmtIn: lnwire.MilliSatoshi(rand.Int63()),
AmtOut: lnwire.MilliSatoshi(rand.Int63()),
}
}
// Now that all of our events are constructed, we'll add them to the
// database in a batched manner.
if err := log.AddForwardingEvents(events); err != nil {
t.Fatalf("unable to add events: %v", err)
}
// Because timestamps are de-duplicated when adding them in a single
// batch before they even hit the DB, we add the same events again but
// in a new batch. They now have to be de-duplicated at the DB level.
if err := log.AddForwardingEvents(events); err != nil {
t.Fatalf("unable to add second batch of events: %v", err)
}
// With all of our events added, we should be able to query for all
// events with a range of just 40 nanoseconds (2 times 20 events, all
// spaced one nanosecond apart).
eventQuery := ForwardingEventQuery{
StartTime: time.Unix(0, ts),
EndTime: time.Unix(0, ts+int64(numEvents*2)),
IndexOffset: 0,
NumMaxEvents: uint32(numEvents * 3),
}
timeSlice, err := log.Query(eventQuery)
if err != nil {
t.Fatalf("unable to query for events: %v", err)
}
// We should get exactly 40 events back.
if len(timeSlice.ForwardingEvents) != numEvents*2 {
t.Fatalf("wrong number of events: expected %v, got %v",
numEvents*2, len(timeSlice.ForwardingEvents))
}
// The timestamps should be spaced out evenly and in order.
for i := 0; i < numEvents*2; i++ {
eventTs := timeSlice.ForwardingEvents[i].Timestamp.UnixNano()
if eventTs != ts+int64(i) {
t.Fatalf("unexpected timestamp of event %d: expected "+
"%d, got %d", i, ts+int64(i), eventTs)
}
}
}

go.mod (2 changed lines)

@@ -5,7 +5,7 @@ require (
github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e // indirect
github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82
github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2
github.com/btcsuite/btcd v0.20.1-beta.0.20200515232429-9f0179fd2c46
github.com/btcsuite/btcd v0.20.1-beta.0.20200730232343-1db1b6f8217f
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f
github.com/btcsuite/btcutil v1.0.2
github.com/btcsuite/btcutil/psbt v1.0.2

go.sum (6 changed lines)

@@ -27,8 +27,8 @@ github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcug
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.20.1-beta.0.20200513120220-b470eee47728/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.20.1-beta.0.20200515232429-9f0179fd2c46 h1:QyTpiR5nQe94vza2qkvf7Ns8XX2Rjh/vdIhO3RzGj4o=
github.com/btcsuite/btcd v0.20.1-beta.0.20200515232429-9f0179fd2c46/go.mod h1:Yktc19YNjh/Iz2//CX0vfRTS4IJKM/RKO5YZ9Fn+Pgo=
github.com/btcsuite/btcd v0.20.1-beta.0.20200730232343-1db1b6f8217f h1:m/GhMTvDQLbID616c4TYdHyt0MZ9lH5B/nf9Lu3okCY=
github.com/btcsuite/btcd v0.20.1-beta.0.20200730232343-1db1b6f8217f/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
@@ -86,6 +86,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/lru v1.0.0 h1:Kbsb1SFDsIlaupWPwsPp+dkxiBY1frcS07PCPgotKz8=
github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=

@@ -81,6 +81,7 @@ func NewBackend(miner string, netParams *chaincfg.Params) (
"--debuglevel=debug",
"--logdir=" + logDir,
"--connect=" + miner,
"--nowinservice",
}
chainBackend, err := rpctest.New(netParams, nil, args)
if err != nil {

@@ -171,13 +171,18 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// channel edges to relatively large non default values. This makes it
// possible to pick up more subtle fee calculation errors.
maxHtlc := uint64(calculateMaxHtlc(chanAmt))
const aliceBaseFeeSat = 1
const aliceFeeRatePPM = 100000
updateChannelPolicy(
t, net.Alice, chanPointAlice, 1000, 100000,
lnd.DefaultBitcoinTimeLockDelta, maxHtlc, carol,
t, net.Alice, chanPointAlice, aliceBaseFeeSat*1000,
aliceFeeRatePPM, lnd.DefaultBitcoinTimeLockDelta, maxHtlc,
carol,
)
const daveBaseFeeSat = 5
const daveFeeRatePPM = 150000
updateChannelPolicy(
t, dave, chanPointDave, 5000, 150000,
t, dave, chanPointDave, daveBaseFeeSat*1000, daveFeeRatePPM,
lnd.DefaultBitcoinTimeLockDelta, maxHtlc, carol,
)
@@ -224,11 +229,6 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
t.Fatalf("unable to send payments: %v", err)
}
// When asserting the amount of satoshis moved, we'll factor in the
// default base fee, as we didn't modify the fee structure when
// creating the seed nodes in the network.
const baseFee = 1
// At this point all the channels within our proto network should be
// shifted by 5k satoshis in the direction of Bob, the sink within the
// payment flow generated above. The order of asserts corresponds to
@@ -237,7 +237,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// Alice, David, Carol.
// The final node bob expects to get paid five times 1000 sat.
expectedAmountPaidAtoB := int64(5 * 1000)
expectedAmountPaidAtoB := int64(numPayments * paymentAmt)
assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Bob,
aliceFundPoint, int64(0), expectedAmountPaidAtoB)
@@ -246,7 +246,9 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// To forward a payment of 1000 sat, Alice is charging a fee of
// 1 sat + 10% = 101 sat.
const expectedFeeAlice = 5 * 101
const aliceFeePerPayment = aliceBaseFeeSat +
(paymentAmt * aliceFeeRatePPM / 1_000_000)
const expectedFeeAlice = numPayments * aliceFeePerPayment
// Dave needs to pay what Alice pays plus Alice's fee.
expectedAmountPaidDtoA := expectedAmountPaidAtoB + expectedFeeAlice
@@ -258,7 +260,10 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// To forward a payment of 1101 sat, Dave is charging a fee of
// 5 sat + 15% = 170.15 sat. This is rounded down in rpcserver to 170.
const expectedFeeDave = 5 * 170
const davePaymentAmt = paymentAmt + aliceFeePerPayment
const daveFeePerPayment = daveBaseFeeSat +
(davePaymentAmt * daveFeeRatePPM / 1_000_000)
const expectedFeeDave = numPayments * daveFeePerPayment
// Carol needs to pay what Dave pays plus Dave's fee.
expectedAmountPaidCtoD := expectedAmountPaidDtoA + expectedFeeDave
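As a sanity check on the fee arithmetic above, here is a small standalone sketch (hypothetical, not part of the patch; numPayments = 5 and paymentAmt = 1000 mirror the constants used in this test):

package main

import "fmt"

func main() {
	const (
		numPayments     = 5
		paymentAmt      = 1000 // satoshis per payment
		aliceBaseFeeSat = 1
		aliceFeeRatePPM = 100_000 // 10%
		daveBaseFeeSat  = 5
		daveFeeRatePPM  = 150_000 // 15%
	)

	// Alice: 1 sat base + 10% of 1000 sat = 101 sat per payment.
	aliceFee := aliceBaseFeeSat + paymentAmt*aliceFeeRatePPM/1_000_000

	// Dave forwards the payment amount plus Alice's fee (1101 sat),
	// so: 5 sat base + 15% of 1101 sat = 170.15, truncated to 170 by
	// integer division, matching the rounding done in rpcserver.
	davePaymentAmt := paymentAmt + aliceFee
	daveFee := daveBaseFeeSat + davePaymentAmt*daveFeeRatePPM/1_000_000

	fmt.Println(aliceFee, numPayments*aliceFee) // 101 505
	fmt.Println(daveFee, numPayments*daveFee)   // 170 850
}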
@@ -303,9 +308,10 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
if err != nil {
t.Fatalf("unable to query for fee report: %v", err)
}
if len(fwdingHistory.ForwardingEvents) != 5 {
if len(fwdingHistory.ForwardingEvents) != numPayments {
t.Fatalf("wrong number of forwarding event: expected %v, "+
"got %v", 5, len(fwdingHistory.ForwardingEvents))
"got %v", numPayments,
len(fwdingHistory.ForwardingEvents))
}
expectedForwardingFee := uint64(expectedFeeDave / numPayments)
for _, event := range fwdingHistory.ForwardingEvents {

@@ -15,6 +15,7 @@ import (
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"sync"
"sync/atomic"
@@ -2462,9 +2463,14 @@
)
// Set up a new miner that we can use to cause a reorg.
args := []string{"--rejectnonstd", "--txindex"}
tempMiner, err := rpctest.New(harnessNetParams,
&rpcclient.NotificationHandlers{}, args)
args := []string{
"--rejectnonstd",
"--txindex",
"--nowinservice",
}
tempMiner, err := rpctest.New(
harnessNetParams, &rpcclient.NotificationHandlers{}, args,
)
if err != nil {
t.Fatalf("unable to create mining node: %v", err)
}
@@ -15284,6 +15290,7 @@ func TestLightningNetworkDaemon(t *testing.T) {
"--debuglevel=debug",
"--logdir=" + minerLogDir,
"--trickleinterval=100ms",
"--nowinservice",
}
handlers := &rpcclient.NotificationHandlers{
OnTxAccepted: func(hash *chainhash.Hash, amt btcutil.Amount) {
@@ -15329,11 +15336,24 @@ func TestLightningNetworkDaemon(t *testing.T) {
ht.Fatalf("unable to request transaction notifications: %v", err)
}
binary := itestLndBinary
if runtime.GOOS == "windows" {
// Windows (even in a bash-like environment such as Git Bash, as on
// Travis) doesn't seem to like relative paths to .exe files...
currentDir, err := os.Getwd()
if err != nil {
ht.Fatalf("unable to get working directory: %v", err)
}
targetPath := filepath.Join(currentDir, "../../lnd-itest.exe")
binary, err = filepath.Abs(targetPath)
if err != nil {
ht.Fatalf("unable to get absolute path: %v", err)
}
}
// Now we can set up our test harness (LND instance), with the chain
// backend we just created.
lndHarness, err = lntest.NewNetworkHarness(
miner, chainBackend, itestLndBinary,
)
lndHarness, err = lntest.NewNetworkHarness(miner, chainBackend, binary)
if err != nil {
ht.Fatalf("unable to create lightning network harness: %v", err)
}