Merge pull request #3087 from joostjager/move-itest

lntest: move itest into package
Conner Fromknecht 2019-05-29 16:29:06 -07:00 committed by GitHub
commit f802ebddba
14 changed files with 192 additions and 177 deletions
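
The change is largely mechanical: each integration-test file swaps package lnd for package itest, imports the root lnd package, and reaches previously unexported identifiers through the newly exported API. A minimal sketch of the resulting file shape, assuming a hypothetical helper name (chanPointTxid is not part of the diff):

// +build rpctest

package itest

import (
	"github.com/lightningnetwork/lnd"
	"github.com/lightningnetwork/lnd/lnrpc"
)

// chanPointTxid is a hypothetical helper showing the new import style:
// symbols that were package-local before the move (getChanPointFundingTxid,
// maxBtcFundingAmount, ...) are now reached through the exported lnd API.
func chanPointTxid(chanPoint *lnrpc.ChannelPoint) (string, error) {
	txid, err := lnd.GetChanPointFundingTxid(chanPoint)
	if err != nil {
		return "", err
	}
	_ = lnd.MaxBtcFundingAmount // was maxBtcFundingAmount
	return txid.String(), nil
}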

.gitignore

@ -33,8 +33,8 @@ _testmain.go
# Integration test log files
output*.log
/.backendlogs
/.minerlogs
lntest/itest/.backendlogs
lntest/itest/.minerlogs
cmd/cmd
*.key

View File

@ -34,10 +34,17 @@ import (
)
const (
defaultBitcoinMinHTLCMSat = lnwire.MilliSatoshi(1000)
defaultBitcoinBaseFeeMSat = lnwire.MilliSatoshi(1000)
defaultBitcoinFeeRate = lnwire.MilliSatoshi(1)
defaultBitcoinTimeLockDelta = 40
defaultBitcoinMinHTLCMSat = lnwire.MilliSatoshi(1000)
// DefaultBitcoinBaseFeeMSat is the default forwarding base fee.
DefaultBitcoinBaseFeeMSat = lnwire.MilliSatoshi(1000)
// DefaultBitcoinFeeRate is the default forwarding fee rate.
DefaultBitcoinFeeRate = lnwire.MilliSatoshi(1)
// DefaultBitcoinTimeLockDelta is the default forwarding time lock
// delta.
DefaultBitcoinTimeLockDelta = 40
defaultLitecoinMinHTLCMSat = lnwire.MilliSatoshi(1000)
defaultLitecoinBaseFeeMSat = lnwire.MilliSatoshi(1000)
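
Exporting the Bitcoin forwarding defaults lets the relocated tests assert against them directly. A sketch of that usage, mirroring the expectedPolicy construction in the lnd_test.go hunk further below (defaultRoutingPolicy is an illustrative helper, not part of the diff):

package itest

import (
	"github.com/lightningnetwork/lnd"
	"github.com/lightningnetwork/lnd/lnrpc"
)

// defaultRoutingPolicy builds the routing policy a fresh channel is expected
// to advertise, using the constants exported by this change. The minimum
// HTLC default remains unexported, so the tests keep hard-coding 1000.
func defaultRoutingPolicy(disabled bool) *lnrpc.RoutingPolicy {
	return &lnrpc.RoutingPolicy{
		FeeBaseMsat:      int64(lnd.DefaultBitcoinBaseFeeMSat),
		FeeRateMilliMsat: int64(lnd.DefaultBitcoinFeeRate),
		TimeLockDelta:    lnd.DefaultBitcoinTimeLockDelta,
		MinHtlc:          1000,
		Disabled:         disabled,
	}
}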

View File

@ -34,23 +34,27 @@ import (
)
const (
defaultConfigFilename = "lnd.conf"
defaultDataDirname = "data"
defaultChainSubDirname = "chain"
defaultGraphSubDirname = "graph"
defaultTLSCertFilename = "tls.cert"
defaultTLSKeyFilename = "tls.key"
defaultAdminMacFilename = "admin.macaroon"
defaultReadMacFilename = "readonly.macaroon"
defaultInvoiceMacFilename = "invoice.macaroon"
defaultLogLevel = "info"
defaultLogDirname = "logs"
defaultLogFilename = "lnd.log"
defaultRPCPort = 10009
defaultRESTPort = 8080
defaultPeerPort = 9735
defaultRPCHost = "localhost"
defaultMaxPendingChannels = 1
defaultConfigFilename = "lnd.conf"
defaultDataDirname = "data"
defaultChainSubDirname = "chain"
defaultGraphSubDirname = "graph"
defaultTLSCertFilename = "tls.cert"
defaultTLSKeyFilename = "tls.key"
defaultAdminMacFilename = "admin.macaroon"
defaultReadMacFilename = "readonly.macaroon"
defaultInvoiceMacFilename = "invoice.macaroon"
defaultLogLevel = "info"
defaultLogDirname = "logs"
defaultLogFilename = "lnd.log"
defaultRPCPort = 10009
defaultRESTPort = 8080
defaultPeerPort = 9735
defaultRPCHost = "localhost"
// DefaultMaxPendingChannels is the default maximum number of incoming
// pending channels permitted per peer.
DefaultMaxPendingChannels = 1
defaultNoSeedBackup = false
defaultTrickleDelay = 90 * 1000
defaultChanStatusSampleInterval = time.Minute
@ -68,14 +72,14 @@ const (
defaultTorV2PrivateKeyFilename = "v2_onion_private_key"
defaultTorV3PrivateKeyFilename = "v3_onion_private_key"
// defaultIncomingBroadcastDelta defines the number of blocks before the
// DefaultIncomingBroadcastDelta defines the number of blocks before the
// expiry of an incoming htlc at which we force close the channel. We
// only go to chain if we also have the preimage to actually pull in the
// htlc. BOLT #2 suggests 7 blocks. We use a few more for extra safety.
// Within this window we need to get our sweep or 2nd level success tx
// confirmed, because after that the remote party is also able to claim
// the htlc using the timeout path.
defaultIncomingBroadcastDelta = 10
DefaultIncomingBroadcastDelta = 10
// defaultFinalCltvRejectDelta defines the number of blocks before the
// expiry of an incoming exit hop htlc at which we cancel it back
@ -90,9 +94,9 @@ const (
// window, we may still force close the channel. There is currently no
// way to reject an UpdateAddHtlc of which we already know that it will
// push us in the broadcast window.
defaultFinalCltvRejectDelta = defaultIncomingBroadcastDelta + 3
defaultFinalCltvRejectDelta = DefaultIncomingBroadcastDelta + 3
// defaultOutgoingBroadcastDelta defines the number of blocks before the
// DefaultOutgoingBroadcastDelta defines the number of blocks before the
// expiry of an outgoing htlc at which we force close the channel. We
// are not in a hurry to force close, because there is nothing to claim
// for us. We do need to time the htlc out, because there may be an
@ -100,7 +104,7 @@ const (
// a value of -1 here, but we allow one block less to prevent potential
// confusion around the negative value. It means we force close the
// channel at exactly the htlc expiry height.
defaultOutgoingBroadcastDelta = 0
DefaultOutgoingBroadcastDelta = 0
// defaultOutgoingCltvRejectDelta defines the number of blocks before
// the expiry of an outgoing htlc at which we don't want to offer it to
@ -111,7 +115,7 @@ const (
// value of 0. We pad it a bit, to prevent a slow round trip to the next
// peer and a block arriving during that round trip to trigger force
// closure.
defaultOutgoingCltvRejectDelta = defaultOutgoingBroadcastDelta + 3
defaultOutgoingCltvRejectDelta = DefaultOutgoingBroadcastDelta + 3
// minTimeLockDelta is the minimum timelock we require for incoming
// HTLCs on our channels.
@ -330,9 +334,9 @@ func loadConfig() (*config, error) {
MaxLogFileSize: defaultMaxLogFileSize,
Bitcoin: &chainConfig{
MinHTLC: defaultBitcoinMinHTLCMSat,
BaseFee: defaultBitcoinBaseFeeMSat,
FeeRate: defaultBitcoinFeeRate,
TimeLockDelta: defaultBitcoinTimeLockDelta,
BaseFee: DefaultBitcoinBaseFeeMSat,
FeeRate: DefaultBitcoinFeeRate,
TimeLockDelta: DefaultBitcoinTimeLockDelta,
Node: "btcd",
},
BtcdMode: &btcdConfig{
@ -360,7 +364,7 @@ func loadConfig() (*config, error) {
Dir: defaultLitecoindDir,
RPCHost: defaultRPCHost,
},
MaxPendingChannels: defaultMaxPendingChannels,
MaxPendingChannels: DefaultMaxPendingChannels,
NoSeedBackup: defaultNoSeedBackup,
MinBackoff: defaultMinBackoff,
MaxBackoff: defaultMaxBackoff,
@ -371,7 +375,7 @@ func loadConfig() (*config, error) {
MaxChannels: 5,
Allocation: 0.6,
MinChannelSize: int64(minChanFundingSize),
MaxChannelSize: int64(maxFundingAmount),
MaxChannelSize: int64(MaxFundingAmount),
Heuristic: map[string]float64{
"preferential": 1.0,
},
@ -535,8 +539,8 @@ func loadConfig() (*config, error) {
if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) {
cfg.Autopilot.MinChannelSize = int64(minChanFundingSize)
}
if cfg.Autopilot.MaxChannelSize > int64(maxFundingAmount) {
cfg.Autopilot.MaxChannelSize = int64(maxFundingAmount)
if cfg.Autopilot.MaxChannelSize > int64(MaxFundingAmount) {
cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount)
}
if _, err := validateAtplCfg(cfg.Autopilot); err != nil {
@ -720,8 +724,8 @@ func loadConfig() (*config, error) {
// Finally we'll register the litecoin chain as our current
// primary chain.
registeredChains.RegisterPrimaryChain(litecoinChain)
maxFundingAmount = maxLtcFundingAmount
maxPaymentMSat = maxLtcPaymentMSat
MaxFundingAmount = maxLtcFundingAmount
MaxPaymentMSat = maxLtcPaymentMSat
case cfg.Bitcoin.Active:
// Multiple networks can't be selected simultaneously. Count
@ -855,8 +859,8 @@ func loadConfig() (*config, error) {
if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) {
cfg.Autopilot.MinChannelSize = int64(minChanFundingSize)
}
if cfg.Autopilot.MaxChannelSize > int64(maxFundingAmount) {
cfg.Autopilot.MaxChannelSize = int64(maxFundingAmount)
if cfg.Autopilot.MaxChannelSize > int64(MaxFundingAmount) {
cfg.Autopilot.MaxChannelSize = int64(MaxFundingAmount)
}
// Validate profile port number.
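
The broadcast deltas are exported for the same reason: the multi-hop itests mine exactly enough blocks to push a node across its go-to-chain threshold. A sketch of that arithmetic under the defaults above (the helper names are illustrative):

package itest

import "github.com/lightningnetwork/lnd"

// blocksUntilIncomingClaim returns how many blocks the tests mine before a
// node holding the preimage is expected to force close: it goes to chain
// DefaultIncomingBroadcastDelta (10) blocks before the HTLC expiry.
func blocksUntilIncomingClaim(cltvExpiry int32) uint32 {
	return uint32(cltvExpiry - lnd.DefaultIncomingBroadcastDelta)
}

// blocksUntilOutgoingTimeout does the same for an outgoing HTLC, which with
// DefaultOutgoingBroadcastDelta (0) is broadcast at the expiry height itself.
func blocksUntilOutgoingTimeout(finalCltvDelta int32) uint32 {
	return uint32(finalCltvDelta - lnd.DefaultOutgoingBroadcastDelta)
}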

View File

@ -57,21 +57,21 @@ const (
// created over the RPC interface.
minChanFundingSize = btcutil.Amount(20000)
// maxBtcFundingAmount is a soft-limit of the maximum channel size
// MaxBtcFundingAmount is a soft-limit of the maximum channel size
// currently accepted on the Bitcoin chain within the Lightning
// Protocol. This limit is defined in BOLT-0002, and serves as an
// initial precautionary limit while implementations are battle tested
// in the real world.
maxBtcFundingAmount = btcutil.Amount(1<<24) - 1
MaxBtcFundingAmount = btcutil.Amount(1<<24) - 1
// maxLtcFundingAmount is a soft-limit of the maximum channel size
// currently accepted on the Litecoin chain within the Lightning
// Protocol.
maxLtcFundingAmount = maxBtcFundingAmount * btcToLtcConversionRate
maxLtcFundingAmount = MaxBtcFundingAmount * btcToLtcConversionRate
)
var (
// maxFundingAmount is a soft-limit of the maximum channel size
// MaxFundingAmount is a soft-limit of the maximum channel size
// currently accepted within the Lightning Protocol. This limit is
// defined in BOLT-0002, and serves as an initial precautionary limit
// while implementations are battle tested in the real world.
@ -80,7 +80,7 @@ var (
// to the value under the Bitcoin chain as default.
//
// TODO(roasbeef): add command line param to modify
maxFundingAmount = maxBtcFundingAmount
MaxFundingAmount = MaxBtcFundingAmount
// ErrFundingManagerShuttingDown is an error returned when attempting to
// process a funding request/message but the funding manager has already
@ -1023,7 +1023,7 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) {
// We'll reject any request to create a channel that's above the
// current soft-limit for channel size.
if msg.FundingAmount > maxFundingAmount {
if msg.FundingAmount > MaxFundingAmount {
f.failFundingFlow(
fmsg.peer, fmsg.msg.PendingChannelID,
lnwire.ErrChanTooLarge,
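
For reference, the exported soft-limit this check enforces is the BOLT-0002 value of (2^24)-1 satoshis, scaled by the Litecoin conversion rate when that chain is active. A standalone sketch of the rejection logic (the amount below is made up):

package main

import (
	"fmt"

	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd"
)

func main() {
	// lnd.MaxFundingAmount defaults to lnd.MaxBtcFundingAmount, i.e.
	// (1<<24)-1 = 16,777,215 satoshis.
	fundingAmt := btcutil.Amount(20000000)
	if fundingAmt > lnd.MaxFundingAmount {
		fmt.Printf("rejecting channel of %v, soft-limit is %v\n",
			fundingAmt, lnd.MaxFundingAmount)
	}
}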

View File

@ -1028,7 +1028,7 @@ func assertHandleFundingLocked(t *testing.T, alice, bob *testNode) {
}
func TestFundingManagerNormalWorkflow(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1101,7 +1101,7 @@ func TestFundingManagerNormalWorkflow(t *testing.T) {
}
func TestFundingManagerRestartBehavior(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// Run through the process of opening the channel, up until the funding
@ -1239,7 +1239,7 @@ func TestFundingManagerRestartBehavior(t *testing.T) {
// server to notify when the peer comes online, in case sending the
// fundingLocked message fails the first time.
func TestFundingManagerOfflinePeer(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// Run through the process of opening the channel, up until the funding
@ -1373,7 +1373,7 @@ func TestFundingManagerOfflinePeer(t *testing.T) {
// will properly clean up a zombie reservation that times out after the
// initFundingMsg has been handled.
func TestFundingManagerPeerTimeoutAfterInitFunding(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1433,7 +1433,7 @@ func TestFundingManagerPeerTimeoutAfterInitFunding(t *testing.T) {
// will properly clean up a zombie reservation that times out after the
// fundingOpenMsg has been handled.
func TestFundingManagerPeerTimeoutAfterFundingOpen(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1502,7 +1502,7 @@ func TestFundingManagerPeerTimeoutAfterFundingOpen(t *testing.T) {
// will properly clean up a zombie reservation that times out after the
// fundingAcceptMsg has been handled.
func TestFundingManagerPeerTimeoutAfterFundingAccept(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1576,7 +1576,7 @@ func TestFundingManagerPeerTimeoutAfterFundingAccept(t *testing.T) {
}
func TestFundingManagerFundingTimeout(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1621,7 +1621,7 @@ func TestFundingManagerFundingTimeout(t *testing.T) {
// the channel initiator, that it does not timeout when the lnd restarts.
func TestFundingManagerFundingNotTimeoutInitiator(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1688,7 +1688,7 @@ func TestFundingManagerFundingNotTimeoutInitiator(t *testing.T) {
// continues to operate as expected in case we receive a duplicate fundingLocked
// message.
func TestFundingManagerReceiveFundingLockedTwice(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1780,7 +1780,7 @@ func TestFundingManagerReceiveFundingLockedTwice(t *testing.T) {
// handles receiving a fundingLocked after the its own fundingLocked and channel
// announcement is sent and gets restarted.
func TestFundingManagerRestartAfterChanAnn(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1857,7 +1857,7 @@ func TestFundingManagerRestartAfterChanAnn(t *testing.T) {
// fundingManager continues to operate as expected after it has received
// fundingLocked and then gets restarted.
func TestFundingManagerRestartAfterReceivingFundingLocked(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -1930,7 +1930,7 @@ func TestFundingManagerRestartAfterReceivingFundingLocked(t *testing.T) {
// (a channel not supposed to be announced to the rest of the network),
// the announcementSignatures nor the nodeAnnouncement messages are sent.
func TestFundingManagerPrivateChannel(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -2032,7 +2032,7 @@ func TestFundingManagerPrivateChannel(t *testing.T) {
// announcement signatures nor the node announcement messages are sent upon
// restart.
func TestFundingManagerPrivateRestart(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// We will consume the channel updates as we go, so no buffering is needed.
@ -2154,7 +2154,7 @@ func TestFundingManagerPrivateRestart(t *testing.T) {
// TestFundingManagerCustomChannelParameters checks that custom requirements we
// specify during the channel funding flow is preserved correcly on both sides.
func TestFundingManagerCustomChannelParameters(t *testing.T) {
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// This is the custom parameters we'll use.
@ -2544,7 +2544,7 @@ func TestFundingManagerMaxPendingChannels(t *testing.T) {
// option, namely that non-zero incoming push amounts are disabled.
func TestFundingManagerRejectPush(t *testing.T) {
// Enable 'rejectpush' option and initialize funding managers.
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
rejectPush := cfg.RejectPush
cfg.RejectPush = true
defer func() {
@ -2606,7 +2606,7 @@ func TestFundingManagerRejectPush(t *testing.T) {
func TestFundingManagerMaxConfs(t *testing.T) {
t.Parallel()
alice, bob := setupFundingManagers(t, defaultMaxPendingChannels)
alice, bob := setupFundingManagers(t, DefaultMaxPendingChannels)
defer tearDownFundingManagers(t, alice, bob)
// Create a funding request and start the workflow.

View File

@ -1,6 +1,6 @@
// +build rpctest
package lnd
package itest
import (
"context"
@ -9,6 +9,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lntest"
@ -125,7 +126,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := uint32(invoiceReq.CltvExpiry -
defaultIncomingBroadcastDelta)
lnd.DefaultIncomingBroadcastDelta)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
@ -136,7 +137,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}

View File

@ -1,6 +1,6 @@
// +build rpctest
package lnd
package itest
import (
"context"
@ -10,6 +10,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lntest"
@ -114,7 +115,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
// chain in order to sweep her HTLC since the value is high enough.
// TODO(roasbeef): modify once go to chain policy changes
numBlocks := uint32(
invoiceReq.CltvExpiry - defaultIncomingBroadcastDelta,
invoiceReq.CltvExpiry - lnd.DefaultIncomingBroadcastDelta,
)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
@ -127,7 +128,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
t.Fatalf("expected transaction not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}

View File

@ -1,6 +1,6 @@
// +build rpctest
package lnd
package itest
import (
"context"
@ -9,6 +9,7 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lntest"
@ -138,7 +139,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := uint32(invoiceReq.CltvExpiry-
defaultIncomingBroadcastDelta) - defaultCSV
lnd.DefaultIncomingBroadcastDelta) - defaultCSV
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
@ -149,7 +150,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}

View File

@ -1,6 +1,6 @@
// +build rpctest
package lnd
package itest
import (
"bytes"
@ -29,6 +29,7 @@ import (
"github.com/btcsuite/btcutil"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntest"
@ -131,7 +132,7 @@ func assertTxInBlock(t *harnessTest, block *wire.MsgBlock, txid *chainhash.Hash)
}
func rpcPointToWirePoint(t *harnessTest, chanPoint *lnrpc.ChannelPoint) wire.OutPoint {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -212,7 +213,7 @@ func openChannelAndAssert(ctx context.Context, t *harnessTest,
if err != nil {
t.Fatalf("error while waiting for channel open: %v", err)
}
fundingTxID, err := getChanPointFundingTxid(fundingChanPoint)
fundingTxID, err := lnd.GetChanPointFundingTxid(fundingChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -313,7 +314,7 @@ func assertChannelClosed(ctx context.Context, t *harnessTest,
fundingChanPoint *lnrpc.ChannelPoint,
closeUpdates lnrpc.Lightning_CloseChannelClient) *chainhash.Hash {
txid, err := getChanPointFundingTxid(fundingChanPoint)
txid, err := lnd.GetChanPointFundingTxid(fundingChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -381,7 +382,7 @@ func assertChannelClosed(ctx context.Context, t *harnessTest,
func waitForChannelPendingForceClose(ctx context.Context,
node *lntest.HarnessNode, fundingChanPoint *lnrpc.ChannelPoint) error {
txid, err := getChanPointFundingTxid(fundingChanPoint)
txid, err := lnd.GetChanPointFundingTxid(fundingChanPoint)
if err != nil {
return err
}
@ -921,7 +922,7 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
func testBasicChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
chanAmt := maxBtcFundingAmount
chanAmt := lnd.MaxBtcFundingAmount
pushAmt := btcutil.Amount(100000)
// First establish a channel with a capacity of 0.5 BTC between Alice
@ -984,7 +985,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
const (
chanAmt = maxBtcFundingAmount
chanAmt = lnd.MaxBtcFundingAmount
pushAmt = btcutil.Amount(100000)
)
@ -1089,7 +1090,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
// txStr returns the string representation of the channel's funding transaction.
func txStr(chanPoint *lnrpc.ChannelPoint) string {
fundingTxID, err := getChanPointFundingTxid(chanPoint)
fundingTxID, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
return ""
}
@ -1292,7 +1293,7 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
const (
defaultFeeBase = 1000
defaultFeeRate = 1
defaultTimeLockDelta = defaultBitcoinTimeLockDelta
defaultTimeLockDelta = lnd.DefaultBitcoinTimeLockDelta
defaultMinHtlc = 1000
)
@ -1304,7 +1305,7 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) {
bobSub := subscribeGraphNotifications(t, ctxb, net.Bob)
defer close(bobSub.quit)
chanAmt := maxBtcFundingAmount
chanAmt := lnd.MaxBtcFundingAmount
pushAmt := chanAmt / 2
// Create a channel Alice->Bob.
@ -1838,7 +1839,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
// Create a new channel that requires 1 confs before it's considered
// open, then broadcast the funding transaction
chanAmt := maxBtcFundingAmount
chanAmt := lnd.MaxBtcFundingAmount
pushAmt := btcutil.Amount(0)
ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout)
pendingUpdate, err := net.OpenPendingChannel(ctxt, net.Alice, net.Bob,
@ -2025,7 +2026,7 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) {
// Check existing connection.
assertNumConnections(t, net.Alice, net.Bob, 1)
chanAmt := maxBtcFundingAmount
chanAmt := lnd.MaxBtcFundingAmount
pushAmt := btcutil.Amount(0)
// Create a new channel that requires 1 confs before it's considered
@ -2155,7 +2156,7 @@ func testDisconnectingTargetPeer(net *lntest.NetworkHarness, t *harnessTest) {
func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
chanAmt := maxBtcFundingAmount
chanAmt := lnd.MaxBtcFundingAmount
pushAmt := btcutil.Amount(0)
// As we need to create a channel that requires more than 1
@ -2287,7 +2288,7 @@ func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) {
// Open a channel with 0.16 BTC between Alice and Bob, ensuring the
// channel has been opened properly.
amount := maxBtcFundingAmount
amount := lnd.MaxBtcFundingAmount
// Creates a helper closure to be used below which asserts the proper
// response to a channel balance RPC.
@ -2420,7 +2421,7 @@ func testChannelUnsettledBalance(net *lntest.NetworkHarness, t *harnessTest) {
Dest: carolPubKey,
Amt: int64(payAmt),
PaymentHash: makeFakePayHash(t),
FinalCltvDelta: defaultBitcoinTimeLockDelta,
FinalCltvDelta: lnd.DefaultBitcoinTimeLockDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
@ -2618,7 +2619,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
// TODO(roasbeef): should check default value in config here
// instead, or make delay a param
defaultCLTV := uint32(defaultBitcoinTimeLockDelta)
defaultCLTV := uint32(lnd.DefaultBitcoinTimeLockDelta)
// Since we'd like to test failure scenarios with outstanding htlcs,
// we'll introduce another node into our test network: Carol.
@ -2687,7 +2688,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
Dest: carolPubKey,
Amt: int64(paymentAmt),
PaymentHash: makeFakePayHash(t),
FinalCltvDelta: defaultBitcoinTimeLockDelta,
FinalCltvDelta: lnd.DefaultBitcoinTimeLockDelta,
})
if err != nil {
t.Fatalf("unable to send alice htlc: %v", err)
@ -2759,7 +2760,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) {
// Compute the outpoint of the channel, which we will use repeatedly to
// locate the pending channel information in the rpc responses.
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -3979,7 +3980,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4018,7 +4019,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
},
)
networkChans = append(networkChans, chanPointDave)
daveChanTXID, err := getChanPointFundingTxid(chanPointDave)
daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4053,7 +4054,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointCarol)
carolChanTXID, err := getChanPointFundingTxid(chanPointCarol)
carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4067,7 +4068,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
nodeNames := []string{"Alice", "Bob", "Carol", "Dave"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4118,12 +4119,12 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) {
// possible to pick up more subtle fee calculation errors.
updateChannelPolicy(
t, net.Alice, chanPointAlice, 1000, 100000,
defaultBitcoinTimeLockDelta, carol,
lnd.DefaultBitcoinTimeLockDelta, carol,
)
updateChannelPolicy(
t, dave, chanPointDave, 5000, 150000,
defaultBitcoinTimeLockDelta, carol,
lnd.DefaultBitcoinTimeLockDelta, carol,
)
// Using Carol as the source, pay to the 5 invoices from Bob created
@ -4257,7 +4258,7 @@ func testSingleHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4271,7 +4272,7 @@ func testSingleHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) {
nodeNames := []string{"Alice", "Bob"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4298,7 +4299,7 @@ func testSingleHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) {
routesReq := &lnrpc.QueryRoutesRequest{
PubKey: net.Bob.PubKeyStr,
Amt: paymentAmt,
FinalCltvDelta: defaultBitcoinTimeLockDelta,
FinalCltvDelta: lnd.DefaultBitcoinTimeLockDelta,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
routes, err := net.Alice.QueryRoutes(ctxt, routesReq)
@ -4393,7 +4394,7 @@ func testMultiHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4428,7 +4429,7 @@ func testMultiHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) {
},
)
networkChans = append(networkChans, chanPointBob)
bobChanTXID, err := getChanPointFundingTxid(chanPointBob)
bobChanTXID, err := lnd.GetChanPointFundingTxid(chanPointBob)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4442,7 +4443,7 @@ func testMultiHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) {
nodeNames := []string{"Alice", "Bob", "Carol"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4469,7 +4470,7 @@ func testMultiHopSendToRoute(net *lntest.NetworkHarness, t *harnessTest) {
routesReq := &lnrpc.QueryRoutesRequest{
PubKey: carol.PubKeyStr,
Amt: paymentAmt,
FinalCltvDelta: defaultBitcoinTimeLockDelta,
FinalCltvDelta: lnd.DefaultBitcoinTimeLockDelta,
}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
routes, err := net.Alice.QueryRoutes(ctxt, routesReq)
@ -4690,7 +4691,7 @@ func testSendToRouteErrorPropagation(net *lntest.NetworkHarness, t *harnessTest)
func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
amount := maxBtcFundingAmount
amount := lnd.MaxBtcFundingAmount
// Open a channel between Alice and Bob, ensuring the
// channel has been opened properly.
@ -4825,7 +4826,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4858,7 +4859,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
},
)
networkChans = append(networkChans, chanPointDave)
daveChanTXID, err := getChanPointFundingTxid(chanPointDave)
daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4893,7 +4894,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointCarol)
carolChanTXID, err := getChanPointFundingTxid(chanPointCarol)
carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4908,7 +4909,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
nodeNames := []string{"Alice", "Bob", "Carol", "Dave"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -4953,7 +4954,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
if err != nil {
t.Fatalf("error while waiting for channel open: %v", err)
}
fundingTxID, err := getChanPointFundingTxid(chanPointPrivate)
fundingTxID, err := lnd.GetChanPointFundingTxid(chanPointPrivate)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -5392,7 +5393,7 @@ func testMultiHopOverPrivateChannels(net *lntest.NetworkHarness, t *harnessTest)
}
// Retrieve Alice's funding outpoint.
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -5441,7 +5442,7 @@ func testMultiHopOverPrivateChannels(net *lntest.NetworkHarness, t *harnessTest)
}
// Retrieve Bob's funding outpoint.
bobChanTXID, err := getChanPointFundingTxid(chanPointBob)
bobChanTXID, err := lnd.GetChanPointFundingTxid(chanPointBob)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -5496,7 +5497,7 @@ func testMultiHopOverPrivateChannels(net *lntest.NetworkHarness, t *harnessTest)
}
// Retrieve Carol's funding point.
carolChanTXID, err := getChanPointFundingTxid(chanPointCarol)
carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -5922,7 +5923,7 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTe
ctxb := context.Background()
const (
numChannels = 2
amount = maxBtcFundingAmount
amount = lnd.MaxBtcFundingAmount
)
// Let Bob subscribe to channel notifications.
@ -6034,8 +6035,8 @@ func testBasicChannelCreationAndUpdates(net *lntest.NetworkHarness, t *harnessTe
func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
maxPendingChannels := defaultMaxPendingChannels + 1
amount := maxBtcFundingAmount
maxPendingChannels := lnd.DefaultMaxPendingChannels + 1
amount := lnd.MaxBtcFundingAmount
// Create a new node (Carol) with greater number of max pending
// channels.
@ -6111,7 +6112,7 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) {
t.Fatalf("error while waiting for channel open: %v", err)
}
fundingTxID, err := getChanPointFundingTxid(fundingChanPoint)
fundingTxID, err := lnd.GetChanPointFundingTxid(fundingChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -6202,7 +6203,7 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) {
paymentAmt = 10000
)
chanAmt := maxFundingAmount
chanAmt := lnd.MaxFundingAmount
// We'll introduce Carol, which will settle any incoming invoice with a
// totally unrelated preimage.
@ -6578,7 +6579,7 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) {
// We'll need to mine some blocks in order to mark the channel fully
// closed.
_, err = net.Miner.Node.Generate(defaultBitcoinTimeLockDelta - defaultCSV)
_, err = net.Miner.Node.Generate(lnd.DefaultBitcoinTimeLockDelta - defaultCSV)
if err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
@ -6659,7 +6660,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
const (
chanAmt = maxBtcFundingAmount
chanAmt = lnd.MaxBtcFundingAmount
paymentAmt = 10000
numInvoices = 6
)
@ -6914,7 +6915,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
ctxb := context.Background()
const (
chanAmt = maxBtcFundingAmount
chanAmt = lnd.MaxBtcFundingAmount
paymentAmt = 10000
numInvoices = 6
)
@ -7160,7 +7161,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
ctxb := context.Background()
const (
chanAmt = maxBtcFundingAmount
chanAmt = lnd.MaxBtcFundingAmount
pushAmt = 200000
paymentAmt = 10000
numInvoices = 6
@ -7206,7 +7207,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
// In order to test Dave's response to an uncooperative channel closure
// by Carol, we'll first open up a channel between them with a
// maxBtcFundingAmount (2^24) satoshis value.
// lnd.MaxBtcFundingAmount (2^24) satoshis value.
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
chanPoint := openChannelAndAssert(
ctxt, t, net, dave, carol,
@ -7717,7 +7718,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest,
func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
const (
chanAmt = maxBtcFundingAmount
chanAmt = lnd.MaxBtcFundingAmount
paymentAmt = 10000
numInvoices = 6
)
@ -8069,7 +8070,7 @@ func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) {
// In this test we wish to exercise the daemon's correct parsing,
// handling, and propagation of errors that occur while processing a
// multi-hop payment.
const chanAmt = maxBtcFundingAmount
const chanAmt = lnd.MaxBtcFundingAmount
// First establish a channel with a capacity of 0.5 BTC between Alice
// and Bob.
@ -8124,7 +8125,7 @@ func testHtlcErrorPropagation(net *lntest.NetworkHarness, t *harnessTest) {
t.Fatalf("unable to connect bob to carol: %v", err)
}
ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout)
const bobChanAmt = maxBtcFundingAmount
const bobChanAmt = lnd.MaxBtcFundingAmount
chanPointBob := openChannelAndAssert(
ctxt, t, net, net.Bob, carol,
lntest.OpenChannelParams{
@ -8282,7 +8283,7 @@ out:
// We'll send in chunks of the max payment amount. If we're
// about to send too much, then we'll only send the amount
// remaining.
toSend := int64(maxPaymentMSat.ToSatoshis())
toSend := int64(lnd.MaxPaymentMSat.ToSatoshis())
if toSend+amtSent > amtToSend {
toSend = amtToSend - amtSent
}
@ -8460,7 +8461,7 @@ func subscribeGraphNotifications(t *harnessTest, ctxb context.Context,
func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
const chanAmt = maxBtcFundingAmount
const chanAmt = lnd.MaxBtcFundingAmount
// Let Alice subscribe to graph notifications.
graphSub := subscribeGraphNotifications(
@ -8558,11 +8559,11 @@ out:
"expected %v, got %v", blockHeight+1,
closedChan.ClosedHeight)
}
chanPointTxid, err := getChanPointFundingTxid(chanPoint)
chanPointTxid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
closedChanTxid, err := getChanPointFundingTxid(
closedChanTxid, err := lnd.GetChanPointFundingTxid(
closedChan.ChanPoint,
)
if err != nil {
@ -8780,7 +8781,7 @@ func testNodeAnnouncement(net *lntest.NetworkHarness, t *harnessTest) {
func testNodeSignVerify(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
chanAmt := maxBtcFundingAmount
chanAmt := lnd.MaxBtcFundingAmount
pushAmt := btcutil.Amount(100000)
// Create a channel between alice and bob.
@ -9497,13 +9498,13 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
// commitment transaction due to the fact that the HTLC is about to
// timeout. With the default outgoing broadcast delta of zero, this will
// be the same height as the htlc expiry height.
numBlocks := uint32(finalCltvDelta - defaultOutgoingBroadcastDelta)
numBlocks := uint32(finalCltvDelta - lnd.DefaultOutgoingBroadcastDelta)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Bob's force close transaction should now be found in the mempool.
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10160,7 +10161,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10200,7 +10201,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) {
},
)
networkChans = append(networkChans, chanPointDave)
daveChanTXID, err := getChanPointFundingTxid(chanPointDave)
daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10237,7 +10238,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointCarol)
carolChanTXID, err := getChanPointFundingTxid(chanPointCarol)
carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10251,7 +10252,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) {
nodeNames := []string{"Alice", "Bob", "Carol", "Dave"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10483,7 +10484,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10523,7 +10524,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
},
)
networkChans = append(networkChans, chanPointDave)
daveChanTXID, err := getChanPointFundingTxid(chanPointDave)
daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10560,7 +10561,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
)
networkChans = append(networkChans, chanPointCarol)
carolChanTXID, err := getChanPointFundingTxid(chanPointCarol)
carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10574,7 +10575,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) {
nodeNames := []string{"Alice", "Bob", "Carol", "Dave"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10813,7 +10814,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10854,7 +10855,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness
)
networkChans = append(networkChans, chanPointDave)
daveChanTXID, err := getChanPointFundingTxid(chanPointDave)
daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10891,7 +10892,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness
)
networkChans = append(networkChans, chanPointCarol)
carolChanTXID, err := getChanPointFundingTxid(chanPointCarol)
carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -10905,7 +10906,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness
nodeNames := []string{"Alice", "Bob", "Carol", "Dave"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -11150,7 +11151,7 @@ func testSwitchOfflineDeliveryOutgoingOffline(
)
networkChans = append(networkChans, chanPointAlice)
aliceChanTXID, err := getChanPointFundingTxid(chanPointAlice)
aliceChanTXID, err := lnd.GetChanPointFundingTxid(chanPointAlice)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -11190,7 +11191,7 @@ func testSwitchOfflineDeliveryOutgoingOffline(
},
)
networkChans = append(networkChans, chanPointDave)
daveChanTXID, err := getChanPointFundingTxid(chanPointDave)
daveChanTXID, err := lnd.GetChanPointFundingTxid(chanPointDave)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -11225,7 +11226,7 @@ func testSwitchOfflineDeliveryOutgoingOffline(
)
networkChans = append(networkChans, chanPointCarol)
carolChanTXID, err := getChanPointFundingTxid(chanPointCarol)
carolChanTXID, err := lnd.GetChanPointFundingTxid(chanPointCarol)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -11239,7 +11240,7 @@ func testSwitchOfflineDeliveryOutgoingOffline(
nodeNames := []string{"Alice", "Bob", "Carol", "Dave"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -11487,7 +11488,7 @@ func testQueryRoutes(net *lntest.NetworkHarness, t *harnessTest) {
nodeNames := []string{"Alice", "Bob", "Carol", "Dave"}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -11699,7 +11700,7 @@ func testRouteFeeCutoff(net *lntest.NetworkHarness, t *harnessTest) {
}
for _, chanPoint := range networkChans {
for i, node := range nodes {
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := lnd.GetChanPointFundingTxid(chanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
@ -11725,7 +11726,7 @@ func testRouteFeeCutoff(net *lntest.NetworkHarness, t *harnessTest) {
// Alice -> Carol -> Dave
baseFee := int64(10000)
feeRate := int64(5)
timeLockDelta := uint32(defaultBitcoinTimeLockDelta)
timeLockDelta := uint32(lnd.DefaultBitcoinTimeLockDelta)
expectedPolicy := &lnrpc.RoutingPolicy{
FeeBaseMsat: baseFee,
@ -11984,9 +11985,9 @@ func testSendUpdateDisableChannel(net *lntest.NetworkHarness, t *harnessTest) {
// We should expect to see a channel update with the default routing
// policy, except that it should indicate the channel is disabled.
expectedPolicy := &lnrpc.RoutingPolicy{
FeeBaseMsat: int64(defaultBitcoinBaseFeeMSat),
FeeRateMilliMsat: int64(defaultBitcoinFeeRate),
TimeLockDelta: defaultBitcoinTimeLockDelta,
FeeBaseMsat: int64(lnd.DefaultBitcoinBaseFeeMSat),
FeeRateMilliMsat: int64(lnd.DefaultBitcoinFeeRate),
TimeLockDelta: lnd.DefaultBitcoinTimeLockDelta,
MinHtlc: 1000, // default value
Disabled: true,
}
@ -12125,7 +12126,7 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) {
// First establish a channel between Alice and Bob.
channelParam := lntest.OpenChannelParams{
Amt: maxBtcFundingAmount,
Amt: lnd.MaxBtcFundingAmount,
PushAmt: btcutil.Amount(100000),
}
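
Another constant crossing the package boundary above is MaxPaymentMSat, which the chunked-send loop in this file uses to split a large payment into maximum-sized pieces. A sketch of that slicing logic (nextChunk is an illustrative name, not part of the diff):

package itest

import "github.com/lightningnetwork/lnd"

// nextChunk mirrors the chunked-send loop visible in the diff above: send at
// most the exported maximum payment size, trimming the final chunk to the
// amount still outstanding.
func nextChunk(amtToSend, amtSent int64) int64 {
	toSend := int64(lnd.MaxPaymentMSat.ToSatoshis())
	if toSend+amtSent > amtToSend {
		toSend = amtToSend - amtSent
	}
	return toSend
}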

View File

@ -316,7 +316,7 @@ func (hn *HarnessNode) start(lndError chan<- error) error {
args := hn.cfg.genArgs()
args = append(args, fmt.Sprintf("--profile=%d", 9000+hn.NodeID))
hn.cmd = exec.Command("./lnd-itest", args...)
hn.cmd = exec.Command("../../lnd-itest", args...)
// Redirect stderr output to buffer
var errb bytes.Buffer
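
The harness path change follows from where go test now runs: the test process's working directory is lntest/itest, two levels below the repository root where the lnd-itest binary is produced, so the hard-coded relative path gains two parent components. A sketch of resolving it explicitly (itestBinaryPath is illustrative, not part of the diff):

package lntest

import "path/filepath"

// itestBinaryPath resolves the lnd-itest binary relative to the working
// directory of the test process. With the tests living in lntest/itest, the
// binary built at the repository root is two directories up.
func itestBinaryPath() (string, error) {
	return filepath.Abs("../../lnd-itest")
}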

View File

@ -65,4 +65,4 @@ else
ITEST_TAGS += btcd
endif
ITEST := rm output*.log; date; $(GOTEST) -tags="$(ITEST_TAGS)" $(TEST_FLAGS) -logoutput
ITEST := rm output*.log; date; $(GOTEST) ./lntest/itest -tags="$(ITEST_TAGS)" $(TEST_FLAGS) -logoutput

View File

@ -69,10 +69,10 @@ const (
var (
zeroHash [32]byte
// maxPaymentMSat is the maximum allowed payment currently permitted as
// MaxPaymentMSat is the maximum allowed payment currently permitted as
// defined in BOLT-002. This value depends on which chain is active.
// It is set to the value under the Bitcoin chain as default.
maxPaymentMSat = maxBtcPaymentMSat
MaxPaymentMSat = maxBtcPaymentMSat
defaultAccount uint32 = waddrmgr.DefaultAccountNum
@ -450,7 +450,7 @@ func newRPCServer(s *server, macService *macaroons.Service,
}
graph := s.chanDB.ChannelGraph()
routerBackend := &routerrpc.RouterBackend{
MaxPaymentMSat: maxPaymentMSat,
MaxPaymentMSat: MaxPaymentMSat,
SelfNode: selfNode.PubKeyBytes,
FetchChannelCapacity: func(chanID uint64) (btcutil.Amount,
error) {
@ -1356,9 +1356,9 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
// Ensure that the user doesn't exceed the current soft-limit for
// channel size. If the funding amount is above the soft-limit, then
// we'll reject the request.
if localFundingAmt > maxFundingAmount {
if localFundingAmt > MaxFundingAmount {
return fmt.Errorf("funding amount is too large, the max "+
"channel size is: %v", maxFundingAmount)
"channel size is: %v", MaxFundingAmount)
}
// Restrict the size of the channel we'll actually open. At a later
@ -1458,7 +1458,7 @@ out:
switch update := fundingUpdate.Update.(type) {
case *lnrpc.OpenStatusUpdate_ChanOpen:
chanPoint := update.ChanOpen.ChannelPoint
txid, err := getChanPointFundingTxid(chanPoint)
txid, err := GetChanPointFundingTxid(chanPoint)
if err != nil {
return err
}
@ -1609,9 +1609,9 @@ func (r *rpcServer) OpenChannelSync(ctx context.Context,
}
}
// getChanPointFundingTxid returns the given channel point's funding txid in
// GetChanPointFundingTxid returns the given channel point's funding txid in
// raw bytes.
func getChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) (*chainhash.Hash, error) {
func GetChanPointFundingTxid(chanPoint *lnrpc.ChannelPoint) (*chainhash.Hash, error) {
var txid []byte
// A channel point's funding txid can be get/set as a byte slice or a
@ -1646,7 +1646,7 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
force := in.Force
index := in.ChannelPoint.OutputIndex
txid, err := getChanPointFundingTxid(in.GetChannelPoint())
txid, err := GetChanPointFundingTxid(in.GetChannelPoint())
if err != nil {
rpcsLog.Errorf("[closechannel] unable to get funding txid: %v", err)
return err
@ -1854,7 +1854,7 @@ func (r *rpcServer) AbandonChannel(ctx context.Context,
// We'll parse out the arguments to we can obtain the chanPoint of the
// target channel.
txid, err := getChanPointFundingTxid(in.GetChannelPoint())
txid, err := GetChanPointFundingTxid(in.GetChannelPoint())
if err != nil {
return nil, err
}
@ -3025,12 +3025,12 @@ func extractPaymentIntent(rpcPayReq *rpcPaymentRequest) (rpcPaymentIntent, error
// Currently, within the bootstrap phase of the network, we limit the
// largest payment size allotted to (2^32) - 1 mSAT or 4.29 million
// satoshis.
if payIntent.msat > maxPaymentMSat {
if payIntent.msat > MaxPaymentMSat {
// In this case, we'll send an error to the caller, but
// continue our loop for the next payment.
return payIntent, fmt.Errorf("payment of %v is too large, "+
"max payment allowed is %v", payIntent.msat,
maxPaymentMSat)
MaxPaymentMSat)
}
@ -3350,7 +3350,7 @@ func (r *rpcServer) AddInvoice(ctx context.Context,
IsChannelActive: r.server.htlcSwitch.HasActiveLink,
ChainParams: activeNetParams.Params,
NodeSigner: r.server.nodeSigner,
MaxPaymentMSat: maxPaymentMSat,
MaxPaymentMSat: MaxPaymentMSat,
DefaultCLTVExpiry: defaultDelta,
ChanDB: r.server.chanDB,
}
@ -4417,7 +4417,7 @@ func (r *rpcServer) UpdateChannelPolicy(ctx context.Context,
// Otherwise, we're targeting an individual channel by its channel
// point.
case *lnrpc.PolicyUpdateRequest_ChanPoint:
txid, err := getChanPointFundingTxid(scope.ChanPoint)
txid, err := GetChanPointFundingTxid(scope.ChanPoint)
if err != nil {
return nil, err
}
@ -4598,7 +4598,7 @@ func (r *rpcServer) ExportChannelBackup(ctx context.Context,
// First, we'll convert the lnrpc channel point into a wire.OutPoint
// that we can manipulate.
txid, err := getChanPointFundingTxid(in.ChanPoint)
txid, err := GetChanPointFundingTxid(in.ChanPoint)
if err != nil {
return nil, err
}
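
Alongside the renamed helper, MaxPaymentMSat is now exported too; the payment-size rejection it backs is easy to reproduce in isolation (the amount below is arbitrary):

package main

import (
	"fmt"

	"github.com/lightningnetwork/lnd"
	"github.com/lightningnetwork/lnd/lnwire"
)

func main() {
	// During the bootstrap phase the largest payment is capped at
	// (2^32)-1 mSAT, roughly 4.29 million satoshis.
	payAmt := lnwire.NewMSatFromSatoshis(5000000)
	if payAmt > lnd.MaxPaymentMSat {
		fmt.Printf("payment of %v is too large, max payment allowed is %v\n",
			payAmt, lnd.MaxPaymentMSat)
	}
}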

View File

@ -762,8 +762,8 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
s.chainArb = contractcourt.NewChainArbitrator(contractcourt.ChainArbitratorConfig{
ChainHash: *activeNetParams.GenesisHash,
IncomingBroadcastDelta: defaultIncomingBroadcastDelta,
OutgoingBroadcastDelta: defaultOutgoingBroadcastDelta,
IncomingBroadcastDelta: DefaultIncomingBroadcastDelta,
OutgoingBroadcastDelta: DefaultOutgoingBroadcastDelta,
NewSweepAddr: func() ([]byte, error) {
return newSweepPkScript(cc.wallet)
},
@ -941,7 +941,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
minConf := uint64(3)
maxConf := uint64(6)
maxChannelSize := uint64(
lnwire.NewMSatFromSatoshis(maxFundingAmount))
lnwire.NewMSatFromSatoshis(MaxFundingAmount))
stake := lnwire.NewMSatFromSatoshis(chanAmt) + pushAmt
conf := maxConf * uint64(stake) / maxChannelSize
if conf < minConf {
@ -957,7 +957,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
// remote have to claim funds in case of a unilateral
// close) linearly from minRemoteDelay blocks
// for small channels, to maxRemoteDelay blocks
// for channels of size maxFundingAmount.
// for channels of size MaxFundingAmount.
// TODO(halseth): Litecoin parameter for LTC.
// In case the user has explicitly specified
@ -970,7 +970,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
// If not we scale according to channel size.
delay := uint16(btcutil.Amount(maxRemoteDelay) *
chanAmt / maxFundingAmount)
chanAmt / MaxFundingAmount)
if delay < minRemoteDelay {
delay = minRemoteDelay
}
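
The CSV-delay scaling in this hunk is worth spelling out: the delay the remote party must wait after a unilateral close grows linearly with channel size up to the MaxFundingAmount soft-limit. In the sketch below, the minRemoteDelay and maxRemoteDelay values are assumptions for illustration; only the scaling formula is taken from the diff.

package main

import (
	"fmt"

	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd"
)

// Assumed bounds, for illustration only; the real values live in server.go.
const (
	minRemoteDelay = 144
	maxRemoteDelay = 2016
)

// remoteDelay reproduces the linear scaling shown in the hunk above, for a
// channel amount no larger than lnd.MaxFundingAmount.
func remoteDelay(chanAmt btcutil.Amount) uint16 {
	delay := uint16(btcutil.Amount(maxRemoteDelay) * chanAmt / lnd.MaxFundingAmount)
	if delay < minRemoteDelay {
		delay = minRemoteDelay
	}
	return delay
}

func main() {
	fmt.Println(remoteDelay(lnd.MaxFundingAmount / 2)) // about half of maxRemoteDelay
}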

View File

@ -172,7 +172,7 @@ func (s *subRPCServerConfigs) PopulateDependencies(cc *chainControl,
reflect.ValueOf(nodeSigner),
)
subCfgValue.FieldByName("MaxPaymentMSat").Set(
reflect.ValueOf(maxPaymentMSat),
reflect.ValueOf(MaxPaymentMSat),
)
defaultDelta := cfg.Bitcoin.TimeLockDelta
if registeredChains.PrimaryChain() == litecoinChain {