build: update btcd and btcwallet dependencies
commit a620ce3682
parent a329c80612
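Note: the bumped btcd exposes the rpctest harness' RPC connection as the Client field (previously Node), and the bumped btcwallet renames waddrmgr.DerivationPath's Account field to InternalAccount; the diff below adjusts every call site accordingly. A minimal sketch of the call-site change, assuming only the rpctest package; the helper name itself is hypothetical and not part of this commit:

package chainntnfstest

import (
	"testing"

	"github.com/btcsuite/btcd/integration/rpctest"
)

// bestHeight mirrors the pattern applied throughout this diff: the harness'
// RPC connection is now reached via miner.Client (previously miner.Node), so
// GetBestBlock, Generate, SendRawTransaction, etc. are invoked on that field.
func bestHeight(t *testing.T, miner *rpctest.Harness) int32 {
	t.Helper()

	// Previously: miner.Node.GetBestBlock()
	_, height, err := miner.Client.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to retrieve miner's current height: %v", err)
	}

	return height
}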
@@ -77,7 +77,7 @@ func syncNotifierWithMiner(t *testing.T, notifier *BitcoindNotifier,
t.Helper()
-_, minerHeight, err := miner.Node.GetBestBlock()
+_, minerHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve miner's current height: %v", err)
}

@@ -173,7 +173,7 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) {
"mempool, but did not: %v", txStatus)
}
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}

@@ -247,14 +247,14 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
// ensured above.
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
-spendTxHash, err := miner.Node.SendRawTransaction(spendTx, true)
+spendTxHash, err := miner.Client.SendRawTransaction(spendTx, true)
if err != nil {
t.Fatalf("unable to broadcast tx: %v", err)
}
if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
@@ -132,7 +132,7 @@ func TestHistoricalConfDetailsTxIndex(t *testing.T) {
// We'll now confirm this transaction and re-attempt to retrieve its
// confirmation details.
-if _, err := harness.Node.Generate(1); err != nil {
+if _, err := harness.Client.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}

@@ -188,7 +188,7 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
// Now, we'll create a test transaction and attempt to retrieve its
// confirmation details. We'll note its broadcast height to use as the
// height hint when manually scanning the chain.
-_, currentHeight, err := harness.Node.GetBestBlock()
+_, currentHeight, err := harness.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}

@@ -219,7 +219,7 @@ func TestHistoricalConfDetailsNoTxIndex(t *testing.T) {
// We'll now confirm this transaction and re-attempt to retrieve its
// confirmation details.
-if _, err := harness.Node.Generate(1); err != nil {
+if _, err := harness.Client.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
@@ -43,7 +43,7 @@ func testSingleConfirmationNotification(miner *rpctest.Harness,
t.Fatalf("tx not relayed to miner: %v", err)
}
-_, currentHeight, err := miner.Node.GetBestBlock()
+_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -67,7 +67,7 @@ func testSingleConfirmationNotification(miner *rpctest.Harness,
// Now generate a single block, the transaction should be included which
// should trigger a notification event.
-blockHash, err := miner.Node.Generate(1)
+blockHash, err := miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}

@@ -81,7 +81,7 @@ func testSingleConfirmationNotification(miner *rpctest.Harness,
// Finally, we'll verify that the tx index returned is the exact same
// as the tx index of the transaction within the block itself.
-msgBlock, err := miner.Node.GetBlock(blockHash[0])
+msgBlock, err := miner.Client.GetBlock(blockHash[0])
if err != nil {
t.Fatalf("unable to fetch block: %v", err)
}

@@ -117,7 +117,7 @@ func testMultiConfirmationNotification(miner *rpctest.Harness,
t.Fatalf("tx not relayed to miner: %v", err)
}
-_, currentHeight, err := miner.Node.GetBestBlock()
+_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -139,7 +139,7 @@ func testMultiConfirmationNotification(miner *rpctest.Harness,
// Now generate a six blocks. The transaction should be included in the
// first block, which will be built upon by the other 5 blocks.
-if _, err := miner.Node.Generate(6); err != nil {
+if _, err := miner.Client.Generate(6); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}

@@ -163,7 +163,7 @@ func testBatchConfirmationNotification(miner *rpctest.Harness,
confSpread := [6]uint32{1, 2, 3, 6, 20, 22}
confIntents := make([]*chainntnfs.ConfirmationEvent, len(confSpread))
-_, currentHeight, err := miner.Node.GetBestBlock()
+_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -215,7 +215,7 @@ func testBatchConfirmationNotification(miner *rpctest.Harness,
// Generate the number of blocks necessary to trigger this
// current confirmation notification.
-if _, err := miner.Node.Generate(blocksToGen); err != nil {
+if _, err := miner.Client.Generate(blocksToGen); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}

@@ -275,7 +275,7 @@ func testSpendNotification(miner *rpctest.Harness,
// To do so, we first create a new output to our test target address.
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
-_, currentHeight, err := miner.Node.GetBestBlock()
+_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -308,7 +308,7 @@ func testSpendNotification(miner *rpctest.Harness,
spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
// Broadcast our spending transaction.
-spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
+spenderSha, err := miner.Client.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to broadcast tx: %v", err)
}

@@ -365,11 +365,11 @@ func testSpendNotification(miner *rpctest.Harness,
// Now we mine a single block, which should include our spend. The
// notification should also be sent off.
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
-_, currentHeight, err = miner.Node.GetBestBlock()
+_, currentHeight, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
@@ -425,7 +425,7 @@ func testBlockEpochNotification(miner *rpctest.Harness,
// Now generate 10 blocks, the clients above should each receive 10
// notifications, thereby unblocking the goroutine above.
-if _, err := miner.Node.Generate(numBlocks); err != nil {
+if _, err := miner.Client.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}

@@ -455,7 +455,7 @@ func testMultiClientConfirmationNotification(miner *rpctest.Harness,
numConfs = 1
)
-_, currentHeight, err := miner.Node.GetBestBlock()
+_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -492,7 +492,7 @@ func testMultiClientConfirmationNotification(miner *rpctest.Harness,
// Finally, generate a single block which should trigger the unblocking
// of all numConfsClients blocked on the channel read above.
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}

@@ -525,7 +525,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
// older blocks when the confirmation event is registered below to ensure
// that the TXID hasn't already been included in the chain, otherwise the
// notification will never be sent.
-_, err = miner.Node.Generate(1)
+_, err = miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}

@@ -546,13 +546,13 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
t.Fatalf("tx not relayed to miner: %v", err)
}
-_, currentHeight, err := miner.Node.GetBestBlock()
+_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
// Now generate another block containing txs 1 & 2.
-blockHash, err := miner.Node.Generate(1)
+blockHash, err := miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}

@@ -579,7 +579,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
case confInfo := <-ntfn1.Confirmed:
// Finally, we'll verify that the tx index returned is the exact same
// as the tx index of the transaction within the block itself.
-msgBlock, err := miner.Node.GetBlock(blockHash[0])
+msgBlock, err := miner.Client.GetBlock(blockHash[0])
if err != nil {
t.Fatalf("unable to fetch block: %v", err)
}

@@ -622,7 +622,7 @@ func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
}
// Fully confirm tx3.
-_, err = miner.Node.Generate(2)
+_, err = miner.Client.Generate(2)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}

@@ -710,7 +710,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
t.Fatalf("tx not relayed to miner: %v", err)
}
-_, currentHeight, err := miner.Node.GetBestBlock()
+_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -719,7 +719,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
// Add a block right before registering, this makes race conditions
// between the historical dispatcher and the normal dispatcher more obvious
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}

@@ -738,7 +738,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
}
// Generate another 2 blocks, this should dispatch the confirm notification
-if _, err := miner.Node.Generate(2); err != nil {
+if _, err := miner.Client.Generate(2); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}

@@ -753,7 +753,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
t.Fatalf("tx not relayed to miner: %v", err)
}
-_, currentHeight, err = miner.Node.GetBestBlock()
+_, currentHeight, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -773,7 +773,7 @@ func testLazyNtfnConsumer(miner *rpctest.Harness,
t.Fatalf("unable to register ntfn: %v", err)
}
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}

@@ -806,14 +806,14 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
// To do so, we first create a new output to our test target address.
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
-_, heightHint, err := miner.Node.GetBestBlock()
+_, heightHint, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
// We'll then spend this output and broadcast the spend transaction.
spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
-spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
+spenderSha, err := miner.Client.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to broadcast tx: %v", err)
}

@@ -829,10 +829,10 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
}
// Now we mine an additional block, which should include our spend.
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
-_, spendHeight, err := miner.Node.GetBestBlock()
+_, spendHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
@@ -895,7 +895,7 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
// Bury the spend even deeper, and do the same check.
const numBlocks = 10
-if _, err := miner.Node.Generate(numBlocks); err != nil {
+if _, err := miner.Client.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}

@@ -925,7 +925,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
// ourselves.
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, node)
-_, currentHeight, err := node.Node.GetBestBlock()
+_, currentHeight, err := node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -961,7 +961,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
spendClients[1].Cancel()
// Broadcast our spending transaction.
-spenderSha, err := node.Node.SendRawTransaction(spendingTx, true)
+spenderSha, err := node.Client.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to broadcast tx: %v", err)
}

@@ -972,7 +972,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
// Now we mine a single block, which should include our spend. The
// notification should also be sent off.
-if _, err := node.Node.Generate(1); err != nil {
+if _, err := node.Client.Generate(1); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}

@@ -1036,7 +1036,7 @@ func testCancelEpochNtfn(node *rpctest.Harness,
// Now mine a single block, this should trigger the logic to dispatch
// epoch notifications.
-if _, err := node.Node.Generate(1); err != nil {
+if _, err := node.Client.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}

@@ -1089,12 +1089,12 @@ func testReorgConf(miner *rpctest.Harness,
}
// The two should be on the same blockheight.
-_, nodeHeight1, err := miner.Node.GetBestBlock()
+_, nodeHeight1, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}
-_, nodeHeight2, err := miner2.Node.GetBestBlock()
+_, nodeHeight2, err := miner2.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}

@@ -1106,7 +1106,7 @@ func testReorgConf(miner *rpctest.Harness,
// We disconnect the two nodes, such that we can start mining on them
// individually without the other one learning about the new blocks.
-err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
+err = miner.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}

@@ -1119,7 +1119,7 @@ func testReorgConf(miner *rpctest.Harness,
t.Fatalf("tx not relayed to miner: %v", err)
}
-_, currentHeight, err := miner.Node.GetBestBlock()
+_, currentHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}

@@ -1142,7 +1142,7 @@ func testReorgConf(miner *rpctest.Harness,
}
// Now generate a single block, the transaction should be included.
-_, err = miner.Node.Generate(1)
+_, err = miner.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}

@@ -1157,7 +1157,7 @@ func testReorgConf(miner *rpctest.Harness,
// Reorganize transaction out of the chain by generating a longer fork
// from the other miner. The transaction is not included in this fork.
-miner2.Node.Generate(2)
+miner2.Client.Generate(2)
// Reconnect nodes to reach consensus on the longest chain. miner2's chain
// should win and become active on miner1.

@@ -1169,12 +1169,12 @@ func testReorgConf(miner *rpctest.Harness,
t.Fatalf("unable to join node on blocks: %v", err)
}
-_, nodeHeight1, err = miner.Node.GetBestBlock()
+_, nodeHeight1, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}
-_, nodeHeight2, err = miner2.Node.GetBestBlock()
+_, nodeHeight2, err = miner2.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}

@@ -1195,12 +1195,12 @@ func testReorgConf(miner *rpctest.Harness,
// Now confirm the transaction on the longest chain and verify that we
// receive the notification.
-tx, err := miner.Node.GetRawTransaction(txid)
+tx, err := miner.Client.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to get raw tx: %v", err)
}
-txid, err = miner2.Node.SendRawTransaction(tx.MsgTx(), false)
+txid, err = miner2.Client.SendRawTransaction(tx.MsgTx(), false)
if err != nil {
t.Fatalf("unable to get send tx: %v", err)
}
@@ -1208,7 +1208,7 @@ func testReorgConf(miner *rpctest.Harness,
t.Fatalf("tx not relayed to miner: %v", err)
}
-_, err = miner.Node.Generate(3)
+_, err = miner.Client.Generate(3)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}

@@ -1229,7 +1229,7 @@ func testReorgSpend(miner *rpctest.Harness,
// We'll start by creating an output and registering a spend
// notification for it.
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
-_, heightHint, err := miner.Node.GetBestBlock()
+_, heightHint, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}

@@ -1270,11 +1270,11 @@ func testReorgSpend(miner *rpctest.Harness,
if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
t.Fatalf("unable to sync miners: %v", err)
}
-_, minerHeight1, err := miner.Node.GetBestBlock()
+_, minerHeight1, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner1's current height: %v", err)
}
-_, minerHeight2, err := miner2.Node.GetBestBlock()
+_, minerHeight2, err := miner2.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner2's current height: %v", err)
}

@@ -1285,7 +1285,7 @@ func testReorgSpend(miner *rpctest.Harness,
// We disconnect the two nodes, such that we can start mining on them
// individually without the other one learning about the new blocks.
-err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
+err = miner.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to disconnect miners: %v", err)
}

@@ -1293,7 +1293,7 @@ func testReorgSpend(miner *rpctest.Harness,
// Craft the spending transaction for the outpoint created above and
// confirm it under the chain of the original miner.
spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
-spendTxHash, err := miner.Node.SendRawTransaction(spendTx, true)
+spendTxHash, err := miner.Client.SendRawTransaction(spendTx, true)
if err != nil {
t.Fatalf("unable to broadcast spend tx: %v", err)
}

@@ -1301,10 +1301,10 @@ func testReorgSpend(miner *rpctest.Harness,
t.Fatalf("spend tx not relayed to miner: %v", err)
}
const numBlocks = 1
-if _, err := miner.Node.Generate(numBlocks); err != nil {
+if _, err := miner.Client.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
-_, spendHeight, err := miner.Node.GetBestBlock()
+_, spendHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get spend height: %v", err)
}

@@ -1322,7 +1322,7 @@ func testReorgSpend(miner *rpctest.Harness,
// Now, with the other miner, we'll generate one more block than the
// other miner and connect them to cause a reorg.
-if _, err := miner2.Node.Generate(numBlocks + 1); err != nil {
+if _, err := miner2.Client.Generate(numBlocks + 1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
if err := rpctest.ConnectNode(miner, miner2); err != nil {

@@ -1332,11 +1332,11 @@ func testReorgSpend(miner *rpctest.Harness,
if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
t.Fatalf("unable to sync miners: %v", err)
}
-_, minerHeight1, err = miner.Node.GetBestBlock()
+_, minerHeight1, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner1's current height: %v", err)
}
-_, minerHeight2, err = miner2.Node.GetBestBlock()
+_, minerHeight2, err = miner2.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get miner2's current height: %v", err)
}

@@ -1358,16 +1358,16 @@ func testReorgSpend(miner *rpctest.Harness,
// Now that both miners are on the same chain, we'll confirm the
// spending transaction of the outpoint and receive a notification for
// it.
-if _, err = miner2.Node.SendRawTransaction(spendTx, true); err != nil {
+if _, err = miner2.Client.SendRawTransaction(spendTx, true); err != nil {
t.Fatalf("unable to broadcast spend tx: %v", err)
}
if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
-if _, err := miner.Node.Generate(numBlocks); err != nil {
+if _, err := miner.Client.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
-_, spendHeight, err = miner.Node.GetBestBlock()
+_, spendHeight, err = miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}
@@ -1392,7 +1392,7 @@ func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness,
const numClients = 5
var wg sync.WaitGroup
-outdatedHash, outdatedHeight, err := miner.Node.GetBestBlock()
+outdatedHash, outdatedHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to retrieve current height: %v", err)
}

@@ -1400,7 +1400,7 @@ func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness,
// This function is used by UnsafeStart to ensure all notifications
// are fully drained before clients register for notifications.
generateBlocks := func() error {
-_, err = miner.Node.Generate(numBlocks)
+_, err = miner.Client.Generate(numBlocks)
return err
}

@@ -1484,7 +1484,7 @@ func testCatchUpOnMissedBlocks(miner *rpctest.Harness,
const numClients = 5
var wg sync.WaitGroup
-_, bestHeight, err := miner.Node.GetBestBlock()
+_, bestHeight, err := miner.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}

@@ -1492,7 +1492,7 @@ func testCatchUpOnMissedBlocks(miner *rpctest.Harness,
// This function is used by UnsafeStart to ensure all notifications
// are fully drained before clients register for notifications.
generateBlocks := func() error {
-_, err = miner.Node.Generate(numBlocks)
+_, err = miner.Client.Generate(numBlocks)
return err
}

@@ -1527,7 +1527,7 @@ func testCatchUpOnMissedBlocks(miner *rpctest.Harness,
// Generate a single block to trigger the backlog of historical
// notifications for the previously mined blocks.
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}

@@ -1618,12 +1618,12 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
}
// The two should be on the same blockheight.
-_, nodeHeight1, err := miner1.Node.GetBestBlock()
+_, nodeHeight1, err := miner1.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}
-_, nodeHeight2, err := miner2.Node.GetBestBlock()
+_, nodeHeight2, err := miner2.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current blockheight %v", err)
}

@@ -1635,20 +1635,20 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
// We disconnect the two nodes, such that we can start mining on them
// individually without the other one learning about the new blocks.
-err = miner1.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
+err = miner1.Client.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to remove node: %v", err)
}
// Now mine on each chain separately
-blocks, err := miner1.Node.Generate(numBlocks)
+blocks, err := miner1.Client.Generate(numBlocks)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
// We generate an extra block on miner 2's chain to ensure it is the
// longer chain.
-_, err = miner2.Node.Generate(numBlocks + 1)
+_, err = miner2.Client.Generate(numBlocks + 1)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}

@@ -1665,12 +1665,12 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
// The two should be on the same block hash.
timeout := time.After(10 * time.Second)
for {
-nodeHash1, _, err := miner1.Node.GetBestBlock()
+nodeHash1, _, err := miner1.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current block hash: %v", err)
}
-nodeHash2, _, err := miner2.Node.GetBestBlock()
+nodeHash2, _, err := miner2.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current block hash: %v", err)
}

@@ -1721,7 +1721,7 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
// Generate a single block, which should trigger the notifier to rewind
// to the common ancestor and dispatch notifications from there.
-_, err = miner2.Node.Generate(1)
+_, err = miner2.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
@@ -82,7 +82,7 @@ func WaitForMempoolTx(miner *rpctest.Harness, txid *chainhash.Hash) error {
trickle := time.After(2 * TrickleInterval)
for {
// Check for the harness' knowledge of the txid.
-tx, err := miner.Node.GetRawTransaction(txid)
+tx, err := miner.Client.GetRawTransaction(txid)
if err != nil {
jsonErr, ok := err.(*btcjson.RPCError)
if ok && jsonErr.Code == btcjson.ErrRPCNoTxInfo {

@@ -138,7 +138,7 @@ func CreateSpendableOutput(t *testing.T,
if err := WaitForMempoolTx(miner, txid); err != nil {
t.Fatalf("tx not relayed to miner: %v", err)
}
-if _, err := miner.Node.Generate(1); err != nil {
+if _, err := miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate single block: %v", err)
}
go.mod (12 changes)

@@ -5,13 +5,14 @@ require (
github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e // indirect
github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82
github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2
-github.com/btcsuite/btcd v0.21.0-beta.0.20201208033208-6bd4c64a54fa
+github.com/btcsuite/btcd v0.21.0-beta.0.20210401013323-36a96f6a0025
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f
-github.com/btcsuite/btcutil v1.0.2
-github.com/btcsuite/btcutil/psbt v1.0.3-0.20200826194809-5f93e33af2b0
-github.com/btcsuite/btcwallet v0.11.1-0.20210312232944-4ec908df9386
-github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0
+github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
+github.com/btcsuite/btcutil/psbt v1.0.3-0.20201208143702-a53e38424cce
+github.com/btcsuite/btcwallet v0.11.1-0.20210329233242-e0607006dce6
+github.com/btcsuite/btcwallet/wallet/txauthor v1.0.1-0.20210329233242-e0607006dce6
github.com/btcsuite/btcwallet/wallet/txrules v1.0.0
+github.com/btcsuite/btcwallet/wallet/txsizes v1.0.1-0.20210329233242-e0607006dce6 // indirect
github.com/btcsuite/btcwallet/walletdb v1.3.4
github.com/btcsuite/btcwallet/wtxmgr v1.2.1-0.20210312232944-4ec908df9386
github.com/coreos/etcd v3.3.22+incompatible

@@ -21,6 +22,7 @@ require (
github.com/davecgh/go-spew v1.1.1
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
+github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/go-errors/errors v1.0.1
github.com/go-openapi/strfmt v0.19.5 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
go.sum (24 changes)

@@ -26,23 +26,27 @@ github.com/btcsuite/btcd v0.0.0-20190629003639-c26ffa870fd8/go.mod h1:3J08xEfcug
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.20.1-beta.0.20200513120220-b470eee47728/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btcd v0.21.0-beta.0.20201208033208-6bd4c64a54fa h1:sobXG8TE1VEBX4QWOzSKyulSwuOFdb8vzyhGyblXrmQ=
github.com/btcsuite/btcd v0.21.0-beta.0.20201208033208-6bd4c64a54fa/go.mod h1:Sv4JPQ3/M+teHz9Bo5jBpkNcP0x6r7rdihlNL/7tTAs=
github.com/btcsuite/btcd v0.21.0-beta.0.20210401013323-36a96f6a0025 h1:aoVqvZk4mLyF3WZbqEVPq+vXnwL2wekZg4P4mjYJNLs=
github.com/btcsuite/btcd v0.21.0-beta.0.20210401013323-36a96f6a0025/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
github.com/btcsuite/btcutil/psbt v1.0.3-0.20200826194809-5f93e33af2b0 h1:3Zumkyl6PWyHuVJ04me0xeD9CnPOhNgeGpapFbzy7O4=
github.com/btcsuite/btcutil/psbt v1.0.3-0.20200826194809-5f93e33af2b0/go.mod h1:LVveMu4VaNSkIRTZu2+ut0HDBRuYjqGocxDMNS1KuGQ=
github.com/btcsuite/btcwallet v0.11.1-0.20210312232944-4ec908df9386 h1:DfZIXWPAm35bW83OtS/AXH9A9pE6dxxIUhf260S9Wmo=
github.com/btcsuite/btcwallet v0.11.1-0.20210312232944-4ec908df9386/go.mod h1:P1U4LKSB/bhFQdOM7ab1XqNoBGFyFAe7eKObEBD9mIo=
github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0 h1:KGHMW5sd7yDdDMkCZ/JpP0KltolFsQcB973brBnfj4c=
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ=
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
github.com/btcsuite/btcutil/psbt v1.0.3-0.20201208143702-a53e38424cce h1:3PRwz+js0AMMV1fHRrCdQ55akoomx4Q3ulozHC3BDDY=
github.com/btcsuite/btcutil/psbt v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:LVveMu4VaNSkIRTZu2+ut0HDBRuYjqGocxDMNS1KuGQ=
github.com/btcsuite/btcwallet v0.11.1-0.20210329233242-e0607006dce6 h1:5Y6ui667YQrFCxPYV4Pmf9jpEsIkcJxMKsXJzNsMU9o=
github.com/btcsuite/btcwallet v0.11.1-0.20210329233242-e0607006dce6/go.mod h1:JBUz2SCnYLn2Dw9bcnqZYvKchnKVvWSLv8OUzihHTcc=
github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU=
github.com/btcsuite/btcwallet/wallet/txauthor v1.0.1-0.20210329233242-e0607006dce6 h1:mO7NxcfgLe75paLDHx+LWNG5BskiDQigHnSVT2KvNZA=
github.com/btcsuite/btcwallet/wallet/txauthor v1.0.1-0.20210329233242-e0607006dce6/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU=
github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 h1:2VsfS0sBedcM5KmDzRMT3+b6xobqWveZGvjb+jFez5w=
github.com/btcsuite/btcwallet/wallet/txrules v1.0.0/go.mod h1:UwQE78yCerZ313EXZwEiu3jNAtfXj2n2+c8RWiE/WNA=
github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0 h1:6DxkcoMnCPY4E9cUDPB5tbuuf40SmmMkSQkoE8vCT+s=
github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0/go.mod h1:pauEU8UuMFiThe5PB3EO+gO5kx87Me5NvdQDsTuq6cs=
github.com/btcsuite/btcwallet/wallet/txsizes v1.0.1-0.20210329233242-e0607006dce6 h1:n9SLPLz2PRg2X+lnWxioxTmtAa2ZqjR8EwL/tZD7BAY=
github.com/btcsuite/btcwallet/wallet/txsizes v1.0.1-0.20210329233242-e0607006dce6/go.mod h1:pauEU8UuMFiThe5PB3EO+gO5kx87Me5NvdQDsTuq6cs=
github.com/btcsuite/btcwallet/walletdb v1.0.0/go.mod h1:bZTy9RyYZh9fLnSua+/CD48TJtYJSHjjYcSaszuxCCk=
github.com/btcsuite/btcwallet/walletdb v1.3.2/go.mod h1:GZCMPNpUu5KE3ASoVd+k06p/1OW8OwNGCCaNWRto2cQ=
github.com/btcsuite/btcwallet/walletdb v1.3.4 h1:ExdPQSfYRLoYMEENsjWyl4w0PePLm9w3wg69nsRS2xc=

@@ -88,8 +92,9 @@ github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/frankban/quicktest v1.2.2 h1:xfmOhhoH5fGPgbEAlhLpJH9p0z/0Qizio9osmvn9IUY=
github.com/frankban/quicktest v1.2.2/go.mod h1:Qh/WofXFeiAFII1aEBu529AtJo6Zg2VHscnEsbBnJ20=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=

@@ -310,6 +315,7 @@ golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -222,9 +222,9 @@ func (b *BtcWalletKeyRing) DeriveKey(keyLoc KeyLocator) (KeyDescriptor, error) {
}
path := waddrmgr.DerivationPath{
-Account: uint32(keyLoc.Family),
-Branch: 0,
-Index: uint32(keyLoc.Index),
+InternalAccount: uint32(keyLoc.Family),
+Branch: 0,
+Index: keyLoc.Index,
}
addr, err := scope.DeriveFromKeyPath(addrmgrNs, path)
if err != nil {

@@ -278,9 +278,9 @@ func (b *BtcWalletKeyRing) DerivePrivKey(keyDesc KeyDescriptor) (
// Now that we know the account exists, we can safely
// derive the full private key from the given path.
path := waddrmgr.DerivationPath{
-Account: uint32(keyDesc.Family),
-Branch: 0,
-Index: uint32(keyDesc.Index),
+InternalAccount: uint32(keyDesc.Family),
+Branch: 0,
+Index: keyDesc.Index,
}
addr, err := scope.DeriveFromKeyPath(addrmgrNs, path)
if err != nil {

@@ -299,9 +299,9 @@ func (b *BtcWalletKeyRing) DerivePrivKey(keyDesc KeyDescriptor) (
// need to scan for the private key, assuming that we know the
// valid key family.
nextPath := waddrmgr.DerivationPath{
-Account: uint32(keyDesc.Family),
-Branch: 0,
-Index: 0,
+InternalAccount: uint32(keyDesc.Family),
+Branch: 0,
+Index: 0,
}
// We'll now iterate through our key range in an attempt to
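For reference, a minimal standalone sketch of constructing the renamed waddrmgr.DerivationPath under the updated btcwallet; the values below are placeholders for illustration only:

package main

import (
	"fmt"

	"github.com/btcsuite/btcwallet/waddrmgr"
)

func main() {
	// Placeholder values standing in for a key family and index.
	family, index := uint32(1), uint32(0)

	// With the updated btcwallet, the struct field previously named
	// Account is now InternalAccount, and Index takes the uint32 directly.
	path := waddrmgr.DerivationPath{
		InternalAccount: family,
		Branch:          0,
		Index:           index,
	}

	fmt.Printf("derivation path: %+v\n", path)
}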
@@ -56,12 +56,12 @@ func (b BtcdBackendConfig) GenArgs() []string {
// ConnectMiner is called to establish a connection to the test miner.
func (b BtcdBackendConfig) ConnectMiner() error {
-return b.harness.Node.Node(btcjson.NConnect, b.minerAddr, &temp)
+return b.harness.Client.Node(btcjson.NConnect, b.minerAddr, &temp)
}
// DisconnectMiner is called to disconnect the miner.
func (b BtcdBackendConfig) DisconnectMiner() error {
-return b.harness.Node.Node(btcjson.NDisconnect, b.minerAddr, &temp)
+return b.harness.Client.Node(btcjson.NDisconnect, b.minerAddr, &temp)
}
// Name returns the name of the backend type.
@@ -211,7 +211,7 @@ func (n *NetworkHarness) SetUp(testCase string, lndArgs []string) error {
// We generate several blocks in order to give the outputs created
// above a good number of confirmations.
-if _, err := n.Miner.Node.Generate(10); err != nil {
+if _, err := n.Miner.Client.Generate(10); err != nil {
return err
}

@@ -803,7 +803,7 @@ func (n *NetworkHarness) WaitForTxInMempool(ctx context.Context,
case <-ticker.C:
var err error
-mempool, err = n.Miner.Node.GetRawMempool()
+mempool, err = n.Miner.Client.GetRawMempool()
if err != nil {
return err
}

@@ -1383,7 +1383,7 @@ func (n *NetworkHarness) sendCoins(ctx context.Context, amt btcutil.Amount,
// Otherwise, we'll generate 6 new blocks to ensure the output gains a
// sufficient number of confirmations and wait for the balance to
// reflect what's expected.
-if _, err := n.Miner.Node.Generate(6); err != nil {
+if _, err := n.Miner.Client.Generate(6); err != nil {
return err
}
@@ -176,7 +176,7 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(
uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
)
-_, err = net.Miner.Node.Generate(numBlocks)
+_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)
// Bob's force close transaction should now be found in the mempool. If

@@ -189,11 +189,11 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)
require.NoError(t.t, err)
_, err = waitForNTxsInMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
closeTx := getSpendingTxInMempool(
-t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
+t, net.Miner.Client, minerMempoolTimeout, wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
},

@@ -258,7 +258,7 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
}
txes, err := getNTxsFromMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)

@@ -334,13 +334,13 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
// If we then mine additional blocks, Bob can sweep his commitment
// output.
-_, err = net.Miner.Node.Generate(defaultCSV - 2)
+_, err = net.Miner.Client.Generate(defaultCSV - 2)
require.NoError(t.t, err)
// Find the commitment sweep.
-bobCommitSweepHash, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
+bobCommitSweepHash, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
-bobCommitSweep, err := net.Miner.Node.GetRawTransaction(bobCommitSweepHash)
+bobCommitSweep, err := net.Miner.Client.GetRawTransaction(bobCommitSweepHash)
require.NoError(t.t, err)
require.Equal(

@@ -375,11 +375,11 @@ func testMultiHopHtlcAggregation(net *lntest.NetworkHarness, t *harnessTest,
_ = mineBlocks(t, net, 2, 1)
}
-bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
+bobSweep, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
// Make sure it spends from the second level tx.
-secondLevelSweep, err := net.Miner.Node.GetRawTransaction(bobSweep)
+secondLevelSweep, err := net.Miner.Client.GetRawTransaction(bobSweep)
require.NoError(t.t, err)
// It should be sweeping all the second-level outputs.
@@ -97,7 +97,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
expectedTxes = 2
}
_, err = waitForNTxsInMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)

@@ -122,13 +122,13 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry -
lncfg.DefaultIncomingBroadcastDelta))
-_, err = net.Miner.Node.Generate(numBlocks)
+_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)
// Carol's commitment transaction should now be in the mempool. If there
// is an anchor, Carol will sweep that too.
_, err = waitForNTxsInMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)

@@ -141,7 +141,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// Look up the closing transaction. It should be spending from the
// funding transaction,
closingTx := getSpendingTxInMempool(
-t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint,
+t, net.Miner.Client, minerMempoolTimeout, carolFundingPoint,
)
closingTxid := closingTx.TxHash()

@@ -166,7 +166,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
expectedTxes = 3
}
txes, err := getNTxsFromMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)

@@ -206,12 +206,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// will extract the preimage and broadcast a second level tx to claim
// the HTLC in his (already closed) channel with Alice.
bobSecondLvlTx, err := waitForTxInMempool(
-net.Miner.Node, minerMempoolTimeout,
+net.Miner.Client, minerMempoolTimeout,
)
require.NoError(t.t, err)
// It should spend from the commitment in the channel with Alice.
-tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx)
+tx, err := net.Miner.Client.GetRawTransaction(bobSecondLvlTx)
require.NoError(t.t, err)
require.Equal(

@@ -261,11 +261,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// If we then mine 3 additional blocks, Carol's second level tx should
// mature, and she can pull the funds from it with a sweep tx.
-_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
+_, err = net.Miner.Client.Generate(carolSecondLevelCSV)
require.NoError(t.t, err)
bobSecondLevelCSV -= carolSecondLevelCSV
-carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
+carolSweep, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
// Mining one additional block, Bob's second level tx is mature, and he

@@ -273,11 +273,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
block = mineBlocks(t, net, bobSecondLevelCSV, 1)[0]
assertTxInBlock(t, block, carolSweep)
-bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
+bobSweep, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
// Make sure it spends from the second level tx.
-tx, err = net.Miner.Node.GetRawTransaction(bobSweep)
+tx, err = net.Miner.Client.GetRawTransaction(bobSweep)
require.NoError(t.t, err)
require.Equal(
t.t, *bobSecondLvlTx, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
@@ -98,7 +98,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(
uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
)
-_, err = net.Miner.Node.Generate(numBlocks)
+_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)
// Bob's force close transaction should now be found in the mempool. If

@@ -111,11 +111,11 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)
require.NoError(t.t, err)
_, err = waitForNTxsInMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
closeTx := getSpendingTxInMempool(
-t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
+t, net.Miner.Client, minerMempoolTimeout, wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
},

@@ -138,7 +138,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// timeout transaction to be broadcast due to the expiry being reached.
// If there are anchors, we also expect Carol's anchor sweep now.
txes, err := getNTxsFromMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)

@@ -166,7 +166,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
mineBlocks(t, net, defaultCSV-1, expectedTxes)
// Check that the sweep spends from the mined commitment.
-txes, err = getNTxsFromMempool(net.Miner.Node, 1, minerMempoolTimeout)
+txes, err = getNTxsFromMempool(net.Miner.Client, 1, minerMempoolTimeout)
require.NoError(t.t, err)
assertAllTxesSpendFrom(t, txes, closeTxid)

@@ -193,7 +193,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// layer sweep due to the CSV on the HTLC timeout output.
mineBlocks(t, net, 1, 0)
assertSpendingTxInMempool(
-t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
+t, net.Miner.Client, minerMempoolTimeout, wire.OutPoint{
Hash: *htlcTimeout,
Index: 0,
},

@@ -218,7 +218,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
// Next, we'll mine a final block that should confirm the second-layer
// sweeping transaction.
-_, err = net.Miner.Node.Generate(1)
+_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
// Once this transaction has been confirmed, Bob should detect that he
@@ -106,7 +106,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
numBlocks := padCLTV(uint32(
invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
))
-_, err = net.Miner.Node.Generate(numBlocks)
+_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)
// At this point, Carol should broadcast her active commitment

@@ -117,7 +117,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
expectedTxes = 2
}
_, err = getNTxsFromMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)

@@ -132,7 +132,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// The commitment transaction should be spending from the funding
// transaction.
closingTx := getSpendingTxInMempool(
-t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint,
+t, net.Miner.Client, minerMempoolTimeout, carolFundingPoint,
)
closingTxid := closingTx.TxHash()

@@ -154,7 +154,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
expectedTxes = 3
}
txes, err := getNTxsFromMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)

@@ -163,7 +163,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// We'll now mine an additional block which should confirm both the
// second layer transactions.
-_, err = net.Miner.Node.Generate(1)
+_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
time.Sleep(time.Second * 4)

@@ -197,17 +197,17 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
// If we mine 4 additional blocks, then both outputs should now be
// mature.
-_, err = net.Miner.Node.Generate(defaultCSV)
+_, err = net.Miner.Client.Generate(defaultCSV)
require.NoError(t.t, err)
// We should have a new transaction in the mempool.
-_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
+_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
// Finally, if we mine an additional block to confirm these two sweep
// transactions, Carol should not show a pending channel in her report
// afterwards.
-_, err = net.Miner.Node.Generate(1)
+_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
@@ -101,7 +101,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// type is of that type).
if c == commitTypeAnchors {
_, err = waitForNTxsInMempool(
-net.Miner.Node, 1, minerMempoolTimeout,
+net.Miner.Client, 1, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("unable to find bob's anchor commit sweep: %v",

@@ -114,12 +114,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// containing the commitment tx and the commit sweep tx will be
// broadcast immediately before it can be included in a block, so mine
// one less than defaultCSV in order to perform mempool assertions.
-_, err = net.Miner.Node.Generate(defaultCSV - 1)
+_, err = net.Miner.Client.Generate(defaultCSV - 1)
require.NoError(t.t, err)
// Alice should now sweep her funds.
_, err = waitForNTxsInMempool(
-net.Miner.Node, 1, minerMempoolTimeout,
+net.Miner.Client, 1, minerMempoolTimeout,
)
require.NoError(t.t, err)

@@ -145,7 +145,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
invoiceReq.CltvExpiry-lncfg.DefaultIncomingBroadcastDelta,
) - defaultCSV)
-_, err = net.Miner.Node.Generate(numBlocks)
+_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)
expectedTxes := 1

@@ -156,7 +156,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// Carol's commitment transaction should now be in the mempool. If
// there are anchors, Carol also sweeps her anchor.
_, err = waitForNTxsInMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)
bobFundingTxid, err := lnrpc.GetChanPointFundingTxid(bobChanPoint)

@@ -169,7 +169,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// The closing transaction should be spending from the funding
// transaction.
closingTx := getSpendingTxInMempool(
-t, net.Miner.Node, minerMempoolTimeout, carolFundingPoint,
+t, net.Miner.Client, minerMempoolTimeout, carolFundingPoint,
)
closingTxid := closingTx.TxHash()

@@ -193,7 +193,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
expectedTxes = 3
}
txes, err := getNTxsFromMempool(
-net.Miner.Node, expectedTxes, minerMempoolTimeout,
+net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)

@@ -211,12 +211,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// will extract the preimage and broadcast a sweep tx to directly claim
// the HTLC in his (already closed) channel with Alice.
bobHtlcSweep, err := waitForTxInMempool(
-net.Miner.Node, minerMempoolTimeout,
+net.Miner.Client, minerMempoolTimeout,
)
require.NoError(t.t, err)
// It should spend from the commitment in the channel with Alice.
-tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep)
+tx, err := net.Miner.Client.GetRawTransaction(bobHtlcSweep)
require.NoError(t.t, err)
require.Equal(
t.t, *aliceForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,

@@ -238,11 +238,11 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// If we then mine 3 additional blocks, Carol's second level tx will
// mature, and she should pull the funds.
-_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
+_, err = net.Miner.Client.Generate(carolSecondLevelCSV)
require.NoError(t.t, err)
carolSweep, err := waitForTxInMempool(
-net.Miner.Node, minerMempoolTimeout,
+net.Miner.Client, minerMempoolTimeout,
)
require.NoError(t.t, err)
|
@ -96,7 +96,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// transaction of Bob's funding output. If there are anchors, mine
// Carol's anchor sweep too.
if c == commitTypeAnchors {
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
}

@ -104,16 +104,16 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// expires and the commitment was already mined inside
// closeChannelAndAssertType(), so mine one block less than defaultCSV
// in order to perform mempool assertions.
_, err = net.Miner.Node.Generate(defaultCSV - 1)
_, err = net.Miner.Client.Generate(defaultCSV - 1)
require.NoError(t.t, err)

_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)

// We'll now mine enough blocks for the HTLC to expire. After this, Bob
// should hand off the now expired HTLC output to the utxo nursery.
numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV))
_, err = net.Miner.Node.Generate(numBlocks)
_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)

// Bob's pending channel report should show that he has a single HTLC
@ -138,7 +138,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,

// We should also now find a transaction in the mempool, as Bob should
// have broadcast his second layer timeout transaction.
timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
timeoutTx, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)

// Next, we'll mine an additional block. This should serve to confirm
@ -177,10 +177,10 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// We'll now mine 4 additional blocks. This should be enough for Bob's
// CSV timelock to expire and the sweeping transaction of the HTLC to be
// broadcast.
_, err = net.Miner.Node.Generate(defaultCSV)
_, err = net.Miner.Client.Generate(defaultCSV)
require.NoError(t.t, err)

sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
sweepTx, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)

// We'll then mine a final block which should confirm this second layer
@ -93,7 +93,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
}

_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
net.Miner.Client, expectedTxes, minerMempoolTimeout,
)
require.NoError(t.t, err)

@ -101,7 +101,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// point, Bob should hand off the output to his internal utxo nursery,
// which will broadcast a sweep transaction.
numBlocks := padCLTV(finalCltvDelta - 1)
_, err = net.Miner.Node.Generate(numBlocks)
_, err = net.Miner.Client.Generate(numBlocks)
require.NoError(t.t, err)

// If we check Bob's pending channel report, it should show that he has
@ -126,12 +126,12 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
require.NoError(t.t, err)

// We need to generate an additional block to trigger the sweep.
_, err = net.Miner.Node.Generate(1)
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)

// Bob's sweeping transaction should now be found in the mempool at
// this point.
sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
sweepTx, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
if err != nil {
// If Bob's transaction isn't yet in the mempool, then due to
// internal message passing and the low period between blocks
@ -141,9 +141,9 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// we'll fail.
// TODO(halseth): can we use waitForChannelPendingForceClose to
// avoid this hack?
_, err = net.Miner.Node.Generate(1)
_, err = net.Miner.Client.Generate(1)
require.NoError(t.t, err)
sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
sweepTx, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
require.NoError(t.t, err)
}

@ -58,14 +58,14 @@ func testCPFP(net *lntest.NetworkHarness, t *harnessTest) {
t.Fatalf("unable to send coins to bob: %v", err)
}

txid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
txid, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
if err != nil {
t.Fatalf("expected one mempool transaction: %v", err)
}

// We'll then extract the raw transaction from the mempool in order to
// determine the index of Bob's output.
tx, err := net.Miner.Node.GetRawTransaction(txid)
tx, err := net.Miner.Client.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to extract raw transaction from mempool: %v",
err)
@ -110,7 +110,7 @@ func testCPFP(net *lntest.NetworkHarness, t *harnessTest) {

// We should now expect to see two transactions within the mempool, a
// parent and its child.
_, err = waitForNTxsInMempool(net.Miner.Node, 2, minerMempoolTimeout)
_, err = waitForNTxsInMempool(net.Miner.Client, 2, minerMempoolTimeout)
if err != nil {
t.Fatalf("expected two mempool transactions: %v", err)
}
@ -173,7 +173,7 @@ func testPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest) {
}

// No transaction should have been published yet.
mempool, err := net.Miner.Node.GetRawMempool()
mempool, err := net.Miner.Client.GetRawMempool()
require.NoError(t.t, err)
require.Equal(t.t, 0, len(mempool))

@ -190,7 +190,7 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
|
||||
run: func(t *testing.T, a, b *lntest.HarnessNode) {
|
||||
// Find out the current best block so we can subscribe
|
||||
// to the next one.
|
||||
hash, height, err := net.Miner.Node.GetBestBlock()
|
||||
hash, height, err := net.Miner.Client.GetBestBlock()
|
||||
require.Nil(t, err, "get best block")
|
||||
|
||||
// Create a new subscription to get block epoch events.
|
||||
@ -257,7 +257,7 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
|
||||
}()
|
||||
|
||||
// Mine a block and make sure we get a message for it.
|
||||
blockHashes, err := net.Miner.Node.Generate(1)
|
||||
blockHashes, err := net.Miner.Client.Generate(1)
|
||||
require.Nil(t, err, "generate blocks")
|
||||
assert.Equal(t, 1, len(blockHashes), "num blocks")
|
||||
select {
|
||||
@ -279,7 +279,7 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
|
||||
run: func(t *testing.T, a, b *lntest.HarnessNode) {
|
||||
// Find out the current best block so we can subscribe
|
||||
// to the next one.
|
||||
hash, height, err := net.Miner.Node.GetBestBlock()
|
||||
hash, height, err := net.Miner.Client.GetBestBlock()
|
||||
require.Nil(t, err, "get best block")
|
||||
|
||||
// Create a new subscription to get block epoch events.
|
||||
@ -366,7 +366,7 @@ func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) {
|
||||
}()
|
||||
|
||||
// Mine a block and make sure we get a message for it.
|
||||
blockHashes, err := net.Miner.Node.Generate(1)
|
||||
blockHashes, err := net.Miner.Client.Generate(1)
|
||||
require.Nil(t, err, "generate blocks")
|
||||
assert.Equal(t, 1, len(blockHashes), "num blocks")
|
||||
select {
|
||||
|
@ -479,7 +479,7 @@ func cleanupForceClose(t *harnessTest, net *lntest.NetworkHarness,
|
||||
//
|
||||
// The commit sweep resolver is able to broadcast the sweep tx up to
|
||||
// one block before the CSV elapses, so wait until defaulCSV-1.
|
||||
_, err = net.Miner.Node.Generate(defaultCSV - 1)
|
||||
_, err = net.Miner.Client.Generate(defaultCSV - 1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate blocks: %v", err)
|
||||
}
|
||||
@ -812,7 +812,7 @@ func testGetRecoveryInfo(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
}
|
||||
|
||||
// Wait for Carol to sync to the chain.
|
||||
_, minerHeight, err := net.Miner.Node.GetBestBlock()
|
||||
_, minerHeight, err := net.Miner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get current blockheight %v", err)
|
||||
}
|
||||
@ -1088,7 +1088,7 @@ func testOnchainFundRecovery(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
t.Fatalf("unable to send coins to miner: %v", err)
|
||||
}
|
||||
txid, err := waitForTxInMempool(
|
||||
net.Miner.Node, minerMempoolTimeout,
|
||||
net.Miner.Client, minerMempoolTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("transaction not found in mempool: %v", err)
|
||||
@ -1536,7 +1536,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
}
|
||||
|
||||
// Make sure the unconfirmed tx is seen in the mempool.
|
||||
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to find tx in miner mempool: %v", err)
|
||||
}
|
||||
@ -2497,14 +2497,14 @@ func assertMinerBlockHeightDelta(t *harnessTest,
|
||||
// Ensure the chain lengths are what we expect.
|
||||
var predErr error
|
||||
err := wait.Predicate(func() bool {
|
||||
_, tempMinerHeight, err := tempMiner.Node.GetBestBlock()
|
||||
_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
predErr = fmt.Errorf("unable to get current "+
|
||||
"blockheight %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
_, minerHeight, err := miner.Node.GetBestBlock()
|
||||
_, minerHeight, err := miner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
predErr = fmt.Errorf("unable to get current "+
|
||||
"blockheight %v", err)
|
||||
@ -2562,7 +2562,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// We start by connecting the new miner to our original miner,
|
||||
// such that it will sync to our original chain.
|
||||
err = net.Miner.Node.Node(
|
||||
err = net.Miner.Client.Node(
|
||||
btcjson.NConnect, tempMiner.P2PAddress(), &temp,
|
||||
)
|
||||
if err != nil {
|
||||
@ -2578,7 +2578,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// We disconnect the two miners, such that we can mine two different
|
||||
// chains and can cause a reorg later.
|
||||
err = net.Miner.Node.Node(
|
||||
err = net.Miner.Client.Node(
|
||||
btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
|
||||
)
|
||||
if err != nil {
|
||||
@ -2598,7 +2598,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Wait for miner to have seen the funding tx. The temporary miner is
|
||||
// disconnected, and won't see the transaction.
|
||||
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to find funding tx in mempool: %v", err)
|
||||
}
|
||||
@ -2620,7 +2620,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
// open.
|
||||
block := mineBlocks(t, net, 10, 1)[0]
|
||||
assertTxInBlock(t, block, fundingTxID)
|
||||
if _, err := tempMiner.Node.Generate(15); err != nil {
|
||||
if _, err := tempMiner.Client.Generate(15); err != nil {
|
||||
t.Fatalf("unable to generate blocks: %v", err)
|
||||
}
|
||||
|
||||
@ -2629,7 +2629,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
assertMinerBlockHeightDelta(t, net.Miner, tempMiner, 5)
|
||||
|
||||
// Wait for Alice to sync to the original miner's chain.
|
||||
_, minerHeight, err := net.Miner.Node.GetBestBlock()
|
||||
_, minerHeight, err := net.Miner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get current blockheight %v", err)
|
||||
}
|
||||
@ -2690,7 +2690,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Connecting to the temporary miner should now cause our original
|
||||
// chain to be re-orged out.
|
||||
err = net.Miner.Node.Node(
|
||||
err = net.Miner.Client.Node(
|
||||
btcjson.NConnect, tempMiner.P2PAddress(), &temp,
|
||||
)
|
||||
if err != nil {
|
||||
@ -2707,7 +2707,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Now we disconnect the two miners, and connect our original miner to
|
||||
// our chain backend once again.
|
||||
err = net.Miner.Node.Node(
|
||||
err = net.Miner.Client.Node(
|
||||
btcjson.NDisconnect, tempMiner.P2PAddress(), &temp,
|
||||
)
|
||||
if err != nil {
|
||||
@ -2721,7 +2721,7 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// This should have caused a reorg, and Alice should sync to the longer
|
||||
// chain, where the funding transaction is not confirmed.
|
||||
_, tempMinerHeight, err := tempMiner.Node.GetBestBlock()
|
||||
_, tempMinerHeight, err := tempMiner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get current blockheight %v", err)
|
||||
}
|
||||
@ -3001,7 +3001,7 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
assertTxInBlock(t, block, fundingTxID)
|
||||
|
||||
// Get the height that our transaction confirmed at.
|
||||
_, height, err := net.Miner.Node.GetBestBlock()
|
||||
_, height, err := net.Miner.Client.GetBestBlock()
|
||||
require.NoError(t.t, err, "could not get best block")
|
||||
|
||||
// Restart both nodes to test that the appropriate state has been
|
||||
@ -3022,7 +3022,7 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Next, mine enough blocks s.t the channel will open with a single
|
||||
// additional block mined.
|
||||
if _, err := net.Miner.Node.Generate(3); err != nil {
|
||||
if _, err := net.Miner.Client.Generate(3); err != nil {
|
||||
t.Fatalf("unable to mine blocks: %v", err)
|
||||
}
|
||||
|
||||
@ -3042,7 +3042,7 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
assertNumOpenChannelsPending(ctxt, t, net.Alice, carol, 1)
|
||||
|
||||
// Finally, mine the last block which should mark the channel as open.
|
||||
if _, err := net.Miner.Node.Generate(1); err != nil {
|
||||
if _, err := net.Miner.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to mine blocks: %v", err)
|
||||
}
|
||||
|
||||
@ -3699,7 +3699,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
|
||||
// Fetch starting height of this test so we can compute the block
|
||||
// heights we expect certain events to take place.
|
||||
_, curHeight, err := net.Miner.Node.GetBestBlock()
|
||||
_, curHeight, err := net.Miner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get best block height")
|
||||
}
|
||||
@ -3807,7 +3807,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
}
|
||||
|
||||
sweepTxns, err := getNTxsFromMempool(
|
||||
net.Miner.Node, expectedTxes, minerMempoolTimeout,
|
||||
net.Miner.Client, expectedTxes, minerMempoolTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to find commitment in miner mempool: %v", err)
|
||||
@ -3819,7 +3819,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
utx := btcutil.NewTx(tx)
|
||||
totalWeight += blockchain.GetTransactionWeight(utx)
|
||||
|
||||
fee, err := getTxFee(net.Miner.Node, tx)
|
||||
fee, err := getTxFee(net.Miner.Client, tx)
|
||||
require.NoError(t.t, err)
|
||||
totalFee += int64(fee)
|
||||
}
|
||||
@ -3852,7 +3852,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := net.Miner.Node.Generate(1); err != nil {
|
||||
if _, err := net.Miner.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
@ -3921,7 +3921,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
// not timelocked. If there are anchors, we also expect Carol's anchor
|
||||
// sweep now.
|
||||
sweepTxns, err = getNTxsFromMempool(
|
||||
net.Miner.Node, expectedTxes, minerMempoolTimeout,
|
||||
net.Miner.Client, expectedTxes, minerMempoolTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to find Carol's sweep in miner mempool: %v",
|
||||
@ -3953,7 +3953,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
// For the persistence test, we generate two blocks, then trigger
|
||||
// a restart and then generate the final block that should trigger
|
||||
// the creation of the sweep transaction.
|
||||
if _, err := net.Miner.Node.Generate(defaultCSV - 2); err != nil {
|
||||
if _, err := net.Miner.Client.Generate(defaultCSV - 2); err != nil {
|
||||
t.Fatalf("unable to mine blocks: %v", err)
|
||||
}
|
||||
|
||||
@ -4026,21 +4026,21 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
|
||||
// Generate an additional block, which should cause the CSV delayed
|
||||
// output from the commitment txn to expire.
|
||||
if _, err := net.Miner.Node.Generate(1); err != nil {
|
||||
if _, err := net.Miner.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to mine blocks: %v", err)
|
||||
}
|
||||
|
||||
// At this point, the CSV will expire in the next block, meaning that
|
||||
// the sweeping transaction should now be broadcast. So we fetch the
|
||||
// node's mempool to ensure it has been properly broadcast.
|
||||
sweepingTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
sweepingTXID, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get sweep tx from mempool: %v", err)
|
||||
}
|
||||
|
||||
// Fetch the sweep transaction, all input it's spending should be from
|
||||
// the commitment transaction which was broadcast on-chain.
|
||||
sweepTx, err := net.Miner.Node.GetRawTransaction(sweepingTXID)
|
||||
sweepTx, err := net.Miner.Client.GetRawTransaction(sweepingTXID)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to fetch sweep tx: %v", err)
|
||||
}
|
||||
@ -4091,11 +4091,11 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
// Next, we mine an additional block which should include the sweep
|
||||
// transaction as the input scripts and the sequence locks on the
|
||||
// inputs should be properly met.
|
||||
blockHash, err := net.Miner.Node.Generate(1)
|
||||
blockHash, err := net.Miner.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
block, err := net.Miner.Node.GetBlock(blockHash[0])
|
||||
block, err := net.Miner.Client.GetBlock(blockHash[0])
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get block: %v", err)
|
||||
}
|
||||
@ -4103,7 +4103,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
assertTxInBlock(t, block, sweepTx.Hash())
|
||||
|
||||
// Update current height
|
||||
_, curHeight, err = net.Miner.Node.GetBestBlock()
|
||||
_, curHeight, err = net.Miner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get best block height")
|
||||
}
@ -4167,8 +4167,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,

// Advance the blockchain until just before the CLTV expires, nothing
// exciting should have happened during this time.
blockHash, err = net.Miner.Node.Generate(cltvHeightDelta)
if err != nil {
if _, err := net.Miner.Client.Generate(cltvHeightDelta); err != nil {
t.Fatalf("unable to generate block: %v", err)
}

@ -4233,8 +4232,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,

// Now, generate the block which will cause Alice to broadcast the
// presigned htlc timeout txns.
blockHash, err = net.Miner.Node.Generate(1)
if err != nil {
if _, err = net.Miner.Client.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}

@ -4250,7 +4248,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
|
||||
// Wait for them all to show up in the mempool.
|
||||
htlcTxIDs, err := waitForNTxsInMempool(
|
||||
net.Miner.Node, expectedTxes, minerMempoolTimeout,
|
||||
net.Miner.Client, expectedTxes, minerMempoolTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find htlc timeout txns in mempool: %v", err)
|
||||
@ -4279,7 +4277,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
// on-chain. In case of an anchor type channel, we expect one
|
||||
// extra input that is not spending from the commitment, that
|
||||
// is added for fees.
|
||||
htlcTx, err := net.Miner.Node.GetRawTransaction(htlcTxID)
|
||||
htlcTx, err := net.Miner.Client.GetRawTransaction(htlcTxID)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to fetch sweep tx: %v", err)
|
||||
}
|
||||
@ -4376,8 +4374,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
|
||||
// Generate a block that mines the htlc timeout txns. Doing so now
|
||||
// activates the 2nd-stage CSV delayed outputs.
|
||||
blockHash, err = net.Miner.Node.Generate(1)
|
||||
if err != nil {
|
||||
if _, err = net.Miner.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
@ -4395,7 +4392,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
numBlocks = defaultCSV - 2
|
||||
|
||||
}
|
||||
_, err = net.Miner.Node.Generate(numBlocks)
|
||||
_, err = net.Miner.Client.Generate(numBlocks)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
@ -4449,21 +4446,20 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest,
|
||||
|
||||
// Generate a block that causes Alice to sweep the htlc outputs in the
|
||||
// kindergarten bucket.
|
||||
blockHash, err = net.Miner.Node.Generate(1)
|
||||
if err != nil {
|
||||
if _, err := net.Miner.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the single sweep txn to appear in the mempool.
|
||||
htlcSweepTxID, err := waitForTxInMempool(
|
||||
net.Miner.Node, minerMempoolTimeout,
|
||||
net.Miner.Client, minerMempoolTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to get sweep tx from mempool: %v", err)
|
||||
}
|
||||
|
||||
// Fetch the htlc sweep transaction from the mempool.
|
||||
htlcSweepTx, err := net.Miner.Node.GetRawTransaction(htlcSweepTxID)
|
||||
htlcSweepTx, err := net.Miner.Client.GetRawTransaction(htlcSweepTxID)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to fetch sweep tx: %v", err)
|
||||
}
|
||||
@ -4650,7 +4646,7 @@ func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness,
|
||||
|
||||
for _, tx := range sweepTxns {
|
||||
txHash := tx.TxHash()
|
||||
sweepTx, err := net.Miner.Node.GetRawTransaction(&txHash)
|
||||
sweepTx, err := net.Miner.Client.GetRawTransaction(&txHash)
|
||||
require.NoError(t.t, err)
|
||||
|
||||
// We expect our commitment sweep to have a single input, and,
|
||||
@ -5963,7 +5959,7 @@ func testSingleHopSendToRouteCase(net *lntest.NetworkHarness, t *harnessTest,
|
||||
// Assert Carol and Dave are synced to the chain before proceeding, to
|
||||
// ensure the queried route will have a valid final CLTV once the HTLC
|
||||
// reaches Dave.
|
||||
_, minerHeight, err := net.Miner.Node.GetBestBlock()
|
||||
_, minerHeight, err := net.Miner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get best height: %v", err)
|
||||
}
|
||||
@ -8251,26 +8247,26 @@ func testFailingChannel(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
}
|
||||
|
||||
// Carol will use the correct preimage to resolve the HTLC on-chain.
|
||||
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Carol's resolve tx in mempool: %v", err)
|
||||
}
|
||||
|
||||
// Mine enough blocks for Alice to sweep her funds from the force
|
||||
// closed channel.
|
||||
_, err = net.Miner.Node.Generate(defaultCSV - 1)
|
||||
_, err = net.Miner.Client.Generate(defaultCSV - 1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate blocks: %v", err)
|
||||
}
|
||||
|
||||
// Wait for the sweeping tx to be broadcast.
|
||||
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Alice's sweep tx in mempool: %v", err)
|
||||
}
|
||||
|
||||
// Mine the sweep.
|
||||
_, err = net.Miner.Node.Generate(1)
|
||||
_, err = net.Miner.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate blocks: %v", err)
|
||||
}
|
||||
@ -8476,7 +8472,7 @@ func testGarbageCollectLinkNodes(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// We'll need to mine some blocks in order to mark the channel fully
|
||||
// closed.
|
||||
_, err = net.Miner.Node.Generate(chainreg.DefaultBitcoinTimeLockDelta - defaultCSV)
|
||||
_, err = net.Miner.Client.Generate(chainreg.DefaultBitcoinTimeLockDelta - defaultCSV)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate blocks: %v", err)
|
||||
}
|
||||
@ -8734,7 +8730,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
|
||||
// Wait for Bob's breach transaction to show up in the mempool to ensure
|
||||
// that Carol's node has started waiting for confirmations.
|
||||
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Bob's breach tx in mempool: %v", err)
|
||||
}
|
||||
@ -8763,7 +8759,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
// Query the mempool for Carol's justice transaction, this should be
|
||||
// broadcast as Bob's contract breaching transaction gets confirmed
|
||||
// above.
|
||||
justiceTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
justiceTXID, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Carol's justice tx in mempool: %v", err)
|
||||
}
|
||||
@ -8772,7 +8768,7 @@ func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
// Query for the mempool transaction found above. Then assert that all
|
||||
// the inputs of this transaction are spending outputs generated by
|
||||
// Bob's breach transaction above.
|
||||
justiceTx, err := net.Miner.Node.GetRawTransaction(justiceTXID)
|
||||
justiceTx, err := net.Miner.Client.GetRawTransaction(justiceTXID)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query for justice tx: %v", err)
|
||||
}
|
||||
@ -8988,7 +8984,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
|
||||
|
||||
// Query the mempool for the breaching closing transaction, this should
|
||||
// be broadcast by Carol when she force closes the channel above.
|
||||
txid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
txid, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Carol's force close tx in mempool: %v",
|
||||
err)
|
||||
@ -9020,7 +9016,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
|
||||
// Query the mempool for Dave's justice transaction, this should be
|
||||
// broadcast as Carol's contract breaching transaction gets confirmed
|
||||
// above.
|
||||
justiceTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
justiceTXID, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Dave's justice tx in mempool: %v",
|
||||
err)
|
||||
@ -9030,7 +9026,7 @@ func testRevokedCloseRetributionZeroValueRemoteOutput(net *lntest.NetworkHarness
|
||||
// Query for the mempool transaction found above. Then assert that all
|
||||
// the inputs of this transaction are spending outputs generated by
|
||||
// Carol's breach transaction above.
|
||||
justiceTx, err := net.Miner.Node.GetRawTransaction(justiceTXID)
|
||||
justiceTx, err := net.Miner.Client.GetRawTransaction(justiceTXID)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query for justice tx: %v", err)
|
||||
}
|
||||
@ -9318,7 +9314,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
|
||||
|
||||
// Query the mempool for the breaching closing transaction, this should
|
||||
// be broadcast by Carol when she force closes the channel above.
|
||||
txid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
txid, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Carol's force close tx in mempool: %v",
|
||||
err)
|
||||
@ -9360,7 +9356,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
|
||||
var justiceTxid *chainhash.Hash
|
||||
errNotFound := errors.New("justice tx not found")
|
||||
findJusticeTx := func() (*chainhash.Hash, error) {
|
||||
mempool, err := net.Miner.Node.GetRawMempool()
|
||||
mempool, err := net.Miner.Client.GetRawMempool()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get mempool from "+
|
||||
"miner: %v", err)
|
||||
@ -9369,7 +9365,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
|
||||
for _, txid := range mempool {
|
||||
// Check that the justice tx has the appropriate number
|
||||
// of inputs.
|
||||
tx, err := net.Miner.Node.GetRawTransaction(txid)
|
||||
tx, err := net.Miner.Client.GetRawTransaction(txid)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to query for "+
|
||||
"txs: %v", err)
|
||||
@ -9418,7 +9414,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
|
||||
t.Fatalf(predErr.Error())
|
||||
}
|
||||
|
||||
justiceTx, err := net.Miner.Node.GetRawTransaction(justiceTxid)
|
||||
justiceTx, err := net.Miner.Client.GetRawTransaction(justiceTxid)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query for justice tx: %v", err)
|
||||
}
|
||||
@ -9426,7 +9422,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
|
||||
// isSecondLevelSpend checks that the passed secondLevelTxid is a
|
||||
// potential second level spend spending from the commit tx.
|
||||
isSecondLevelSpend := func(commitTxid, secondLevelTxid *chainhash.Hash) bool {
|
||||
secondLevel, err := net.Miner.Node.GetRawTransaction(
|
||||
secondLevel, err := net.Miner.Client.GetRawTransaction(
|
||||
secondLevelTxid)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query for tx: %v", err)
|
||||
@ -9785,7 +9781,7 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(
|
||||
|
||||
// Query the mempool for the breaching closing transaction, this should
|
||||
// be broadcast by Carol when she force closes the channel above.
|
||||
txid, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
txid, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Carol's force close tx in mempool: %v",
|
||||
err)
|
||||
@ -9810,7 +9806,7 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(
|
||||
// Query the mempool for Dave's justice transaction, this should be
|
||||
// broadcast as Carol's contract breaching transaction gets confirmed
|
||||
// above.
|
||||
justiceTXID, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
justiceTXID, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Dave's justice tx in mempool: %v",
|
||||
err)
|
||||
@ -9820,7 +9816,7 @@ func testRevokedCloseRetributionAltruistWatchtowerCase(
|
||||
// Query for the mempool transaction found above. Then assert that all
|
||||
// the inputs of this transaction are spending outputs generated by
|
||||
// Carol's breach transaction above.
|
||||
justiceTx, err := net.Miner.Node.GetRawTransaction(justiceTXID)
|
||||
justiceTx, err := net.Miner.Client.GetRawTransaction(justiceTXID)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query for justice tx: %v", err)
|
||||
}
|
||||
@ -10006,7 +10002,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest,
|
||||
expectedTxes = 2
|
||||
}
|
||||
_, err = waitForNTxsInMempool(
|
||||
net.Miner.Node, expectedTxes, minerMempoolTimeout,
|
||||
net.Miner.Client, expectedTxes, minerMempoolTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Carol's force close tx in mempool: %v",
|
||||
@ -10035,7 +10031,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest,
|
||||
// We also expect Dave to sweep his anchor, if present.
|
||||
|
||||
_, err = waitForNTxsInMempool(
|
||||
net.Miner.Node, expectedTxes, minerMempoolTimeout,
|
||||
net.Miner.Client, expectedTxes, minerMempoolTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Dave's sweep tx in mempool: %v", err)
|
||||
@ -10078,7 +10074,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest,
|
||||
// take that into account.
|
||||
mineBlocks(t, net, defaultCSV-1-1, 0)
|
||||
carolSweep, err := waitForTxInMempool(
|
||||
net.Miner.Node, minerMempoolTimeout,
|
||||
net.Miner.Client, minerMempoolTimeout,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Carol's sweep tx in mempool: %v", err)
|
||||
@ -10368,7 +10364,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
// Mine enough blocks for Carol to sweep her funds.
|
||||
mineBlocks(t, net, defaultCSV-1, 0)
|
||||
|
||||
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
carolSweep, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Carol's sweep tx in mempool: %v", err)
|
||||
}
|
||||
@ -10401,7 +10397,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
|
||||
}
|
||||
|
||||
// Dave should sweep his funds.
|
||||
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
|
||||
_, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find Dave's sweep tx in mempool: %v", err)
|
||||
}
|
||||
@ -10873,7 +10869,7 @@ func testGraphTopologyNtfns(net *lntest.NetworkHarness, t *harnessTest, pinned b
|
||||
}
|
||||
}
|
||||
|
||||
_, blockHeight, err := net.Miner.Node.GetBestBlock()
|
||||
_, blockHeight, err := net.Miner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get current blockheight %v", err)
|
||||
}
@ -14920,7 +14916,7 @@ func TestLightningNetworkDaemon(t *testing.T) {

// Set up miner and connect chain backend to it.
require.NoError(t, miner.SetUp(true, 50))
require.NoError(t, miner.Node.NotifyNewTransactions(false))
require.NoError(t, miner.Client.NotifyNewTransactions(false))
require.NoError(t, chainBackend.ConnectMiner(), "connect miner")

// Now we can set up our test harness (LND instance), with the chain
@ -14955,7 +14951,7 @@ func TestLightningNetworkDaemon(t *testing.T) {
// Next mine enough blocks in order for segwit and the CSV package
// soft-fork to activate on SimNet.
numBlocks := harnessNetParams.MinerConfirmationWindow * 2
if _, err := miner.Node.Generate(numBlocks); err != nil {
if _, err := miner.Client.Generate(numBlocks); err != nil {
ht.Fatalf("unable to generate blocks: %v", err)
}

@ -206,7 +206,7 @@ func mineBlocks(t *harnessTest, net *lntest.NetworkHarness,
var err error
if numTxs > 0 {
txids, err = waitForNTxsInMempool(
net.Miner.Node, numTxs, minerMempoolTimeout,
net.Miner.Client, numTxs, minerMempoolTimeout,
)
if err != nil {
t.Fatalf("unable to find txns in mempool: %v", err)
@ -215,13 +215,13 @@ func mineBlocks(t *harnessTest, net *lntest.NetworkHarness,

blocks := make([]*wire.MsgBlock, num)

blockHashes, err := net.Miner.Node.Generate(num)
blockHashes, err := net.Miner.Client.Generate(num)
if err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}

for i, blockHash := range blockHashes {
block, err := net.Miner.Node.GetBlock(blockHash)
block, err := net.Miner.Client.GetBlock(blockHash)
if err != nil {
t.Fatalf("unable to get block: %v", err)
}
@ -315,7 +315,7 @@ func (b *BtcWallet) SendOutputs(outputs []*wire.TxOut,
}

return b.wallet.SendOutputs(
outputs, defaultAccount, minconf, feeSatPerKB, label,
outputs, nil, defaultAccount, minconf, feeSatPerKB, label,
)
}

@ -357,7 +357,9 @@ func (b *BtcWallet) CreateSimpleTx(outputs []*wire.TxOut,
}
}

return b.wallet.CreateSimpleTx(defaultAccount, outputs, 1, feeSatPerKB, dryRun)
return b.wallet.CreateSimpleTx(
nil, defaultAccount, outputs, 1, feeSatPerKB, dryRun,
)
}

// LockOutpoint marks an outpoint as locked meaning it will no longer be deemed
@ -428,7 +430,7 @@ func (b *BtcWallet) ReleaseOutput(id wtxmgr.LockID, op wire.OutPoint) error {
func (b *BtcWallet) ListUnspentWitness(minConfs, maxConfs int32) (
[]*lnwallet.Utxo, error) {
// First, grab all the unfiltered currently unspent outputs.
unspentOutputs, err := b.wallet.ListUnspent(minConfs, maxConfs, nil)
unspentOutputs, err := b.wallet.ListUnspent(minConfs, maxConfs, "")
if err != nil {
return nil, err
}
@ -663,7 +665,7 @@ func (b *BtcWallet) ListTransactionDetails(startHeight,
// We'll attempt to find all transactions from start to end height.
start := base.NewBlockIdentifierFromHeight(startHeight)
stop := base.NewBlockIdentifierFromHeight(endHeight)
txns, err := b.wallet.GetTransactions(start, stop, nil)
txns, err := b.wallet.GetTransactions(start, stop, "", nil)
if err != nil {
return nil, err
}
@ -721,7 +723,7 @@ func (b *BtcWallet) FundPsbt(packet *psbt.Packet,

// Let the wallet handle coin selection and/or fee estimation based on
// the partial TX information in the packet.
return b.wallet.FundPsbt(packet, defaultAccount, feeSatPerKB)
return b.wallet.FundPsbt(packet, nil, defaultAccount, feeSatPerKB)
}

// FinalizePsbt expects a partial transaction with all inputs and
@ -738,7 +740,7 @@ func (b *BtcWallet) FundPsbt(packet *psbt.Packet,
//
// This is a part of the WalletController interface.
func (b *BtcWallet) FinalizePsbt(packet *psbt.Packet) error {
return b.wallet.FinalizePsbt(packet)
return b.wallet.FinalizePsbt(nil, defaultAccount, packet)
}

// txSubscriptionClient encapsulates the transaction notification client from
@ -21,7 +21,7 @@ import (
//
// This is a part of the WalletController interface.
func (b *BtcWallet) FetchInputInfo(prevOut *wire.OutPoint) (*lnwallet.Utxo, error) {
_, txOut, confirmations, err := b.wallet.FetchInputInfo(prevOut)
_, txOut, _, confirmations, err := b.wallet.FetchInputInfo(prevOut)
if err != nil {
return nil, err
}
@ -51,9 +51,10 @@ func deriveFromKeyLoc(scopedMgr *waddrmgr.ScopedKeyManager,
keyLoc keychain.KeyLocator) (*btcec.PrivateKey, error) {

path := waddrmgr.DerivationPath{
Account: uint32(keyLoc.Family),
Branch: 0,
Index: uint32(keyLoc.Index),
InternalAccount: uint32(keyLoc.Family),
Account: uint32(keyLoc.Family),
Branch: 0,
Index: keyLoc.Index,
}
addr, err := scopedMgr.DeriveFromKeyPath(addrmgrNs, path)
if err != nil {
@ -130,13 +130,13 @@ func mineAndAssertTxInBlock(t *testing.T, miner *rpctest.Harness,
|
||||
}
|
||||
|
||||
// We'll mined a block to confirm it.
|
||||
blockHashes, err := miner.Node.Generate(1)
|
||||
blockHashes, err := miner.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate new block: %v", err)
|
||||
}
|
||||
|
||||
// Finally, we'll check it was actually mined in this block.
|
||||
block, err := miner.Node.GetBlock(blockHashes[0])
|
||||
block, err := miner.Client.GetBlock(blockHashes[0])
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get block %v: %v", blockHashes[0], err)
|
||||
}
|
||||
@ -282,7 +282,7 @@ func loadTestCredits(miner *rpctest.Harness, w *lnwallet.LightningWallet,
|
||||
// Generate 10 blocks with the mining node, this should mine all
|
||||
// numOutputs transactions created above. We generate 10 blocks here
|
||||
// in order to give all the outputs a "sufficient" number of confirmations.
|
||||
if _, err := miner.Node.Generate(10); err != nil {
|
||||
if _, err := miner.Client.Generate(10); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -380,7 +380,7 @@ func testGetRecoveryInfo(miner *rpctest.Harness,
|
||||
|
||||
// Generate 5 blocks and check the recovery process again.
|
||||
const numBlocksMined = 5
|
||||
_, err = miner.Node.Generate(numBlocksMined)
|
||||
_, err = miner.Client.Generate(numBlocksMined)
|
||||
require.NoError(t, err, "unable to mine blocks")
|
||||
|
||||
// Check the recovery process. Once synced, the progress should be 1.
|
||||
@ -591,11 +591,11 @@ func testDualFundingReservationWorkflow(miner *rpctest.Harness,
|
||||
if err != nil {
|
||||
t.Fatalf("tx not relayed to miner: %v", err)
|
||||
}
|
||||
blockHashes, err := miner.Node.Generate(1)
|
||||
blockHashes, err := miner.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
block, err := miner.Node.GetBlock(blockHashes[0])
|
||||
block, err := miner.Client.GetBlock(blockHashes[0])
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find block: %v", err)
|
||||
}
|
||||
@ -1091,11 +1091,11 @@ func testSingleFunderReservationWorkflow(miner *rpctest.Harness,
|
||||
if err != nil {
|
||||
t.Fatalf("tx not relayed to miner: %v", err)
|
||||
}
|
||||
blockHashes, err := miner.Node.Generate(1)
|
||||
blockHashes, err := miner.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
block, err := miner.Node.GetBlock(blockHashes[0])
|
||||
block, err := miner.Client.GetBlock(blockHashes[0])
|
||||
if err != nil {
|
||||
t.Fatalf("unable to find block: %v", err)
|
||||
}
|
||||
@ -1159,14 +1159,14 @@ func testListTransactionDetails(miner *rpctest.Harness,
|
||||
}
|
||||
|
||||
// Get the miner's current best block height before we mine blocks.
|
||||
_, startHeight, err := miner.Node.GetBestBlock()
|
||||
_, startHeight, err := miner.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("cannot get best block: %v", err)
|
||||
}
|
||||
|
||||
// Generate 10 blocks to mine all the transactions created above.
|
||||
const numBlocksMined = 10
|
||||
blocks, err := miner.Node.Generate(numBlocksMined)
|
||||
blocks, err := miner.Client.Generate(numBlocksMined)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to mine blocks: %v", err)
|
||||
}
|
||||
@ -1339,7 +1339,7 @@ func testListTransactionDetails(miner *rpctest.Harness,
|
||||
|
||||
// Generate one block for our transaction to confirm in.
|
||||
var numBlocks int32 = 1
|
||||
burnBlock, err := miner.Node.Generate(uint32(numBlocks))
|
||||
burnBlock, err := miner.Client.Generate(uint32(numBlocks))
|
||||
if err != nil {
|
||||
t.Fatalf("unable to mine block: %v", err)
|
||||
}
|
||||
@ -1392,7 +1392,7 @@ func testListTransactionDetails(miner *rpctest.Harness,
|
||||
|
||||
// Generate a block which has no wallet transactions in it.
|
||||
chainTip += numBlocks
|
||||
_, err = miner.Node.Generate(uint32(numBlocks))
|
||||
_, err = miner.Client.Generate(uint32(numBlocks))
|
||||
if err != nil {
|
||||
t.Fatalf("unable to mine block: %v", err)
|
||||
}
|
||||
@ -1524,7 +1524,7 @@ func testTransactionSubscriptions(miner *rpctest.Harness,
|
||||
|
||||
// Next mine a single block, all the transactions generated above
|
||||
// should be included.
|
||||
if _, err := miner.Node.Generate(1); err != nil {
|
||||
if _, err := miner.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
@ -1607,12 +1607,12 @@ func mineAndAssert(r *rpctest.Harness, tx *wire.MsgTx) error {
|
||||
return fmt.Errorf("tx not relayed to miner: %v", err)
|
||||
}
|
||||
|
||||
blockHashes, err := r.Node.Generate(1)
|
||||
blockHashes, err := r.Client.Generate(1)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
block, err := r.Node.GetBlock(blockHashes[0])
|
||||
block, err := r.Client.GetBlock(blockHashes[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to find block: %v", err)
|
||||
}
|
||||
@ -1803,7 +1803,7 @@ func testPublishTransaction(r *rpctest.Harness,
|
||||
}
|
||||
|
||||
// Mine the transaction.
|
||||
if _, err := r.Node.Generate(1); err != nil {
|
||||
if _, err := r.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
@ -2126,7 +2126,7 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
|
||||
// reorganization that doesn't invalidate any existing transactions or
|
||||
// create any new non-coinbase transactions. We'll then check if it's
|
||||
// the same after the empty reorg.
|
||||
_, err := r.Node.Generate(5)
|
||||
_, err := r.Client.Generate(5)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate blocks on passed node: %v", err)
|
||||
}
|
||||
@ -2168,7 +2168,7 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
|
||||
if err != nil {
|
||||
t.Fatalf("tx not relayed to miner: %v", err)
|
||||
}
|
||||
_, err = r.Node.Generate(50)
|
||||
_, err = r.Client.Generate(50)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate blocks on passed node: %v", err)
|
||||
}
|
||||
@ -2207,7 +2207,7 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
|
||||
|
||||
// Step 2: connect the miner to the passed miner and wait for
|
||||
// synchronization.
|
||||
err = r2.Node.AddNode(r.P2PAddress(), rpcclient.ANAdd)
|
||||
err = r2.Client.AddNode(r.P2PAddress(), rpcclient.ANAdd)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to connect mining nodes together: %v", err)
|
||||
}
|
||||
@ -2232,12 +2232,12 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
|
||||
t.Fatalf("timeout waiting for miner disconnect")
|
||||
default:
|
||||
}
|
||||
err = r2.Node.AddNode(r.P2PAddress(), rpcclient.ANRemove)
|
||||
err = r2.Client.AddNode(r.P2PAddress(), rpcclient.ANRemove)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to disconnect mining nodes: %v",
|
||||
err)
|
||||
}
|
||||
peers, err = r2.Node.GetPeerInfo()
|
||||
peers, err = r2.Client.GetPeerInfo()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get peer info: %v", err)
|
||||
}
|
||||
@ -2249,19 +2249,19 @@ func testReorgWalletBalance(r *rpctest.Harness, w *lnwallet.LightningWallet,
|
||||
}
|
||||
}
|
||||
}
|
||||
_, err = r.Node.Generate(2)
|
||||
_, err = r.Client.Generate(2)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate blocks on passed node: %v",
|
||||
err)
|
||||
}
|
||||
_, err = r2.Node.Generate(3)
|
||||
_, err = r2.Client.Generate(3)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate blocks on created node: %v",
|
||||
err)
|
||||
}
|
||||
|
||||
// Step 5: Reconnect the miners and wait for them to synchronize.
|
||||
err = r2.Node.AddNode(r.P2PAddress(), rpcclient.ANAdd)
|
||||
err = r2.Client.AddNode(r.P2PAddress(), rpcclient.ANAdd)
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
case *btcjson.RPCError:
|
||||
@ -2455,7 +2455,7 @@ func testSpendUnconfirmed(miner *rpctest.Harness,
|
||||
if err != nil {
|
||||
t.Fatalf("tx not relayed to miner: %v", err)
|
||||
}
|
||||
if _, err := miner.Node.Generate(1); err != nil {
|
||||
if _, err := miner.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
if err := waitForWalletSync(miner, alice); err != nil {
|
||||
@ -2483,7 +2483,7 @@ func testSpendUnconfirmed(miner *rpctest.Harness,
|
||||
func testLastUnusedAddr(miner *rpctest.Harness,
|
||||
alice, bob *lnwallet.LightningWallet, t *testing.T) {
|
||||
|
||||
if _, err := miner.Node.Generate(1); err != nil {
|
||||
if _, err := miner.Client.Generate(1); err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
@ -2895,7 +2895,7 @@ func waitForMempoolTx(r *rpctest.Harness, txid *chainhash.Hash) error {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Check for the harness' knowledge of the txid
|
||||
tx, err = r.Node.GetRawTransaction(txid)
|
||||
tx, err = r.Client.GetRawTransaction(txid)
|
||||
if err != nil {
|
||||
switch e := err.(type) {
|
||||
case *btcjson.RPCError:
|
||||
@ -2931,7 +2931,7 @@ func waitForWalletSync(r *rpctest.Harness, w *lnwallet.LightningWallet) error {
|
||||
|
||||
// Check whether the chain source of the wallet is caught up to
|
||||
// the harness it's supposed to be catching up to.
|
||||
bestHash, bestHeight, err = r.Node.GetBestBlock()
|
||||
bestHash, bestHeight, err = r.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -3107,7 +3107,7 @@ func TestLightningWallet(t *testing.T, targetBackEnd string) {
|
||||
// Next mine enough blocks in order for segwit and the CSV package
|
||||
// soft-fork to activate on RegNet.
|
||||
numBlocks := netParams.MinerConfirmationWindow * 2
|
||||
if _, err := miningNode.Node.Generate(numBlocks); err != nil {
|
||||
if _, err := miningNode.Client.Generate(numBlocks); err != nil {
|
||||
t.Fatalf("unable to generate blocks: %v", err)
|
||||
}
|
||||
|
||||
|
@ -63,7 +63,7 @@ func waitForMempoolTx(r *rpctest.Harness, txid *chainhash.Hash) error {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Check for the harness' knowledge of the txid
|
||||
tx, err = r.Node.GetRawTransaction(txid)
|
||||
tx, err = r.Client.GetRawTransaction(txid)
|
||||
if err != nil {
|
||||
switch e := err.(type) {
|
||||
case *btcjson.RPCError:
|
||||
@ -188,12 +188,12 @@ func testFilterBlockNotifications(node *rpctest.Harness,
|
||||
blockChan := chainView.FilteredBlocks()
|
||||
|
||||
// Next we'll mine a block confirming the output generated above.
|
||||
newBlockHashes, err := node.Node.Generate(1)
|
||||
newBlockHashes, err := node.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
_, currentHeight, err := node.Node.GetBestBlock()
|
||||
_, currentHeight, err := node.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get current height: %v", err)
|
||||
}
|
||||
@ -211,11 +211,11 @@ func testFilterBlockNotifications(node *rpctest.Harness,
|
||||
// Now that the block has been mined, we'll fetch the two transactions
|
||||
// so we can add them to the filter, and also craft transaction
|
||||
// spending the outputs we created.
|
||||
tx1, err := node.Node.GetRawTransaction(txid1)
|
||||
tx1, err := node.Client.GetRawTransaction(txid1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to fetch transaction: %v", err)
|
||||
}
|
||||
tx2, err := node.Node.GetRawTransaction(txid2)
|
||||
tx2, err := node.Client.GetRawTransaction(txid2)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to fetch transaction: %v", err)
|
||||
}
|
||||
@ -236,7 +236,7 @@ func testFilterBlockNotifications(node *rpctest.Harness,
|
||||
t.Fatalf("unable to find output: %v", err)
|
||||
}
|
||||
|
||||
_, currentHeight, err = node.Node.GetBestBlock()
|
||||
_, currentHeight, err = node.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get current height: %v", err)
|
||||
}
|
||||
@ -264,7 +264,7 @@ func testFilterBlockNotifications(node *rpctest.Harness,
|
||||
|
||||
// Now we'll broadcast the first spending transaction and also mine a
|
||||
// block which should include it.
|
||||
spendTxid1, err := node.Node.SendRawTransaction(spendingTx1, true)
|
||||
spendTxid1, err := node.Client.SendRawTransaction(spendingTx1, true)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to broadcast transaction: %v", err)
|
||||
}
|
||||
@ -272,7 +272,7 @@ func testFilterBlockNotifications(node *rpctest.Harness,
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get spending txid in mempool: %v", err)
|
||||
}
|
||||
newBlockHashes, err = node.Node.Generate(1)
|
||||
newBlockHashes, err = node.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
@ -290,7 +290,7 @@ func testFilterBlockNotifications(node *rpctest.Harness,
|
||||
|
||||
// Next, mine the second transaction which spends the second output.
|
||||
// This should also generate a notification.
|
||||
spendTxid2, err := node.Node.SendRawTransaction(spendingTx2, true)
|
||||
spendTxid2, err := node.Client.SendRawTransaction(spendingTx2, true)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to broadcast transaction: %v", err)
|
||||
}
|
||||
@ -298,7 +298,7 @@ func testFilterBlockNotifications(node *rpctest.Harness,
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get spending txid in mempool: %v", err)
|
||||
}
|
||||
newBlockHashes, err = node.Node.Generate(1)
|
||||
newBlockHashes, err = node.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
@ -328,14 +328,14 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,
|
||||
}
|
||||
|
||||
// Next we'll mine a block confirming the output generated above.
|
||||
initBlockHashes, err := node.Node.Generate(1)
|
||||
initBlockHashes, err := node.Client.Generate(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to generate block: %v", err)
|
||||
}
|
||||
|
||||
blockChan := chainView.FilteredBlocks()
|
||||
|
||||
_, currentHeight, err := node.Node.GetBestBlock()
|
||||
_, currentHeight, err := node.Client.GetBestBlock()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get current height: %v", err)
|
||||
}
|
||||
@ -352,7 +352,7 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,

// Next, create a transaction which spends the output created above,
// mining the spend into a block.
tx, err := node.Node.GetRawTransaction(txid)
tx, err := node.Client.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to fetch transaction: %v", err)
}
@ -364,7 +364,7 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,
if err != nil {
t.Fatalf("unable to create spending tx: %v", err)
}
spendTxid, err := node.Node.SendRawTransaction(spendingTx, true)
spendTxid, err := node.Client.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to broadcast transaction: %v", err)
}
@ -372,7 +372,7 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,
if err != nil {
t.Fatalf("unable to get spending txid in mempool: %v", err)
}
newBlockHashes, err := node.Node.Generate(1)
newBlockHashes, err := node.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}
@ -435,12 +435,12 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView,
blockChan := chainView.FilteredBlocks()

// Next we'll mine a block confirming the output generated above.
newBlockHashes, err := node.Node.Generate(1)
newBlockHashes, err := node.Client.Generate(1)
if err != nil {
t.Fatalf("unable to generate block: %v", err)
}

_, currentHeight, err := node.Node.GetBestBlock()
_, currentHeight, err := node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
@ -455,11 +455,11 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView,
t.Fatalf("filtered block notification didn't arrive")
}

tx1, err := node.Node.GetRawTransaction(txid1)
tx1, err := node.Client.GetRawTransaction(txid1)
if err != nil {
t.Fatalf("unable to fetch transaction: %v", err)
}
tx2, err := node.Node.GetRawTransaction(txid2)
tx2, err := node.Client.GetRawTransaction(txid2)
if err != nil {
t.Fatalf("unable to fetch transaction: %v", err)
}
@ -496,7 +496,7 @@ func testFilterSingleBlock(node *rpctest.Harness, chainView FilteredChainView,
t.Fatalf("filtered block notification didn't arrive")
}

_, currentHeight, err = node.Node.GetBestBlock()
_, currentHeight, err = node.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
@ -579,7 +579,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
time.Sleep(time.Second * 3)
}

_, oldHeight, err := reorgNode.Node.GetBestBlock()
_, oldHeight, err := reorgNode.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
@ -595,7 +595,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
t.Fatalf("unable to join node on blocks: %v", err)
}

_, newHeight, err := reorgNode.Node.GetBestBlock()
_, newHeight, err := reorgNode.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
@ -635,21 +635,21 @@ func testFilterBlockDisconnected(node *rpctest.Harness,

// Now we trigger a small reorg, by disconnecting the nodes, mining
// a few blocks on each, then connecting them again.
peers, err := reorgNode.Node.GetPeerInfo()
peers, err := reorgNode.Client.GetPeerInfo()
if err != nil {
t.Fatalf("unable to get peer info: %v", err)
}
numPeers := len(peers)

// Disconnect the nodes.
err = reorgNode.Node.AddNode(node.P2PAddress(), rpcclient.ANRemove)
err = reorgNode.Client.AddNode(node.P2PAddress(), rpcclient.ANRemove)
if err != nil {
t.Fatalf("unable to disconnect mining nodes: %v", err)
}

// Wait for disconnection
for {
peers, err = reorgNode.Node.GetPeerInfo()
peers, err = reorgNode.Client.GetPeerInfo()
if err != nil {
t.Fatalf("unable to get peer info: %v", err)
}
@ -661,8 +661,12 @@ func testFilterBlockDisconnected(node *rpctest.Harness,

// Mine 10 blocks on the main chain, 5 on the chain that will be
// reorged out,
node.Node.Generate(10)
reorgNode.Node.Generate(5)
if _, err := node.Client.Generate(10); err != nil {
t.Fatalf("unable to generate blocks on main chain: %v", err)
}
if _, err := reorgNode.Client.Generate(5); err != nil {
t.Fatalf("unable to generate blocks on reorged chain: %v", err)
}

// 5 new blocks should get notified.
for i := uint32(0); i < 5; i++ {
@ -682,7 +686,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
}
}

_, oldHeight, err = reorgNode.Node.GetBestBlock()
_, oldHeight, err = reorgNode.Client.GetBestBlock()
if err != nil {
t.Fatalf("unable to get current height: %v", err)
}
@ -695,8 +699,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
t.Fatalf("unable to join node on blocks: %v", err)
}

_, newHeight, err = reorgNode.Node.GetBestBlock()
if err != nil {
if _, _, err := reorgNode.Client.GetBestBlock(); err != nil {
t.Fatalf("unable to get current height: %v", err)
}

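Note: the one pattern applied throughout these hunks is the rename of the rpctest.Harness RPC handle from Node to Client in the updated btcd dependency. A minimal sketch of a call site under that assumption follows; the package and helper name (mineAndGetHeight) are illustrative only and not part of this commit.

package chainviewtest // illustrative package, not part of the change

import "github.com/btcsuite/btcd/integration/rpctest"

// mineAndGetHeight mines n blocks on the harness and returns the new tip
// height, going through the Client field that the updated harness exposes
// in place of the old Node field.
func mineAndGetHeight(h *rpctest.Harness, n uint32) (int32, error) {
	if _, err := h.Client.Generate(n); err != nil {
		return 0, err
	}
	_, height, err := h.Client.GetBestBlock()
	return height, err
}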