// +build dev

package chainntnfstest

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"log"
	"sync"
	"testing"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/integration/rpctest"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcwallet/chain"
	_ "github.com/btcsuite/btcwallet/walletdb/bdb" // Required to auto-register the boltdb walletdb implementation.
	"github.com/lightninglabs/neutrino"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/chainntnfs/bitcoindnotify"
	"github.com/lightningnetwork/lnd/chainntnfs/btcdnotify"
	"github.com/lightningnetwork/lnd/chainntnfs/neutrinonotify"
	"github.com/lightningnetwork/lnd/channeldb"
)
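
// testSingleConfirmationNotification tests that a client registered for a
// single confirmation, either by txid or (with scriptDispatch) by pkScript,
// is notified once the transaction is mined, and that the notification
// carries the correct block hash and tx index.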
func testSingleConfirmationNotification(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// We'd like to test the case of being notified once a txid reaches
	// a *single* confirmation.
	//
	// So first, let's send some coins to "ourself", obtaining a txid.
	// We're spending from a coinbase output here, so we use the dedicated
	// function.
	txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// Now that we have a txid, register a confirmation notification with
	// the chainntfn source.
	numConfs := uint32(1)
	var confIntent *chainntnfs.ConfirmationEvent
	if scriptDispatch {
		confIntent, err = notifier.RegisterConfirmationsNtfn(
			nil, pkScript, numConfs, uint32(currentHeight),
		)
	} else {
		confIntent, err = notifier.RegisterConfirmationsNtfn(
			txid, pkScript, numConfs, uint32(currentHeight),
		)
	}
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	// Now generate a single block; the transaction should be included,
	// which should trigger a notification event.
	blockHash, err := miner.Node.Generate(1)
	if err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}

	select {
	case confInfo := <-confIntent.Confirmed:
		if !confInfo.BlockHash.IsEqual(blockHash[0]) {
			t.Fatalf("mismatched block hashes: expected %v, got %v",
				blockHash[0], confInfo.BlockHash)
		}

		// Finally, we'll verify that the tx index returned is the
		// exact same as the tx index of the transaction within the
		// block itself.
		msgBlock, err := miner.Node.GetBlock(blockHash[0])
		if err != nil {
			t.Fatalf("unable to fetch block: %v", err)
		}

		block := btcutil.NewBlock(msgBlock)
		specifiedTxHash, err := block.TxHash(int(confInfo.TxIndex))
		if err != nil {
			t.Fatalf("unable to index into block: %v", err)
		}

		if !specifiedTxHash.IsEqual(txid) {
			t.Fatalf("mismatched tx indexes: expected %v, got %v",
				txid, specifiedTxHash)
		}
	case <-time.After(20 * time.Second):
		t.Fatalf("confirmation notification never received")
	}
}
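
// testMultiConfirmationNotification tests that a client registered for N > 1
// confirmations is notified once the transaction reaches the requested depth.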
func testMultiConfirmationNotification(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// We'd like to test the case of being notified once a txid reaches
	// N confirmations, where N > 1.
	//
	// Again, we'll begin by creating a fresh transaction, so we can obtain
	// a fresh txid.
	txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test addr: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	numConfs := uint32(6)
	var confIntent *chainntnfs.ConfirmationEvent
	if scriptDispatch {
		confIntent, err = notifier.RegisterConfirmationsNtfn(
			nil, pkScript, numConfs, uint32(currentHeight),
		)
	} else {
		confIntent, err = notifier.RegisterConfirmationsNtfn(
			txid, pkScript, numConfs, uint32(currentHeight),
		)
	}
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	// Now generate six blocks. The transaction should be included in the
	// first block, which will be built upon by the other 5 blocks.
	if _, err := miner.Node.Generate(6); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// TODO(roasbeef): reduce all timeouts after neutrino sync tightened
	// up

	select {
	case <-confIntent.Confirmed:
		break
	case <-time.After(20 * time.Second):
		t.Fatalf("confirmation notification never received")
	}
}
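
// testBatchConfirmationNotification tests that multiple clients, each
// requesting a different number of confirmations for distinct transactions,
// are all notified at their proper confirmation depths.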
func testBatchConfirmationNotification(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// We'd like to test a case of serving notifications to multiple
	// clients, each requesting to be notified once a txid receives
	// various numbers of confirmations.
	confSpread := [6]uint32{1, 2, 3, 6, 20, 22}
	confIntents := make([]*chainntnfs.ConfirmationEvent, len(confSpread))

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// Create a new txid spending miner coins for each confirmation entry
	// in confSpread. We collect each conf intent into a slice so we can
	// verify they're each notified at the proper number of confirmations
	// below.
	for i, numConfs := range confSpread {
		txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
		if err != nil {
			t.Fatalf("unable to create test addr: %v", err)
		}
		var confIntent *chainntnfs.ConfirmationEvent
		if scriptDispatch {
			confIntent, err = notifier.RegisterConfirmationsNtfn(
				nil, pkScript, numConfs, uint32(currentHeight),
			)
		} else {
			confIntent, err = notifier.RegisterConfirmationsNtfn(
				txid, pkScript, numConfs, uint32(currentHeight),
			)
		}
		if err != nil {
			t.Fatalf("unable to register ntfn: %v", err)
		}
		confIntents[i] = confIntent
		if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
			t.Fatalf("tx not relayed to miner: %v", err)
		}
	}

	initialConfHeight := uint32(currentHeight + 1)

	// Now, for each confirmation intent, generate the delta number of
	// blocks needed to trigger the confirmation notification, then verify
	// that the proper notification is dispatched.
	for i, numConfs := range confSpread {
		var blocksToGen uint32

		// If this is the last instance, manually index to generate the
		// proper block delta in order to avoid a panic.
		if i == len(confSpread)-1 {
			blocksToGen = confSpread[len(confSpread)-1] - confSpread[len(confSpread)-2]
		} else {
			blocksToGen = confSpread[i+1] - confSpread[i]
		}

		// Generate the number of blocks necessary to trigger this
		// current confirmation notification.
		if _, err := miner.Node.Generate(blocksToGen); err != nil {
			t.Fatalf("unable to generate blocks: %v", err)
		}

		select {
		case conf := <-confIntents[i].Confirmed:
			// All of the notifications above were originally
			// confirmed in the same block. The returned
			// notification should list the initial confirmation
			// height rather than the height they were _fully_
			// confirmed.
			if conf.BlockHeight != initialConfHeight {
				t.Fatalf("notification has incorrect initial "+
					"conf height: expected %v, got %v",
					initialConfHeight, conf.BlockHeight)
			}
			continue
		case <-time.After(20 * time.Second):
			t.Fatalf("confirmation notification never received: %v", numConfs)
		}
	}
}
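
// checkNotificationFields asserts that a received spend notification reports
// the expected outpoint, spender tx hash, spending input index, and spending
// height.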
func checkNotificationFields(ntfn *chainntnfs.SpendDetail,
	outpoint *wire.OutPoint, spenderSha *chainhash.Hash,
	height int32, t *testing.T) {

	t.Helper()

	if *ntfn.SpentOutPoint != *outpoint {
		t.Fatalf("ntfn includes wrong output, reports "+
			"%v instead of %v",
			ntfn.SpentOutPoint, outpoint)
	}
	if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
		t.Fatalf("ntfn includes wrong spender tx sha, "+
			"reports %v instead of %v",
			ntfn.SpenderTxHash[:], spenderSha[:])
	}
	if ntfn.SpenderInputIndex != 0 {
		t.Fatalf("ntfn includes wrong spending input "+
			"index, reports %v, should be %v",
			ntfn.SpenderInputIndex, 0)
	}
	if ntfn.SpendingHeight != height {
		t.Fatalf("ntfn has wrong spending height: "+
			"expected %v, got %v", height,
			ntfn.SpendingHeight)
	}
}
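
// testSpendNotification tests that multiple clients registered for a spend
// notification are all notified once the spending transaction is mined, and
// that no notification is dispatched while the spend sits only in the
// mempool.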
func testSpendNotification(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// We'd like to test the spend notifications for all ChainNotifier
	// concrete implementations.
	//
	// To do so, we first create a new output to our test target address.
	outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// Now that we have an output index and the pkScript, register for a
	// spentness notification for the newly created output with multiple
	// clients in order to ensure the implementation can support
	// multi-client spend notifications.
	const numClients = 5
	spendClients := make([]*chainntnfs.SpendEvent, numClients)
	for i := 0; i < numClients; i++ {
		var spentIntent *chainntnfs.SpendEvent
		if scriptDispatch {
			spentIntent, err = notifier.RegisterSpendNtfn(
				nil, output.PkScript, uint32(currentHeight),
			)
		} else {
			spentIntent, err = notifier.RegisterSpendNtfn(
				outpoint, output.PkScript, uint32(currentHeight),
			)
		}
		if err != nil {
			t.Fatalf("unable to register for spend ntfn: %v", err)
		}

		spendClients[i] = spentIntent
	}

	// Next, create a new transaction spending that output.
	spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)

	// Broadcast our spending transaction.
	spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
	if err != nil {
		t.Fatalf("unable to broadcast tx: %v", err)
	}

	if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	// Make sure notifications are not yet sent. We launch a goroutine for
	// all the spend clients, such that we can wait for them all in
	// parallel.
	mempoolSpendTimeout := 2 * chainntnfs.TrickleInterval
	mempoolSpends := make(chan *chainntnfs.SpendDetail, numClients)
	for _, c := range spendClients {
		go func(client *chainntnfs.SpendEvent) {
			select {
			case s := <-client.Spend:
				mempoolSpends <- s
			case <-time.After(mempoolSpendTimeout):
			}
		}(c)
	}

	select {
	case <-mempoolSpends:
		t.Fatalf("did not expect to get notification before " +
			"block was mined")
	case <-time.After(mempoolSpendTimeout):
	}

	// Make sure registering a client after the tx is in the mempool still
	// doesn't trigger a notification.
	var spentIntent *chainntnfs.SpendEvent
	if scriptDispatch {
		spentIntent, err = notifier.RegisterSpendNtfn(
			nil, output.PkScript, uint32(currentHeight),
		)
	} else {
		spentIntent, err = notifier.RegisterSpendNtfn(
			outpoint, output.PkScript, uint32(currentHeight),
		)
	}
	if err != nil {
		t.Fatalf("unable to register for spend ntfn: %v", err)
	}

	select {
	case <-spentIntent.Spend:
		t.Fatalf("did not expect to get notification before " +
			"block was mined")
	case <-time.After(mempoolSpendTimeout):
	}
	spendClients = append(spendClients, spentIntent)

	// Now we mine a single block, which should include our spend. The
	// notification should also be sent off.
	if _, err := miner.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}

	_, currentHeight, err = miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	for _, c := range spendClients {
		select {
		case ntfn := <-c.Spend:
			// We've received the spend ntfn. So now verify all the
			// fields have been set properly.
			checkNotificationFields(ntfn, outpoint, spenderSha,
				currentHeight, t)
		case <-time.After(30 * time.Second):
			t.Fatalf("spend ntfn never received")
		}
	}
}
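
// testBlockEpochNotification tests that multiple registered clients each
// receive a block epoch notification for the current tip and for every newly
// mined block.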
func testBlockEpochNotification(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, t *testing.T) {

	// We'd like to test the case of multiple registered clients receiving
	// block epoch notifications.

	const numBlocks = 10
	const numNtfns = numBlocks + 1
	const numClients = 5
	var wg sync.WaitGroup

	// Create numClients clients which will listen for block notifications. We
	// expect each client to receive 11 notifications, one for the current
	// tip of the chain, and one for each of the ten blocks we generate
	// below. So we'll use a WaitGroup to synchronize the test.
	for i := 0; i < numClients; i++ {
		epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
		if err != nil {
			t.Fatalf("unable to register for epoch notification")
		}

		wg.Add(numNtfns)
		go func() {
			for i := 0; i < numNtfns; i++ {
				<-epochClient.Epochs
				wg.Done()
			}
		}()
	}

	epochsSent := make(chan struct{})
	go func() {
		wg.Wait()
		close(epochsSent)
	}()

	// Now generate 10 blocks; the clients above should each receive the
	// remaining 10 notifications, thereby unblocking the goroutine above.
	if _, err := miner.Node.Generate(numBlocks); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	select {
	case <-epochsSent:
	case <-time.After(30 * time.Second):
		t.Fatalf("all notifications not sent")
	}
}
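
// testMultiClientConfirmationNotification tests that multiple clients
// registered for a confirmation of the same transaction are all notified.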
func testMultiClientConfirmationNotification(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// We'd like to test the case of multiple clients registered to
	// receive a confirmation notification for the same transaction.
	txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	var wg sync.WaitGroup
	const (
		numConfsClients = 5
		numConfs        = 1
	)

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// Register for a conf notification for the above generated txid with
	// numConfsClients distinct clients.
	for i := 0; i < numConfsClients; i++ {
		var confClient *chainntnfs.ConfirmationEvent
		if scriptDispatch {
			confClient, err = notifier.RegisterConfirmationsNtfn(
				nil, pkScript, numConfs, uint32(currentHeight),
			)
		} else {
			confClient, err = notifier.RegisterConfirmationsNtfn(
				txid, pkScript, numConfs, uint32(currentHeight),
			)
		}
		if err != nil {
			t.Fatalf("unable to register for confirmation: %v", err)
		}

		wg.Add(1)
		go func() {
			<-confClient.Confirmed
			wg.Done()
		}()
	}

	confsSent := make(chan struct{})
	go func() {
		wg.Wait()
		close(confsSent)
	}()

	// Finally, generate a single block which should trigger the unblocking
	// of all numConfsClients blocked on the channel read above.
	if _, err := miner.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	select {
	case <-confsSent:
	case <-time.After(30 * time.Second):
		t.Fatalf("all confirmation notifications not sent")
	}
}

// Tests the case in which a confirmation notification is requested for a
// transaction that has already been included in a block. In this case, the
// confirmation notification should be dispatched immediately.
func testTxConfirmedBeforeNtfnRegistration(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// First, let's send some coins to "ourself", obtaining a txid. We're
	// spending from a coinbase output here, so we use the dedicated
	// function.
	txid3, pkScript3, err := chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid3); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	// Generate another block containing tx 3, but we won't register conf
	// notifications for this tx until much later. The notifier must check
	// older blocks when the confirmation event is registered below to ensure
	// that the TXID hasn't already been included in the chain, otherwise the
	// notification will never be sent.
	_, err = miner.Node.Generate(1)
	if err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	txid1, pkScript1, err := chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid1); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	txid2, pkScript2, err := chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid2); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// Now generate another block containing txs 1 & 2.
	blockHash, err := miner.Node.Generate(1)
	if err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	// Register a confirmation notification with the chainntfn source for tx1,
	// which is included in the last block. The height hint is the height before
	// the block is included. This notification should fire immediately since
	// only 1 confirmation is required.
	var ntfn1 *chainntnfs.ConfirmationEvent
	if scriptDispatch {
		ntfn1, err = notifier.RegisterConfirmationsNtfn(
			nil, pkScript1, 1, uint32(currentHeight),
		)
	} else {
		ntfn1, err = notifier.RegisterConfirmationsNtfn(
			txid1, pkScript1, 1, uint32(currentHeight),
		)
	}
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	select {
	case confInfo := <-ntfn1.Confirmed:
		// Finally, we'll verify that the tx index returned is the
		// exact same as the tx index of the transaction within the
		// block itself.
		msgBlock, err := miner.Node.GetBlock(blockHash[0])
		if err != nil {
			t.Fatalf("unable to fetch block: %v", err)
		}
		block := btcutil.NewBlock(msgBlock)
		specifiedTxHash, err := block.TxHash(int(confInfo.TxIndex))
		if err != nil {
			t.Fatalf("unable to index into block: %v", err)
		}
		if !specifiedTxHash.IsEqual(txid1) {
			t.Fatalf("mismatched tx indexes: expected %v, got %v",
				txid1, specifiedTxHash)
		}

		// We'll also ensure that the block height has been set
		// properly.
		if confInfo.BlockHeight != uint32(currentHeight+1) {
			t.Fatalf("incorrect block height: expected %v, got %v",
				currentHeight+1, confInfo.BlockHeight)
		}
		break
	case <-time.After(20 * time.Second):
		t.Fatalf("confirmation notification never received")
	}

	// Register a confirmation notification for tx2, requiring 3 confirmations.
	// This transaction is only partially confirmed, so the notification should
	// not fire yet.
	var ntfn2 *chainntnfs.ConfirmationEvent
	if scriptDispatch {
		ntfn2, err = notifier.RegisterConfirmationsNtfn(
			nil, pkScript2, 3, uint32(currentHeight),
		)
	} else {
		ntfn2, err = notifier.RegisterConfirmationsNtfn(
			txid2, pkScript2, 3, uint32(currentHeight),
		)
	}
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	// Fully confirm tx2.
	_, err = miner.Node.Generate(2)
	if err != nil {
		t.Fatalf("unable to generate block: %v", err)
	}

	select {
	case <-ntfn2.Confirmed:
	case <-time.After(10 * time.Second):
		t.Fatalf("confirmation notification never received")
	}

	select {
	case <-ntfn1.Confirmed:
		t.Fatalf("received multiple confirmations for tx")
	case <-time.After(1 * time.Second):
	}

	// Finally register a confirmation notification for tx3, requiring 1
	// confirmation. Ensure that conf notifications do not refire on txs
	// 1 or 2.
	var ntfn3 *chainntnfs.ConfirmationEvent
	if scriptDispatch {
		ntfn3, err = notifier.RegisterConfirmationsNtfn(
			nil, pkScript3, 1, uint32(currentHeight-1),
		)
	} else {
		ntfn3, err = notifier.RegisterConfirmationsNtfn(
			txid3, pkScript3, 1, uint32(currentHeight-1),
		)
	}
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	// We'll also register for a confirmation notification with the pkscript
	// of a different transaction. This notification shouldn't fire since we
	// match on both txid and pkscript.
	var ntfn4 *chainntnfs.ConfirmationEvent
	ntfn4, err = notifier.RegisterConfirmationsNtfn(
		txid3, pkScript2, 1, uint32(currentHeight-1),
	)
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	select {
	case <-ntfn3.Confirmed:
	case <-time.After(10 * time.Second):
		t.Fatalf("confirmation notification never received")
	}

	select {
	case <-ntfn4.Confirmed:
		t.Fatalf("confirmation notification received")
	case <-time.After(5 * time.Second):
	}

	time.Sleep(1 * time.Second)

	select {
	case <-ntfn1.Confirmed:
		t.Fatalf("received multiple confirmations for tx")
	default:
	}

	select {
	case <-ntfn2.Confirmed:
		t.Fatalf("received multiple confirmations for tx")
	default:
	}
}

// Test the case of a notification consumer having forgotten or being delayed
// in checking for a confirmation. This should not cause the notifier to stop
// working.
func testLazyNtfnConsumer(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// Create a transaction to be notified about. We'll register for
	// notifications on this transaction but won't be prompt in checking
	// them.
	txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	numConfs := uint32(3)

	// Add a block right before registering; this makes race conditions
	// between the historical dispatcher and the normal dispatcher more
	// obvious.
	if _, err := miner.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	var firstConfIntent *chainntnfs.ConfirmationEvent
	if scriptDispatch {
		firstConfIntent, err = notifier.RegisterConfirmationsNtfn(
			nil, pkScript, numConfs, uint32(currentHeight),
		)
	} else {
		firstConfIntent, err = notifier.RegisterConfirmationsNtfn(
			txid, pkScript, numConfs, uint32(currentHeight),
		)
	}
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	// Generate another 2 blocks; this should dispatch the confirmation
	// notification.
	if _, err := miner.Node.Generate(2); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// Now make another transaction. Just because we haven't checked to see
	// if the first transaction has confirmed doesn't mean that we shouldn't
	// be able to see if this transaction confirms first.
	txid, pkScript, err = chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	_, currentHeight, err = miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	numConfs = 1
	var secondConfIntent *chainntnfs.ConfirmationEvent
	if scriptDispatch {
		secondConfIntent, err = notifier.RegisterConfirmationsNtfn(
			nil, pkScript, numConfs, uint32(currentHeight),
		)
	} else {
		secondConfIntent, err = notifier.RegisterConfirmationsNtfn(
			txid, pkScript, numConfs, uint32(currentHeight),
		)
	}
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	if _, err := miner.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	select {
	case <-secondConfIntent.Confirmed:
		// Successfully received the second notification.
		break
	case <-time.After(30 * time.Second):
		t.Fatalf("Second confirmation notification never received")
	}

	// Make sure the first tx confirmed successfully.
	select {
	case <-firstConfIntent.Confirmed:
		break
	case <-time.After(30 * time.Second):
		t.Fatalf("First confirmation notification never received")
	}
}

// Tests the case in which a spend notification is requested for a spend that
// has already been included in a block. In this case, the spend notification
// should be dispatched immediately.
func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// We'd like to test the spend notifications for all ChainNotifier
	// concrete implementations.
	//
	// To do so, we first create a new output to our test target address.
	outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)

	_, heightHint, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// We'll then spend this output and broadcast the spend transaction.
	spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
	spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
	if err != nil {
		t.Fatalf("unable to broadcast tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, spenderSha); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	// We create an epoch client we can use to make sure the notifier is
	// caught up to the mining node's chain.
	epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
	if err != nil {
		t.Fatalf("unable to register for block epoch: %v", err)
	}

	// Now we mine an additional block, which should include our spend.
	if _, err := miner.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}
	_, spendHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// checkSpends registers two clients to be notified of a spend that has
	// already happened. The notifier should dispatch a spend notification
	// immediately.
	checkSpends := func() {
		t.Helper()

		const numClients = 2
		spendClients := make([]*chainntnfs.SpendEvent, numClients)
		for i := 0; i < numClients; i++ {
			var spentIntent *chainntnfs.SpendEvent
			if scriptDispatch {
				spentIntent, err = notifier.RegisterSpendNtfn(
					nil, output.PkScript, uint32(heightHint),
				)
			} else {
				spentIntent, err = notifier.RegisterSpendNtfn(
					outpoint, output.PkScript,
					uint32(heightHint),
				)
			}
			if err != nil {
				t.Fatalf("unable to register for spend ntfn: %v",
					err)
			}

			spendClients[i] = spentIntent
		}

		for _, client := range spendClients {
			select {
			case ntfn := <-client.Spend:
				// We've received the spend ntfn. So now verify
				// all the fields have been set properly.
				checkNotificationFields(
					ntfn, outpoint, spenderSha, spendHeight, t,
				)
			case <-time.After(30 * time.Second):
				t.Fatalf("spend ntfn never received")
			}
		}
	}

	// Wait for the notifier to have caught up to the mined block.
	select {
	case _, ok := <-epochClient.Epochs:
		if !ok {
			t.Fatalf("epoch channel was closed")
		}
	case <-time.After(15 * time.Second):
		t.Fatalf("did not receive block epoch")
	}

	// Check that the spend clients get immediately notified for the spend
	// in the previous block.
	checkSpends()

	// Bury the spend even deeper, and do the same check.
	const numBlocks = 10
	if _, err := miner.Node.Generate(numBlocks); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// Wait for the notifier to have caught up with the new blocks.
	for i := 0; i < numBlocks; i++ {
		select {
		case _, ok := <-epochClient.Epochs:
			if !ok {
				t.Fatalf("epoch channel was closed")
			}
		case <-time.After(15 * time.Second):
			t.Fatalf("did not receive block epoch")
		}
	}

	// The clients should still be notified immediately.
	checkSpends()
}
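
// testCancelSpendNtfn tests that a client can cancel a registered spend
// notification before it is dispatched, while other clients registered for
// the same spend still receive theirs.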
func testCancelSpendNtfn(node *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// We'd like to test that once a spend notification is registered, it
	// can be canceled before the notification is dispatched.

	// First, we'll start by creating a new output that we can spend
	// ourselves.
	outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, node)

	_, currentHeight, err := node.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// Create two clients that each register for the spend notification.
	// We'll cancel the notification for the second client and leave the
	// notification for the first client enabled.
	const numClients = 2
	spendClients := make([]*chainntnfs.SpendEvent, numClients)
	for i := 0; i < numClients; i++ {
		var spentIntent *chainntnfs.SpendEvent
		if scriptDispatch {
			spentIntent, err = notifier.RegisterSpendNtfn(
				nil, output.PkScript, uint32(currentHeight),
			)
		} else {
			spentIntent, err = notifier.RegisterSpendNtfn(
				outpoint, output.PkScript, uint32(currentHeight),
			)
		}
		if err != nil {
			t.Fatalf("unable to register for spend ntfn: %v", err)
		}

		spendClients[i] = spentIntent
	}

	// Next, create a new transaction spending that output.
	spendingTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)

	// Before we broadcast the spending transaction, we'll cancel the
	// notification of the second client.
	spendClients[1].Cancel()

	// Broadcast our spending transaction.
	spenderSha, err := node.Node.SendRawTransaction(spendingTx, true)
	if err != nil {
		t.Fatalf("unable to broadcast tx: %v", err)
	}

	if err := chainntnfs.WaitForMempoolTx(node, spenderSha); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	// Now we mine a single block, which should include our spend. The
	// notification should also be sent off.
	if _, err := node.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}

	// The spend notification for the first client should have been
	// dispatched.
	select {
	case ntfn := <-spendClients[0].Spend:
		// We've received the spend ntfn. So now verify all the
		// fields have been set properly.
		if *ntfn.SpentOutPoint != *outpoint {
			t.Fatalf("ntfn includes wrong output, reports "+
				"%v instead of %v",
				ntfn.SpentOutPoint, outpoint)
		}
		if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
			t.Fatalf("ntfn includes wrong spender tx sha, "+
				"reports %v instead of %v",
				ntfn.SpenderTxHash[:], spenderSha[:])
		}
		if ntfn.SpenderInputIndex != 0 {
			t.Fatalf("ntfn includes wrong spending input "+
				"index, reports %v, should be %v",
				ntfn.SpenderInputIndex, 0)
		}
	case <-time.After(20 * time.Second):
		t.Fatalf("spend ntfn never received")
	}

	// However, the spend notification of the second client should NOT have
	// been dispatched.
	select {
	case _, ok := <-spendClients[1].Spend:
		if ok {
			t.Fatalf("spend ntfn should have been canceled")
		}
	case <-time.After(20 * time.Second):
		t.Fatalf("spend ntfn never canceled")
	}
}
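
// testCancelEpochNtfn tests that a client can cancel its block epoch
// notifications, after which no further epochs are delivered to it while
// other clients remain unaffected.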
func testCancelEpochNtfn(node *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, t *testing.T) {

	// We'd like to ensure that once a client cancels their block epoch
	// notifications, no further notifications are sent over the channel
	// if/when new blocks come in.
	const numClients = 2

	epochClients := make([]*chainntnfs.BlockEpochEvent, numClients)
	for i := 0; i < numClients; i++ {
		epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
		if err != nil {
			t.Fatalf("unable to register for epoch notification")
		}
		epochClients[i] = epochClient
	}

	// Now before we mine any blocks, cancel the notification for the first
	// epoch client.
	epochClients[0].Cancel()

	// Now mine a single block; this should trigger the logic to dispatch
	// epoch notifications.
	if _, err := node.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// The epoch notification for the first client shouldn't have been
	// dispatched.
	select {
	case _, ok := <-epochClients[0].Epochs:
		if ok {
			t.Fatalf("epoch notification should have been canceled")
		}
	case <-time.After(2 * time.Second):
		t.Fatalf("epoch notification not sent")
	}

	// However, the epoch notification for the second client should have
	// been dispatched as normal.
	select {
	case _, ok := <-epochClients[1].Epochs:
		if !ok {
			t.Fatalf("epoch was canceled")
		}
	case <-time.After(20 * time.Second):
		t.Fatalf("epoch notification not sent")
	}
}
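
// testReorgConf tests that a confirmation notification is not dispatched for
// a transaction that is reorged out of the chain, and that it fires once the
// transaction is re-confirmed on the new best chain.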
func testReorgConf(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {

	// Set up a new miner that we can use to cause a reorg.
	miner2, err := rpctest.New(
		chainntnfs.NetParams, nil, []string{"--txindex"}, "",
	)
	if err != nil {
		t.Fatalf("unable to create mining node: %v", err)
	}
	if err := miner2.SetUp(false, 0); err != nil {
		t.Fatalf("unable to set up mining node: %v", err)
	}
	defer miner2.TearDown()

	// We start by connecting the new miner to our original miner,
	// such that it will sync to our original chain.
	if err := rpctest.ConnectNode(miner, miner2); err != nil {
		t.Fatalf("unable to connect harnesses: %v", err)
	}
	nodeSlice := []*rpctest.Harness{miner, miner2}
	if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// The two should be on the same block height.
	_, nodeHeight1, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}

	_, nodeHeight2, err := miner2.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}

	if nodeHeight1 != nodeHeight2 {
		t.Fatalf("expected both miners to be on the same height: %v vs %v",
			nodeHeight1, nodeHeight2)
	}

	// We disconnect the two nodes, such that we can start mining on them
	// individually without the other one learning about the new blocks.
	err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
	if err != nil {
		t.Fatalf("unable to remove node: %v", err)
	}

	txid, pkScript, err := chainntnfs.GetTestTxidAndScript(miner)
	if err != nil {
		t.Fatalf("unable to create test tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// Now that we have a txid, register a confirmation notification with
	// the chainntfn source.
	numConfs := uint32(2)
	var confIntent *chainntnfs.ConfirmationEvent
	if scriptDispatch {
		confIntent, err = notifier.RegisterConfirmationsNtfn(
			nil, pkScript, numConfs, uint32(currentHeight),
		)
	} else {
		confIntent, err = notifier.RegisterConfirmationsNtfn(
			txid, pkScript, numConfs, uint32(currentHeight),
		)
	}
	if err != nil {
		t.Fatalf("unable to register ntfn: %v", err)
	}

	// Now generate a single block; the transaction should be included.
	_, err = miner.Node.Generate(1)
	if err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}

	// The transaction only has one confirmation, and the notification is
	// registered with 2 confirmations, so we should not be notified yet.
	select {
	case <-confIntent.Confirmed:
		t.Fatal("tx was confirmed unexpectedly")
	case <-time.After(1 * time.Second):
	}

	// Reorganize the transaction out of the chain by generating a longer
	// fork from the other miner. The transaction is not included in this
	// fork.
	if _, err := miner2.Node.Generate(2); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// Reconnect the nodes to reach consensus on the longest chain. miner2's
	// chain should win and become active on miner1.
	if err := rpctest.ConnectNode(miner, miner2); err != nil {
		t.Fatalf("unable to connect harnesses: %v", err)
	}
	nodeSlice = []*rpctest.Harness{miner, miner2}
	if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	_, nodeHeight1, err = miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}

	_, nodeHeight2, err = miner2.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}

	if nodeHeight1 != nodeHeight2 {
		t.Fatalf("expected both miners to be on the same height: %v vs %v",
			nodeHeight1, nodeHeight2)
	}

	// Even though there is one block above the height of the block that the
	// transaction was included in, it is not on the active chain, so the
	// notification should not be sent.
	select {
	case <-confIntent.Confirmed:
		t.Fatal("tx was confirmed unexpectedly")
	case <-time.After(1 * time.Second):
	}

	// Now confirm the transaction on the longest chain and verify that we
	// receive the notification.
	tx, err := miner.Node.GetRawTransaction(txid)
	if err != nil {
		t.Fatalf("unable to get raw tx: %v", err)
	}

	txid, err = miner2.Node.SendRawTransaction(tx.MsgTx(), false)
	if err != nil {
		t.Fatalf("unable to send tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, txid); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	_, err = miner.Node.Generate(3)
	if err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	select {
	case <-confIntent.Confirmed:
	case <-time.After(20 * time.Second):
		t.Fatalf("confirmation notification never received")
	}
}
// testReorgSpend ensures that the different ChainNotifier implementations
|
|
|
|
// correctly handle outpoints whose spending transaction has been reorged out of
|
|
|
|
// the chain.
|
|
|
|
func testReorgSpend(miner *rpctest.Harness,
|
2018-12-07 08:14:37 +03:00
|
|
|
notifier chainntnfs.TestChainNotifier, scriptDispatch bool, t *testing.T) {
|
2018-10-05 12:07:55 +03:00
|
|
|
|
|
|
|
// We'll start by creating an output and registering a spend
|
|
|
|
// notification for it.
|
2018-12-07 08:14:37 +03:00
|
|
|
outpoint, output, privKey := chainntnfs.CreateSpendableOutput(t, miner)
|
|
|
|
_, heightHint, err := miner.Node.GetBestBlock()
|
2018-10-05 12:07:55 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to retrieve current height: %v", err)
|
|
|
|
}
|
2018-12-07 08:14:37 +03:00
|
|
|
|
|
|
|
var spendIntent *chainntnfs.SpendEvent
|
|
|
|
if scriptDispatch {
|
|
|
|
spendIntent, err = notifier.RegisterSpendNtfn(
|
|
|
|
nil, output.PkScript, uint32(heightHint),
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
spendIntent, err = notifier.RegisterSpendNtfn(
|
|
|
|
outpoint, output.PkScript, uint32(heightHint),
|
|
|
|
)
|
|
|
|
}
|
2018-10-05 12:07:55 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to register for spend: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
	// Set up a new miner that we can use to cause a reorg.
	miner2, err := rpctest.New(
		chainntnfs.NetParams, nil, []string{"--txindex"}, "",
	)
	if err != nil {
		t.Fatalf("unable to create mining node: %v", err)
	}
	if err := miner2.SetUp(false, 0); err != nil {
		t.Fatalf("unable to set up mining node: %v", err)
	}
	defer miner2.TearDown()

	// We start by connecting the new miner to our original miner, in order
	// to have a consistent view of the chain from both miners. They should
	// be on the same block height.
	if err := rpctest.ConnectNode(miner, miner2); err != nil {
		t.Fatalf("unable to connect miners: %v", err)
	}
	nodeSlice := []*rpctest.Harness{miner, miner2}
	if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
		t.Fatalf("unable to sync miners: %v", err)
	}
	_, minerHeight1, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get miner1's current height: %v", err)
	}
	_, minerHeight2, err := miner2.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get miner2's current height: %v", err)
	}
	if minerHeight1 != minerHeight2 {
		t.Fatalf("expected both miners to be on the same height: "+
			"%v vs %v", minerHeight1, minerHeight2)
	}

	// We disconnect the two nodes, such that we can start mining on them
	// individually without the other one learning about the new blocks.
	err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
	if err != nil {
		t.Fatalf("unable to disconnect miners: %v", err)
	}

	// Craft the spending transaction for the outpoint created above and
	// confirm it under the chain of the original miner.
	spendTx := chainntnfs.CreateSpendTx(t, outpoint, output, privKey)
	spendTxHash, err := miner.Node.SendRawTransaction(spendTx, true)
	if err != nil {
		t.Fatalf("unable to broadcast spend tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
		t.Fatalf("spend tx not relayed to miner: %v", err)
	}

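	// Mine a single block on the original miner's chain to confirm the
	// spend.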
	const numBlocks = 1
	if _, err := miner.Node.Generate(numBlocks); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}
	_, spendHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get spend height: %v", err)
	}

	// We should see a spend notification dispatched with the correct spend
	// details.
	select {
	case spendDetails := <-spendIntent.Spend:
		checkNotificationFields(
			spendDetails, outpoint, spendTxHash, spendHeight, t,
		)
	case <-time.After(5 * time.Second):
		t.Fatal("expected spend notification to be dispatched")
	}

	// Now, with the other miner, we'll generate one more block than the
	// other miner and connect them to cause a reorg.
	if _, err := miner2.Node.Generate(numBlocks + 1); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}
	if err := rpctest.ConnectNode(miner, miner2); err != nil {
		t.Fatalf("unable to connect miners: %v", err)
	}
	nodeSlice = []*rpctest.Harness{miner2, miner}
	if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
		t.Fatalf("unable to sync miners: %v", err)
	}
	_, minerHeight1, err = miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get miner1's current height: %v", err)
	}
	_, minerHeight2, err = miner2.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get miner2's current height: %v", err)
	}
	if minerHeight1 != minerHeight2 {
		t.Fatalf("expected both miners to be on the same height: "+
			"%v vs %v", minerHeight1, minerHeight2)
	}

	// We should receive a reorg notification.
	select {
	case _, ok := <-spendIntent.Reorg:
		if !ok {
			t.Fatal("unexpected reorg channel closed")
		}
	case <-time.After(5 * time.Second):
		t.Fatal("expected to receive reorg notification")
	}

	// Now that both miners are on the same chain, we'll confirm the
	// spending transaction of the outpoint and receive a notification for
	// it.
	if _, err = miner2.Node.SendRawTransaction(spendTx, true); err != nil {
		t.Fatalf("unable to broadcast spend tx: %v", err)
	}
	if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}
	if _, err := miner.Node.Generate(numBlocks); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}
	_, spendHeight, err = miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to retrieve current height: %v", err)
	}

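	// The spend should be dispatched once again, now with the height at
	// which it confirmed on the reorged-to chain.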
	select {
	case spendDetails := <-spendIntent.Spend:
		checkNotificationFields(
			spendDetails, outpoint, spendTxHash, spendHeight, t,
		)
	case <-time.After(5 * time.Second):
		t.Fatal("expected spend notification to be dispatched")
	}
}

// testCatchUpClientOnMissedBlocks tests the case of multiple registered
// clients receiving historical block epoch notifications due to their best
// known block being out of date.
func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, t *testing.T) {

	const numBlocks = 10
	const numClients = 5
	var wg sync.WaitGroup

	outdatedHash, outdatedHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to retrieve current height: %v", err)
	}

	// This function is used by UnsafeStart to ensure all notifications
	// are fully drained before clients register for notifications.
	generateBlocks := func() error {
		_, err = miner.Node.Generate(numBlocks)
		return err
	}

	// We want to ensure that when a client registers for block notifications,
	// the notifier's best block is at the tip of the chain. If it isn't, the
	// client may not receive all historical notifications.
	bestHeight := outdatedHeight + numBlocks
	err = notifier.UnsafeStart(bestHeight, nil, bestHeight, generateBlocks)
	if err != nil {
		t.Fatalf("unable to unsafe start the notifier: %v", err)
	}
	defer notifier.Stop()

	// Create numClients clients whose best known block is 10 blocks behind
	// the tip of the chain. We expect each client to receive numBlocks
	// notifications, 1 for each block they're behind.
	clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients)
	outdatedBlock := &chainntnfs.BlockEpoch{
		Height: outdatedHeight, Hash: outdatedHash,
	}
	for i := 0; i < numClients; i++ {
		epochClient, err := notifier.RegisterBlockEpochNtfn(outdatedBlock)
		if err != nil {
			t.Fatalf("unable to register for epoch notification: %v", err)
		}
		clients = append(clients, epochClient)
	}

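	// Each client should now receive the full backlog of epochs, in
	// order, one per missed block.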
	for expectedHeight := outdatedHeight + 1; expectedHeight <=
		bestHeight; expectedHeight++ {

		for _, epochClient := range clients {
			select {
			case block := <-epochClient.Epochs:
				if block.Height != expectedHeight {
					t.Fatalf("received block of height: %d, "+
						"expected: %d", block.Height,
						expectedHeight)
				}
			case <-time.After(20 * time.Second):
				t.Fatalf("did not receive historical notification "+
					"for height %d", expectedHeight)
			}
		}
	}

	// Finally, ensure that an extra block notification wasn't received.
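	// Poll each client in its own goroutine so the 5 second quiet period
	// for all clients elapses concurrently rather than serially.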
	anyExtras := make(chan struct{}, len(clients))
	for _, epochClient := range clients {
		wg.Add(1)
		go func(epochClient *chainntnfs.BlockEpochEvent) {
			defer wg.Done()
			select {
			case <-epochClient.Epochs:
				anyExtras <- struct{}{}
			case <-time.After(5 * time.Second):
			}
		}(epochClient)
	}

	wg.Wait()
	close(anyExtras)

	var extraCount int
	for range anyExtras {
		extraCount++
	}

	if extraCount > 0 {
		t.Fatalf("received %d unexpected block notifications", extraCount)
	}
}

// testCatchUpOnMissedBlocks tests the case of multiple registered clients
// receiving historical block epoch notifications due to the notifier's best
// known block being out of date.
func testCatchUpOnMissedBlocks(miner *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, t *testing.T) {

	const numBlocks = 10
	const numClients = 5
	var wg sync.WaitGroup

	_, bestHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}

	// This function is used by UnsafeStart to ensure all notifications
	// are fully drained before clients register for notifications.
	generateBlocks := func() error {
		_, err = miner.Node.Generate(numBlocks)
		return err
	}

	// Next, start the notifier with outdated best block information.
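	// The notifier is told its best block is bestHeight, while the chain
	// is extended by numBlocks behind its back via generateBlocks.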
	err = notifier.UnsafeStart(
		bestHeight, nil, bestHeight+numBlocks, generateBlocks,
	)
	if err != nil {
		t.Fatalf("unable to unsafe start the notifier: %v", err)
	}
	defer notifier.Stop()

	// Create numClients clients who will listen for block notifications.
	clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients)
	for i := 0; i < numClients; i++ {
		epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
		if err != nil {
			t.Fatalf("unable to register for epoch notification: %v", err)
		}

		// Drain the notification dispatched upon registration as we're
		// not interested in it.
		select {
		case <-epochClient.Epochs:
		case <-time.After(5 * time.Second):
			t.Fatal("expected to receive epoch for current block " +
				"upon registration")
		}

		clients = append(clients, epochClient)
	}

	// Generate a single block to trigger the backlog of historical
	// notifications for the previously mined blocks.
	if _, err := miner.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// We expect each client to receive numBlocks + 1 notifications, 1 for
	// each block that the notifier has missed out on.
	for expectedHeight := bestHeight + 1; expectedHeight <=
		bestHeight+numBlocks+1; expectedHeight++ {

		for _, epochClient := range clients {
			select {
			case block := <-epochClient.Epochs:
				if block.Height != expectedHeight {
					t.Fatalf("received block of height: %d, "+
						"expected: %d", block.Height,
						expectedHeight)
				}
			case <-time.After(20 * time.Second):
				t.Fatalf("did not receive historical notification "+
					"for height %d", expectedHeight)
			}
		}
	}

	// Finally, ensure that an extra block notification wasn't received.
	anyExtras := make(chan struct{}, len(clients))
	for _, epochClient := range clients {
		wg.Add(1)
		go func(epochClient *chainntnfs.BlockEpochEvent) {
			defer wg.Done()
			select {
			case <-epochClient.Epochs:
				anyExtras <- struct{}{}
			case <-time.After(5 * time.Second):
			}
		}(epochClient)
	}

	wg.Wait()
	close(anyExtras)

	var extraCount int
	for range anyExtras {
		extraCount++
	}

	if extraCount > 0 {
		t.Fatalf("received %d unexpected block notifications", extraCount)
	}
}

// testCatchUpOnMissedBlocksWithReorg tests that a client will still receive
// all valid block notifications in the case where a notifier's best block has
// been reorged out of the chain.
func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness,
	notifier chainntnfs.TestChainNotifier, t *testing.T) {

	// If this is the neutrino notifier, then we'll skip this test for now
	// as we're missing functionality required to ensure the test passes
	// reliably.
	if _, ok := notifier.(*neutrinonotify.NeutrinoNotifier); ok {
		t.Skip("skipping re-org test for neutrino")
	}

	const numBlocks = 10
	const numClients = 5
	var wg sync.WaitGroup

	// Set up a new miner that we can use to cause a reorg.
	miner2, err := rpctest.New(
		chainntnfs.NetParams, nil, []string{"--txindex"}, "",
	)
	if err != nil {
		t.Fatalf("unable to create mining node: %v", err)
	}
	if err := miner2.SetUp(false, 0); err != nil {
		t.Fatalf("unable to set up mining node: %v", err)
	}
	defer miner2.TearDown()

	// We start by connecting the new miner to our original miner,
	// such that it will sync to our original chain.
	if err := rpctest.ConnectNode(miner1, miner2); err != nil {
		t.Fatalf("unable to connect harnesses: %v", err)
	}
	nodeSlice := []*rpctest.Harness{miner1, miner2}
	if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// The two should be on the same blockheight.
	_, nodeHeight1, err := miner1.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}

	_, nodeHeight2, err := miner2.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current blockheight: %v", err)
	}

	if nodeHeight1 != nodeHeight2 {
		t.Fatalf("expected both miners to be on the same height: %v vs %v",
			nodeHeight1, nodeHeight2)
	}

	// We disconnect the two nodes, such that we can start mining on them
	// individually without the other one learning about the new blocks.
	err = miner1.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove)
	if err != nil {
		t.Fatalf("unable to remove node: %v", err)
	}

	// Now mine on each chain separately.
	blocks, err := miner1.Node.Generate(numBlocks)
	if err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// We generate an extra block on miner 2's chain to ensure it is the
	// longer chain.
	_, err = miner2.Node.Generate(numBlocks + 1)
	if err != nil {
		t.Fatalf("unable to generate blocks: %v", err)
	}

	// Reconnect the two miners so that both converge on miner2's longer
	// chain.
	if err := rpctest.ConnectNode(miner1, miner2); err != nil {
		t.Fatalf("unable to connect harnesses: %v", err)
	}
	nodeSlice = []*rpctest.Harness{miner1, miner2}
	if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil {
		t.Fatalf("unable to join node on blocks: %v", err)
	}

	// The two should be on the same block hash.
	timeout := time.After(10 * time.Second)
	for {
		nodeHash1, _, err := miner1.Node.GetBestBlock()
		if err != nil {
			t.Fatalf("unable to get current block hash: %v", err)
		}

		nodeHash2, _, err := miner2.Node.GetBestBlock()
		if err != nil {
			t.Fatalf("unable to get current block hash: %v", err)
		}

		if *nodeHash1 == *nodeHash2 {
			break
		}
		select {
		case <-timeout:
			t.Fatalf("unable to sync the two chains")
		case <-time.After(50 * time.Millisecond):
			continue
		}
	}

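	// Both miners now agree on miner2's longer chain; the blocks
	// originally mined on miner1's fork have been reorged out.
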
	// Next, start the notifier with outdated best block information.
	// We set the notifier's best block to be the last block mined on the
	// shorter chain, to test that the notifier correctly rewinds to
	// the common ancestor between the two chains.
	syncHeight := nodeHeight1 + numBlocks + 1
	err = notifier.UnsafeStart(
		nodeHeight1+numBlocks, blocks[numBlocks-1], syncHeight, nil,
	)
	if err != nil {
		t.Fatalf("unable to unsafe start the notifier: %v", err)
	}
	defer notifier.Stop()

	// Create numClients clients who will listen for block notifications.
	clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients)
	for i := 0; i < numClients; i++ {
		epochClient, err := notifier.RegisterBlockEpochNtfn(nil)
		if err != nil {
			t.Fatalf("unable to register for epoch notification: %v", err)
		}

		// Drain the notification dispatched upon registration as we're
		// not interested in it.
		select {
		case <-epochClient.Epochs:
		case <-time.After(5 * time.Second):
			t.Fatal("expected to receive epoch for current block " +
				"upon registration")
		}

		clients = append(clients, epochClient)
	}

	// Generate a single block, which should trigger the notifier to rewind
	// to the common ancestor and dispatch notifications from there.
	_, err = miner2.Node.Generate(1)
	if err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}

	// If the chain backend to the notifier stores information about reorged
	// blocks, the notifier is able to rewind the chain to the common
	// ancestor between the chain tip and its outdated best known block.
	// In this case, the client is expected to receive numBlocks + 2
	// notifications, 1 for each block the notifier has missed out on from
	// the longer chain.
	//
	// If the chain backend does not store information about reorged blocks,
	// the notifier has no way of knowing where to rewind to and therefore
	// the client is only expected to receive notifications for blocks
	// whose height is greater than the notifier's best known height: 2
	// notifications, in this case.
	var startingHeight int32
	switch notifier.(type) {
	case *neutrinonotify.NeutrinoNotifier:
		startingHeight = nodeHeight1 + numBlocks + 1
	default:
		startingHeight = nodeHeight1 + 1
	}

	for expectedHeight := startingHeight; expectedHeight <=
		nodeHeight1+numBlocks+2; expectedHeight++ {

		for _, epochClient := range clients {
			select {
			case block := <-epochClient.Epochs:
				if block.Height != expectedHeight {
					t.Fatalf("received block of height: %d, "+
						"expected: %d", block.Height,
						expectedHeight)
				}
			case <-time.After(20 * time.Second):
				t.Fatalf("did not receive historical notification "+
					"for height %d", expectedHeight)
			}
		}
	}

	// Finally, ensure that an extra block notification wasn't received.
	anyExtras := make(chan struct{}, len(clients))
	for _, epochClient := range clients {
		wg.Add(1)
		go func(epochClient *chainntnfs.BlockEpochEvent) {
			defer wg.Done()
			select {
			case <-epochClient.Epochs:
				anyExtras <- struct{}{}
			case <-time.After(5 * time.Second):
			}
		}(epochClient)
	}

	wg.Wait()
	close(anyExtras)

	var extraCount int
	for range anyExtras {
		extraCount++
	}

	if extraCount > 0 {
		t.Fatalf("received %d unexpected block notifications", extraCount)
	}
}

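// txNtfnTestCase is a test case exercising transaction confirmation or spend
// notifications. Each case runs twice: once dispatching by txid/outpoint and
// once by pkScript.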
type txNtfnTestCase struct {
	name string
	test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier,
		scriptDispatch bool, t *testing.T)
}

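// blockNtfnTestCase is a test case exercising block epoch notifications.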
type blockNtfnTestCase struct {
	name string
	test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier,
		t *testing.T)
}

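// blockCatchupTestCase is a test case that restarts the notifier with a stale
// best block to verify that missed block epochs are replayed to clients.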
type blockCatchupTestCase struct {
	name string
	test func(node *rpctest.Harness, notifier chainntnfs.TestChainNotifier,
		t *testing.T)
}

var txNtfnTests = []txNtfnTestCase{
	{
		name: "single conf ntfn",
		test: testSingleConfirmationNotification,
	},
	{
		name: "multi conf ntfn",
		test: testMultiConfirmationNotification,
	},
	{
		name: "batch conf ntfn",
		test: testBatchConfirmationNotification,
	},
	{
		name: "multi client conf",
		test: testMultiClientConfirmationNotification,
	},
	{
		name: "lazy ntfn consumer",
		test: testLazyNtfnConsumer,
	},
	{
		name: "historical conf dispatch",
		test: testTxConfirmedBeforeNtfnRegistration,
	},
	{
		name: "reorg conf",
		test: testReorgConf,
	},
	{
		name: "spend ntfn",
		test: testSpendNotification,
	},
	{
		name: "historical spend dispatch",
		test: testSpendBeforeNtfnRegistration,
	},
	{
		name: "reorg spend",
		test: testReorgSpend,
	},
	{
		name: "cancel spend ntfn",
		test: testCancelSpendNtfn,
	},
}

var blockNtfnTests = []blockNtfnTestCase{
	{
		name: "block epoch",
		test: testBlockEpochNotification,
	},
	{
		name: "cancel epoch ntfn",
		test: testCancelEpochNtfn,
	},
}

var blockCatchupTests = []blockCatchupTestCase{
	{
		name: "catch up client on historical block epoch ntfns",
		test: testCatchUpClientOnMissedBlocks,
	},
	{
		name: "test catch up on missed blocks",
		test: testCatchUpOnMissedBlocks,
	},
	{
		name: "test catch up on missed blocks w/ reorged best block",
		test: testCatchUpOnMissedBlocksWithReorg,
	},
}

// TestInterfaces tests all registered interfaces with a unified set of tests
// which exercise each of the required methods found within the ChainNotifier
// interface.
//
// NOTE: In the future, when additional implementations of the ChainNotifier
// interface have been implemented, in order to ensure the new concrete
// implementation is automatically tested, two steps must be undertaken. First,
// one needs to add a "non-captured" (_) import from the new sub-package. This
// import should trigger an init() method within the package which registers
// the interface. Second, an additional case in the switch within the main loop
// below needs to be added which properly initializes the interface.
func TestInterfaces(t *testing.T, targetBackEnd string) {
	// Initialize the harness around a btcd node which will serve as our
	// dedicated miner to generate blocks, cause re-orgs, etc. We'll set up
	// this node with a chain length of 125, so we have plenty of BTC to
	// play around with.
	miner, tearDown := chainntnfs.NewMiner(t, nil, true, 25)
	defer tearDown()

	rpcConfig := miner.RPCConfig()
	p2pAddr := miner.P2PAddress()

	log.Printf("Running %v ChainNotifier interface tests",
		2*len(txNtfnTests)+len(blockNtfnTests)+len(blockCatchupTests))

	for _, notifierDriver := range chainntnfs.RegisteredNotifiers() {
		notifierType := notifierDriver.NotifierType
		if notifierType != targetBackEnd {
			continue
		}

		// Initialize a height hint cache for each notifier.
		tempDir, err := ioutil.TempDir("", "channeldb")
		if err != nil {
			t.Fatalf("unable to create temp dir: %v", err)
		}
		db, err := channeldb.Open(tempDir)
		if err != nil {
			t.Fatalf("unable to create db: %v", err)
		}
		testCfg := chainntnfs.CacheConfig{
			QueryDisable: false,
		}
		hintCache, err := chainntnfs.NewHeightHintCache(testCfg, db)
		if err != nil {
			t.Fatalf("unable to create height hint cache: %v", err)
		}

		var (
			cleanUp     func()
			newNotifier func() (chainntnfs.TestChainNotifier, error)
		)

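		// Construct a backend-specific factory rather than a single
		// notifier instance, since the catchup tests below need to
		// recreate the notifier from scratch.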
		switch notifierType {
		case "bitcoind":
			var bitcoindConn *chain.BitcoindConn
			bitcoindConn, cleanUp = chainntnfs.NewBitcoindBackend(
				t, p2pAddr, true,
			)
			newNotifier = func() (chainntnfs.TestChainNotifier, error) {
				return bitcoindnotify.New(
					bitcoindConn, chainntnfs.NetParams,
					hintCache, hintCache,
				), nil
			}

		case "btcd":
			newNotifier = func() (chainntnfs.TestChainNotifier, error) {
				return btcdnotify.New(
					&rpcConfig, chainntnfs.NetParams,
					hintCache, hintCache,
				)
			}

		case "neutrino":
			var spvNode *neutrino.ChainService
			spvNode, cleanUp = chainntnfs.NewNeutrinoBackend(
				t, p2pAddr,
			)
			newNotifier = func() (chainntnfs.TestChainNotifier, error) {
				return neutrinonotify.New(
					spvNode, hintCache, hintCache,
				), nil
			}
		}

		log.Printf("Running ChainNotifier interface tests for: %v",
			notifierType)

		notifier, err := newNotifier()
		if err != nil {
			t.Fatalf("unable to create %v notifier: %v",
				notifierType, err)
		}
		if err := notifier.Start(); err != nil {
			t.Fatalf("unable to start notifier %v: %v",
				notifierType, err)
		}

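		// Each transaction notification test is run twice: once
		// dispatching by txid/outpoint, and once by pkScript.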
		for _, txNtfnTest := range txNtfnTests {
			for _, scriptDispatch := range []bool{false, true} {
				testName := fmt.Sprintf("%v %v", notifierType,
					txNtfnTest.name)
				if scriptDispatch {
					testName += " with script dispatch"
				}
				success := t.Run(testName, func(t *testing.T) {
					txNtfnTest.test(
						miner, notifier, scriptDispatch,
						t,
					)
				})
				if !success {
					break
				}
			}
		}

		for _, blockNtfnTest := range blockNtfnTests {
			testName := fmt.Sprintf("%v %v", notifierType,
				blockNtfnTest.name)
			success := t.Run(testName, func(t *testing.T) {
				blockNtfnTest.test(miner, notifier, t)
			})
			if !success {
				break
			}
		}

		notifier.Stop()

		// Run catchup tests separately since they require restarting
		// the notifier every time.
		for _, blockCatchupTest := range blockCatchupTests {
			notifier, err = newNotifier()
			if err != nil {
				t.Fatalf("unable to create %v notifier: %v",
					notifierType, err)
			}

			testName := fmt.Sprintf("%v %v", notifierType,
				blockCatchupTest.name)

			success := t.Run(testName, func(t *testing.T) {
				blockCatchupTest.test(miner, notifier, t)
			})
			if !success {
				break
			}
		}

		if cleanUp != nil {
			cleanUp()
		}
	}
}