chainntnfs test: add testSpendNotificationMempoolSpends for btcd backend
The test is only run for the btcd backend for now, as notifying on mempool spends is not yet supported by the neutrino and bitcoind backends.
parent a36683e5e0
commit 6b6a616b1e
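For context on what the new test exercises: the mempool behaviour comes from the final boolean argument to RegisterSpendNtfn, as used in the diff below. Here is a minimal sketch of that registration, assuming the chainntnfs and wire packages already imported by interface_test.go; the package clause and the helper name watchForMempoolSpend are made up for illustration only:

package chainntnfs_test // hypothetical package name, for illustration

import (
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/roasbeef/btcd/wire"
)

// watchForMempoolSpend registers for a spend notification on op. The
// trailing 'true' asks the notifier to dispatch as soon as the spending
// transaction enters the mempool, rather than waiting for it to be mined.
func watchForMempoolSpend(notifier chainntnfs.ChainNotifier,
	op *wire.OutPoint, heightHint uint32) (*chainntnfs.SpendEvent, error) {

	return notifier.RegisterSpendNtfn(op, heightHint, true)
}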
@@ -15,6 +15,9 @@ import (

	"github.com/lightninglabs/neutrino"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/chainntnfs/bitcoindnotify"
	"github.com/lightningnetwork/lnd/chainntnfs/btcdnotify"
	"github.com/lightningnetwork/lnd/chainntnfs/neutrinonotify"
	"github.com/ltcsuite/ltcd/btcjson"
	"github.com/roasbeef/btcd/chaincfg/chainhash"
	"github.com/roasbeef/btcwallet/walletdb"
@@ -27,18 +30,6 @@ import (
	"github.com/roasbeef/btcd/wire"
	"github.com/roasbeef/btcutil"

	// Required to auto-register the bitcoind backed ChainNotifier
	// implementation.
	_ "github.com/lightningnetwork/lnd/chainntnfs/bitcoindnotify"

	// Required to auto-register the btcd backed ChainNotifier
	// implementation.
	_ "github.com/lightningnetwork/lnd/chainntnfs/btcdnotify"

	// Required to auto-register the neutrino backed ChainNotifier
	// implementation.
	_ "github.com/lightningnetwork/lnd/chainntnfs/neutrinonotify"

	// Required to register the boltdb walletdb implementation.
	_ "github.com/roasbeef/btcwallet/walletdb/bdb"
)
@@ -460,6 +451,120 @@ func testSpendNotification(miner *rpctest.Harness,
	}
}

func testSpendNotificationMempoolSpends(miner *rpctest.Harness,
	notifier chainntnfs.ChainNotifier, t *testing.T) {

	// Skip this test for neutrino and bitcoind backends, as they currently
	// don't support notifying about mempool spends.
	switch notifier.(type) {
	case *neutrinonotify.NeutrinoNotifier:
		return
	case *bitcoindnotify.BitcoindNotifier:
		return
	case *btcdnotify.BtcdNotifier:
		// Go on to test this implementation.
	default:
		t.Fatalf("unknown notifier type: %T", notifier)
	}

	// We first create a new output to our test target address.
	outpoint, pkScript := createSpendableOutput(miner, t)

	_, currentHeight, err := miner.Node.GetBestBlock()
	if err != nil {
		t.Fatalf("unable to get current height: %v", err)
	}

	// Now that we have an output index and the pkScript, register for a
	// spentness notification for the newly created output with multiple
	// clients in order to ensure the implementation can support
	// multi-client spend notifications.

	// We first create a list of clients that will be notified on mempool
	// spends.
	const numClients = 5
	spendClientsMempool := make([]*chainntnfs.SpendEvent, numClients)
	for i := 0; i < numClients; i++ {
		spentIntent, err := notifier.RegisterSpendNtfn(outpoint,
			uint32(currentHeight), true)
		if err != nil {
			t.Fatalf("unable to register for spend ntfn: %v", err)
		}

		spendClientsMempool[i] = spentIntent
	}

	// Next, create a new transaction spending that output.
	spendingTx := createSpendTx(outpoint, pkScript, t)

	// Broadcast our spending transaction.
	spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
	if err != nil {
		t.Fatalf("unable to broadcast tx: %v", err)
	}

	err = waitForMempoolTx(miner, spenderSha)
	if err != nil {
		t.Fatalf("tx not relayed to miner: %v", err)
	}

	// Make sure the mempool spend clients are correctly notified.
	for _, client := range spendClientsMempool {
		select {
		case ntfn, ok := <-client.Spend:
			if !ok {
				t.Fatalf("channel closed unexpectedly")
			}

			if *ntfn.SpentOutPoint != *outpoint {
				t.Fatalf("ntfn includes wrong output, reports "+
					"%v instead of %v",
					ntfn.SpentOutPoint, outpoint)
			}
			if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
				t.Fatalf("ntfn includes wrong spender tx sha, "+
					"reports %v instead of %v",
					ntfn.SpenderTxHash[:], spenderSha[:])
			}
			if ntfn.SpenderInputIndex != 0 {
				t.Fatalf("ntfn includes wrong spending input "+
					"index, reports %v, should be %v",
					ntfn.SpenderInputIndex, 0)
			}
			if ntfn.SpendingHeight != currentHeight+1 {
				t.Fatalf("ntfn has wrong spending height: "+
					"expected %v, got %v", currentHeight+1,
					ntfn.SpendingHeight)
			}

		case <-time.After(5 * time.Second):
			t.Fatalf("did not receive notification")
		}
	}

	// TODO(halseth): create new clients that should be registered after tx
	// is in the mempool already, when btcd supports notifying on these.

	// Now we mine a single block, which should include our spend. The
	// notification should not be sent off again.
	if _, err := miner.Node.Generate(1); err != nil {
		t.Fatalf("unable to generate single block: %v", err)
	}

	// When a block is mined, the mempool notifications we registered should
	// not be sent off again, and the channel should be closed.
	for _, c := range spendClientsMempool {
		select {
		case _, ok := <-c.Spend:
			if ok {
				t.Fatalf("channel should have been closed")
			}
		case <-time.After(30 * time.Second):
			t.Fatalf("expected clients to be closed.")
		}
	}
}

func testBlockEpochNotification(miner *rpctest.Harness,
	notifier chainntnfs.ChainNotifier, t *testing.T) {

@@ -1259,6 +1364,10 @@ var ntfnTests = []testCase{
		name: "spend ntfn",
		test: testSpendNotification,
	},
	{
		name: "spend ntfn mempool",
		test: testSpendNotificationMempoolSpends,
	},
	{
		name: "block epoch",
		test: testBlockEpochNotification,
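Not visible in this diff: the entries of ntfnTests are driven by harness code elsewhere in interface_test.go, which is how the new "spend ntfn mempool" case gets run for each backend (the type switch inside the test then skips everything except btcd). A rough sketch of such a table-driven loop, assuming notifierType, miner, and notifier are prepared by surrounding setup code and that fmt, testing, rpctest, and chainntnfs are imported; this is illustrative, not the file's actual driver:

// runNtfnTests is a sketch of a per-backend driver loop; the real harness
// also handles miner and notifier setup and teardown.
func runNtfnTests(t *testing.T, notifierType string,
	miner *rpctest.Harness, notifier chainntnfs.ChainNotifier) {

	for _, ntfnTest := range ntfnTests {
		testName := fmt.Sprintf("%v: %v", notifierType, ntfnTest.name)
		success := t.Run(testName, func(t *testing.T) {
			ntfnTest.test(miner, notifier, t)
		})
		if !success {
			// Stop at the first failing case to keep logs readable.
			break
		}
	}
}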