From 39d86d57310c27a0d762b3966fda7bfe1ffc66a1 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 01/23] chainntnfs/interface_test: stop UnsafeStart notifiers within test In this commit, we modify the set of tests that start the different backend notifiers with UnsafeStart to stop them within the tests themselves. This prevents us from running into a panic when attempting to run the package-level tests with a filter (using test.run). --- chainntnfs/interface_test.go | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/chainntnfs/interface_test.go b/chainntnfs/interface_test.go index 36006dcc..180de026 100644 --- a/chainntnfs/interface_test.go +++ b/chainntnfs/interface_test.go @@ -1134,11 +1134,11 @@ func testCatchUpClientOnMissedBlocks(miner *rpctest.Harness, // the notifier's best block is at the tip of the chain. If it isn't, the // client may not receive all historical notifications. bestHeight := outdatedHeight + numBlocks - if err := notifier.UnsafeStart( - bestHeight, nil, bestHeight, generateBlocks); err != nil { - - t.Fatalf("Unable to unsafe start the notifier: %v", err) + err = notifier.UnsafeStart(bestHeight, nil, bestHeight, generateBlocks) + if err != nil { + t.Fatalf("unable to unsafe start the notifier: %v", err) } + defer notifier.Stop() // Create numClients clients whose best known block is 10 blocks behind // the tip of the chain. We expect each client to receive numBlocks @@ -1223,11 +1223,13 @@ func testCatchUpOnMissedBlocks(miner *rpctest.Harness, } // Next, start the notifier with outdated best block information. - if err := notifier.UnsafeStart(bestHeight, - nil, bestHeight+numBlocks, generateBlocks); err != nil { - - t.Fatalf("Unable to unsafe start the notifier: %v", err) + err = notifier.UnsafeStart( + bestHeight, nil, bestHeight+numBlocks, generateBlocks, + ) + if err != nil { + t.Fatalf("unable to unsafe start the notifier: %v", err) } + defer notifier.Stop() // Create numClients clients who will listen for block notifications. clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients) @@ -1396,11 +1398,13 @@ func testCatchUpOnMissedBlocksWithReorg(miner1 *rpctest.Harness, // shorter chain, to test that the notifier correctly rewinds to // the common ancestor between the two chains. syncHeight := nodeHeight1 + numBlocks + 1 - if err := notifier.UnsafeStart(nodeHeight1+numBlocks, - blocks[numBlocks-1], syncHeight, nil); err != nil { - + err = notifier.UnsafeStart( + nodeHeight1+numBlocks, blocks[numBlocks-1], syncHeight, nil, + ) + if err != nil { t.Fatalf("Unable to unsafe start the notifier: %v", err) } + defer notifier.Stop() // Create numClients clients who will listen for block notifications. clients := make([]*chainntnfs.BlockEpochEvent, 0, numClients) @@ -1680,9 +1684,6 @@ func TestInterfaces(t *testing.T) { success := t.Run(testName, func(t *testing.T) { blockCatchupTest.test(miner, notifier, t) }) - - notifier.Stop() - if !success { break } From f4cf1073d4feb8f8d717269b86c473d4b56d684f Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 02/23] chainntnfs/height_hint_cache: prevent db transactions with no updates In this commit, we modify our height hint cache to no longer start a database transaction if no outpoints/txids are provided to update the height hints for. 
--- chainntnfs/height_hint_cache.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/chainntnfs/height_hint_cache.go b/chainntnfs/height_hint_cache.go index 33dac971..cd499a6e 100644 --- a/chainntnfs/height_hint_cache.go +++ b/chainntnfs/height_hint_cache.go @@ -127,6 +127,10 @@ func (c *HeightHintCache) CommitSpendHint(height uint32, ops ...wire.OutPoint) e return nil } + if len(ops) == 0 { + return nil + } + Log.Tracef("Updating spend hint to height %d for %v", height, ops) return c.db.Batch(func(tx *bolt.Tx) error { @@ -197,6 +201,10 @@ func (c *HeightHintCache) PurgeSpendHint(ops ...wire.OutPoint) error { return nil } + if len(ops) == 0 { + return nil + } + Log.Tracef("Removing spend hints for %v", ops) return c.db.Batch(func(tx *bolt.Tx) error { @@ -228,6 +236,10 @@ func (c *HeightHintCache) CommitConfirmHint(height uint32, txids ...chainhash.Ha return nil } + if len(txids) == 0 { + return nil + } + Log.Tracef("Updating confirm hints to height %d for %v", height, txids) return c.db.Batch(func(tx *bolt.Tx) error { @@ -299,6 +311,10 @@ func (c *HeightHintCache) PurgeConfirmHint(txids ...chainhash.Hash) error { return nil } + if len(txids) == 0 { + return nil + } + Log.Tracef("Removing confirm hints for %v", txids) return c.db.Batch(func(tx *bolt.Tx) error { From f8789e9db0ce2887c2068a768b6b34b5f85c54bb Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 03/23] chainntnfs: rename txconfnotifier.go -> txnotifier.go --- chainntnfs/{txconfnotifier.go => txnotifier.go} | 0 chainntnfs/{txconfnotifier_test.go => txnotifier_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename chainntnfs/{txconfnotifier.go => txnotifier.go} (100%) rename chainntnfs/{txconfnotifier_test.go => txnotifier_test.go} (100%) diff --git a/chainntnfs/txconfnotifier.go b/chainntnfs/txnotifier.go similarity index 100% rename from chainntnfs/txconfnotifier.go rename to chainntnfs/txnotifier.go diff --git a/chainntnfs/txconfnotifier_test.go b/chainntnfs/txnotifier_test.go similarity index 100% rename from chainntnfs/txconfnotifier_test.go rename to chainntnfs/txnotifier_test.go From 82f6fd7a917d26743b6959c1a2853f56cf52a235 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 04/23] chainntnfs: rename TxConfNotifier -> TxNotifier --- chainntnfs/bitcoindnotify/bitcoind.go | 18 +- chainntnfs/bitcoindnotify/bitcoind_dev.go | 13 +- chainntnfs/btcdnotify/btcd.go | 18 +- chainntnfs/btcdnotify/btcd_dev.go | 13 +- chainntnfs/interface.go | 12 +- chainntnfs/neutrinonotify/neutrino.go | 18 +- chainntnfs/neutrinonotify/neutrino_dev.go | 17 +- chainntnfs/txnotifier.go | 232 +++++++++++----------- chainntnfs/txnotifier_test.go | 123 ++++++------ 9 files changed, 231 insertions(+), 233 deletions(-) diff --git a/chainntnfs/bitcoindnotify/bitcoind.go b/chainntnfs/bitcoindnotify/bitcoind.go index 32756903..4263f477 100644 --- a/chainntnfs/bitcoindnotify/bitcoind.go +++ b/chainntnfs/bitcoindnotify/bitcoind.go @@ -71,7 +71,7 @@ type BitcoindNotifier struct { spendNotifications map[wire.OutPoint]map[uint64]*spendNotification - txConfNotifier *chainntnfs.TxConfNotifier + txNotifier *chainntnfs.TxNotifier blockEpochClients map[uint64]*blockEpochRegistration @@ -142,7 +142,7 @@ func (b *BitcoindNotifier) Start() error { return err } - b.txConfNotifier = chainntnfs.NewTxConfNotifier( + b.txNotifier = chainntnfs.NewTxNotifier( uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache, ) @@ -184,7 +184,7 @@ func (b 
*BitcoindNotifier) Stop() error { close(epochClient.epochChan) } - b.txConfNotifier.TearDown() + b.txNotifier.TearDown() return nil } @@ -278,7 +278,7 @@ out: // begin safely updating the height hint // cache at tip, since any pending // rescans have now completed. - err = b.txConfNotifier.UpdateConfDetails( + err = b.txNotifier.UpdateConfDetails( *msg.TxID, confDetails, ) if err != nil { @@ -330,7 +330,7 @@ out: newBestBlock, missedBlocks, err := chainntnfs.HandleMissedBlocks( b.chainConn, - b.txConfNotifier, + b.txNotifier, b.bestBlock, item.Height, true, ) @@ -369,7 +369,7 @@ out: } newBestBlock, err := chainntnfs.RewindChain( - b.chainConn, b.txConfNotifier, + b.chainConn, b.txNotifier, b.bestBlock, item.Height-1, ) if err != nil { @@ -621,7 +621,7 @@ func (b *BitcoindNotifier) handleBlockConnected(block chainntnfs.BlockEpoch) err } txns := btcutil.NewBlock(rawBlock).Transactions() - err = b.txConfNotifier.ConnectTip( + err = b.txNotifier.ConnectTip( block.Hash, uint32(block.Height), txns) if err != nil { return fmt.Errorf("unable to connect tip: %v", err) @@ -959,11 +959,11 @@ func (b *BitcoindNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, chainntnfs.Log.Infof("New confirmation subscription: "+ "txid=%v, numconfs=%v", txid, numConfs) - // Register the conf notification with txconfnotifier. A non-nil value + // Register the conf notification with the TxNotifier. A non-nil value // for `dispatch` will be returned if we are required to perform a // manual scan for the confirmation. Otherwise the notifier will begin // watching at tip for the transaction to confirm. - dispatch, err := b.txConfNotifier.Register(ntfn) + dispatch, err := b.txNotifier.Register(ntfn) if err != nil { return nil, err } diff --git a/chainntnfs/bitcoindnotify/bitcoind_dev.go b/chainntnfs/bitcoindnotify/bitcoind_dev.go index f45cda09..8189ecf4 100644 --- a/chainntnfs/bitcoindnotify/bitcoind_dev.go +++ b/chainntnfs/bitcoindnotify/bitcoind_dev.go @@ -12,12 +12,11 @@ import ( ) // UnsafeStart starts the notifier with a specified best height and optional -// best hash. Its bestBlock and txConfNotifier are initialized with -// bestHeight and optionally bestHash. The parameter generateBlocks is -// necessary for the bitcoind notifier to ensure we drain all notifications up -// to syncHeight, since if they are generated ahead of UnsafeStart the chainConn -// may start up with an outdated best block and miss sending ntfns. Used for -// testing. +// best hash. Its bestBlock and txNotifier are initialized with bestHeight and +// optionally bestHash. The parameter generateBlocks is necessary for the +// bitcoind notifier to ensure we drain all notifications up to syncHeight, +// since if they are generated ahead of UnsafeStart the chainConn may start up +// with an outdated best block and miss sending ntfns. Used for testing. 
func (b *BitcoindNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash, syncHeight int32, generateBlocks func() error) error { @@ -30,7 +29,7 @@ func (b *BitcoindNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Has return err } - b.txConfNotifier = chainntnfs.NewTxConfNotifier( + b.txNotifier = chainntnfs.NewTxNotifier( uint32(bestHeight), reorgSafetyLimit, b.confirmHintCache, ) diff --git a/chainntnfs/btcdnotify/btcd.go b/chainntnfs/btcdnotify/btcd.go index 5a60a83f..1c357a5e 100644 --- a/chainntnfs/btcdnotify/btcd.go +++ b/chainntnfs/btcdnotify/btcd.go @@ -76,7 +76,7 @@ type BtcdNotifier struct { spendNotifications map[wire.OutPoint]map[uint64]*spendNotification - txConfNotifier *chainntnfs.TxConfNotifier + txNotifier *chainntnfs.TxNotifier blockEpochClients map[uint64]*blockEpochRegistration @@ -166,7 +166,7 @@ func (b *BtcdNotifier) Start() error { return err } - b.txConfNotifier = chainntnfs.NewTxConfNotifier( + b.txNotifier = chainntnfs.NewTxNotifier( uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache, ) @@ -214,7 +214,7 @@ func (b *BtcdNotifier) Stop() error { close(epochClient.epochChan) } - b.txConfNotifier.TearDown() + b.txNotifier.TearDown() return nil } @@ -348,7 +348,7 @@ out: // begin safely updating the height hint // cache at tip, since any pending // rescans have now completed. - err = b.txConfNotifier.UpdateConfDetails( + err = b.txNotifier.UpdateConfDetails( *msg.TxID, confDetails, ) if err != nil { @@ -398,7 +398,7 @@ out: newBestBlock, missedBlocks, err := chainntnfs.HandleMissedBlocks( b.chainConn, - b.txConfNotifier, + b.txNotifier, b.bestBlock, update.blockHeight, true, @@ -436,7 +436,7 @@ out: } newBestBlock, err := chainntnfs.RewindChain( - b.chainConn, b.txConfNotifier, b.bestBlock, + b.chainConn, b.txNotifier, b.bestBlock, update.blockHeight-1, ) if err != nil { @@ -703,7 +703,7 @@ func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) error { connect: true, } - err = b.txConfNotifier.ConnectTip( + err = b.txNotifier.ConnectTip( &newBlock.hash, newBlock.height, newBlock.txns, ) if err != nil { @@ -1019,11 +1019,11 @@ func (b *BtcdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, _ []byte, chainntnfs.Log.Infof("New confirmation subscription: "+ "txid=%v, numconfs=%v", txid, numConfs) - // Register the conf notification with txconfnotifier. A non-nil value + // Register the conf notification with the TxNotifier. A non-nil value // for `dispatch` will be returned if we are required to perform a // manual scan for the confirmation. Otherwise the notifier will begin // watching at tip for the transaction to confirm. - dispatch, err := b.txConfNotifier.Register(ntfn) + dispatch, err := b.txNotifier.Register(ntfn) if err != nil { return nil, err } diff --git a/chainntnfs/btcdnotify/btcd_dev.go b/chainntnfs/btcdnotify/btcd_dev.go index af64dad8..4146baa5 100644 --- a/chainntnfs/btcdnotify/btcd_dev.go +++ b/chainntnfs/btcdnotify/btcd_dev.go @@ -11,12 +11,11 @@ import ( ) // UnsafeStart starts the notifier with a specified best height and optional -// best hash. Its bestBlock and txConfNotifier are initialized with -// bestHeight and optionally bestHash. The parameter generateBlocks is -// necessary for the bitcoind notifier to ensure we drain all notifications up -// to syncHeight, since if they are generated ahead of UnsafeStart the chainConn -// may start up with an outdated best block and miss sending ntfns. Used for -// testing. +// best hash. 
Its bestBlock and txNotifier are initialized with bestHeight and +// optionally bestHash. The parameter generateBlocks is necessary for the +// bitcoind notifier to ensure we drain all notifications up to syncHeight, +// since if they are generated ahead of UnsafeStart the chainConn may start up +// with an outdated best block and miss sending ntfns. Used for testing. func (b *BtcdNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash, syncHeight int32, generateBlocks func() error) error { @@ -29,7 +28,7 @@ func (b *BtcdNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash, return err } - b.txConfNotifier = chainntnfs.NewTxConfNotifier( + b.txNotifier = chainntnfs.NewTxNotifier( uint32(bestHeight), reorgSafetyLimit, b.confirmHintCache, ) diff --git a/chainntnfs/interface.go b/chainntnfs/interface.go index 9b878669..5acec0a2 100644 --- a/chainntnfs/interface.go +++ b/chainntnfs/interface.go @@ -392,10 +392,10 @@ func GetClientMissedBlocks(chainConn ChainConn, clientBestBlock *BlockEpoch, return missedBlocks, nil } -// RewindChain handles internal state updates for the notifier's TxConfNotifier -// It has no effect if given a height greater than or equal to our current best +// RewindChain handles internal state updates for the notifier's TxNotifier It +// has no effect if given a height greater than or equal to our current best // known height. It returns the new best block for the notifier. -func RewindChain(chainConn ChainConn, txConfNotifier *TxConfNotifier, +func RewindChain(chainConn ChainConn, txNotifier *TxNotifier, currBestBlock BlockEpoch, targetHeight int32) (BlockEpoch, error) { newBestBlock := BlockEpoch{ @@ -414,7 +414,7 @@ func RewindChain(chainConn ChainConn, txConfNotifier *TxConfNotifier, Log.Infof("Block disconnected from main chain: "+ "height=%v, sha=%v", height, newBestBlock.Hash) - err = txConfNotifier.DisconnectTip(uint32(height)) + err = txNotifier.DisconnectTip(uint32(height)) if err != nil { return newBestBlock, fmt.Errorf("unable to "+ " disconnect tip for height=%d: %v", @@ -436,7 +436,7 @@ func RewindChain(chainConn ChainConn, txConfNotifier *TxConfNotifier, // returned in case a chain rewind occurs and partially completes before // erroring. In the case where there is no rewind, the notifier's // current best block is returned. 
-func HandleMissedBlocks(chainConn ChainConn, txConfNotifier *TxConfNotifier, +func HandleMissedBlocks(chainConn ChainConn, txNotifier *TxNotifier, currBestBlock BlockEpoch, newHeight int32, backendStoresReorgs bool) (BlockEpoch, []BlockEpoch, error) { @@ -462,7 +462,7 @@ func HandleMissedBlocks(chainConn ChainConn, txConfNotifier *TxConfNotifier, "common ancestor: %v", err) } - currBestBlock, err = RewindChain(chainConn, txConfNotifier, + currBestBlock, err = RewindChain(chainConn, txNotifier, currBestBlock, startingHeight) if err != nil { return currBestBlock, nil, fmt.Errorf("unable to "+ diff --git a/chainntnfs/neutrinonotify/neutrino.go b/chainntnfs/neutrinonotify/neutrino.go index b4455442..4c43b328 100644 --- a/chainntnfs/neutrinonotify/neutrino.go +++ b/chainntnfs/neutrinonotify/neutrino.go @@ -70,7 +70,7 @@ type NeutrinoNotifier struct { spendNotifications map[wire.OutPoint]map[uint64]*spendNotification - txConfNotifier *chainntnfs.TxConfNotifier + txNotifier *chainntnfs.TxNotifier blockEpochClients map[uint64]*blockEpochRegistration @@ -162,7 +162,7 @@ func (n *NeutrinoNotifier) Start() error { neutrino.WatchInputs(zeroInput), } - n.txConfNotifier = chainntnfs.NewTxConfNotifier( + n.txNotifier = chainntnfs.NewTxNotifier( n.bestHeight, reorgSafetyLimit, n.confirmHintCache, ) @@ -206,7 +206,7 @@ func (n *NeutrinoNotifier) Stop() error { close(epochClient.epochChan) } - n.txConfNotifier.TearDown() + n.txNotifier.TearDown() return nil } @@ -350,7 +350,7 @@ out: // begin safely updating the height hint // cache at tip, since any pending // rescans have now completed. - err = n.txConfNotifier.UpdateConfDetails( + err = n.txNotifier.UpdateConfDetails( *msg.TxID, confDetails, ) if err != nil { @@ -426,7 +426,7 @@ out: _, missedBlocks, err := chainntnfs.HandleMissedBlocks( n.chainConn, - n.txConfNotifier, + n.txNotifier, bestBlock, int32(update.height), false, @@ -482,7 +482,7 @@ out: Hash: hash, } newBestBlock, err := chainntnfs.RewindChain( - n.chainConn, n.txConfNotifier, notifierBestBlock, + n.chainConn, n.txNotifier, notifierBestBlock, int32(update.height-1), ) if err != nil { @@ -593,7 +593,7 @@ func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error { // First process the block for our internal state. A new block has // been connected to the main chain. Send out any N confirmation // notifications which may have been triggered by this new block. - err := n.txConfNotifier.ConnectTip( + err := n.txNotifier.ConnectTip( &newBlock.hash, newBlock.height, newBlock.txns, ) if err != nil { @@ -928,11 +928,11 @@ func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, chainntnfs.Log.Infof("New confirmation subscription: "+ "txid=%v, numconfs=%v", txid, numConfs) - // Register the conf notification with txconfnotifier. A non-nil value + // Register the conf notification with the TxNotifier. A non-nil value // for `dispatch` will be returned if we are required to perform a // manual scan for the confirmation. Otherwise the notifier will begin // watching at tip for the transaction to confirm. 
- dispatch, err := n.txConfNotifier.Register(ntfn) + dispatch, err := n.txNotifier.Register(ntfn) if err != nil { return nil, err } diff --git a/chainntnfs/neutrinonotify/neutrino_dev.go b/chainntnfs/neutrinonotify/neutrino_dev.go index b069ff5d..b5ff182b 100644 --- a/chainntnfs/neutrinonotify/neutrino_dev.go +++ b/chainntnfs/neutrinonotify/neutrino_dev.go @@ -13,13 +13,14 @@ import ( ) // UnsafeStart starts the notifier with a specified best height and optional -// best hash. Its bestHeight, txConfNotifier and neutrino node are initialized -// with bestHeight. The parameter generateBlocks is necessary for the -// bitcoind notifier to ensure we drain all notifications up to syncHeight, -// since if they are generated ahead of UnsafeStart the chainConn may start -// up with an outdated best block and miss sending ntfns. Used for testing. -func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash, - syncHeight int32, generateBlocks func() error) error { +// best hash. Its bestHeight, txNotifier and neutrino node are initialized with +// bestHeight. The parameter generateBlocks is necessary for the bitcoind +// notifier to ensure we drain all notifications up to syncHeight, since if they +// are generated ahead of UnsafeStart the chainConn may start up with an +// outdated best block and miss sending ntfns. Used for testing. +func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32, + bestHash *chainhash.Hash, syncHeight int32, + generateBlocks func() error) error { // We'll obtain the latest block height of the p2p node. We'll // start the auto-rescan from this point. Once a caller actually wishes @@ -47,7 +48,7 @@ func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Has neutrino.WatchInputs(zeroInput), } - n.txConfNotifier = chainntnfs.NewTxConfNotifier( + n.txNotifier = chainntnfs.NewTxNotifier( uint32(bestHeight), reorgSafetyLimit, n.confirmHintCache, ) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index fb3998c4..2db2d7e5 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -10,9 +10,9 @@ import ( ) var ( - // ErrTxConfNotifierExiting is an error returned when attempting to - // interact with the TxConfNotifier but it been shut down. - ErrTxConfNotifierExiting = errors.New("TxConfNotifier is exiting") + // ErrTxNotifierExiting is an error returned when attempting to interact + // with the TxNotifier but it been shut down. + ErrTxNotifierExiting = errors.New("TxNotifier is exiting") // ErrTxMaxConfs signals that the user requested a number of // confirmations beyond the reorg safety limit. @@ -87,12 +87,12 @@ func NewConfirmationEvent(numConfs uint32) *ConfirmationEvent { } } -// TxConfNotifier is used to register transaction confirmation notifications and +// TxNotifier is used to register transaction confirmation notifications and // dispatch them as the transactions confirm. A client can request to be // notified when a particular transaction has sufficient on-chain confirmations -// (or be notified immediately if the tx already does), and the TxConfNotifier +// (or be notified immediately if the tx already does), and the TxNotifier // will watch changes to the blockchain in order to satisfy these requests. -type TxConfNotifier struct { +type TxNotifier struct { // currentHeight is the height of the tracked blockchain. It is used to // determine the number of confirmations a tx has and ensure blocks are // connected and disconnected in order. 
@@ -176,12 +176,12 @@ func newConfNtfnSet() *confNtfnSet { } } -// NewTxConfNotifier creates a TxConfNotifier. The current height of the -// blockchain is accepted as a parameter. -func NewTxConfNotifier(startHeight uint32, reorgSafetyLimit uint32, - hintCache ConfirmHintCache) *TxConfNotifier { +// NewTxNotifier creates a TxNotifier. The current height of the blockchain is +// accepted as a parameter. +func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32, + hintCache ConfirmHintCache) *TxNotifier { - return &TxConfNotifier{ + return &TxNotifier{ currentHeight: startHeight, reorgSafetyLimit: reorgSafetyLimit, confNotifications: make(map[chainhash.Hash]*confNtfnSet), @@ -202,18 +202,18 @@ func NewTxConfNotifier(startHeight uint32, reorgSafetyLimit uint32, // the confirmation details must be provided with the UpdateConfDetails method, // otherwise we will wait for the transaction to confirm even though it already // has. -func (tcn *TxConfNotifier) Register( +func (n *TxNotifier) Register( ntfn *ConfNtfn) (*HistoricalConfDispatch, error) { select { - case <-tcn.quit: - return nil, ErrTxConfNotifierExiting + case <-n.quit: + return nil, ErrTxNotifierExiting default: } // Enforce that we will not dispatch confirmations beyond the reorg // safety limit. - if ntfn.NumConfirmations > tcn.reorgSafetyLimit { + if ntfn.NumConfirmations > n.reorgSafetyLimit { return nil, ErrTxMaxConfs } @@ -222,7 +222,7 @@ func (tcn *TxConfNotifier) Register( // // TODO(conner): verify that all submitted height hints are identical. startHeight := ntfn.HeightHint - hint, err := tcn.hintCache.QueryConfirmHint(*ntfn.TxID) + hint, err := n.hintCache.QueryConfirmHint(*ntfn.TxID) if err == nil { if hint > startHeight { Log.Debugf("Using height hint %d retrieved "+ @@ -234,15 +234,15 @@ func (tcn *TxConfNotifier) Register( *ntfn.TxID, err) } - tcn.Lock() - defer tcn.Unlock() + n.Lock() + defer n.Unlock() - confSet, ok := tcn.confNotifications[*ntfn.TxID] + confSet, ok := n.confNotifications[*ntfn.TxID] if !ok { // If this is the first registration for this txid, construct a // confSet to coalesce all notifications for the same txid. confSet = newConfNtfnSet() - tcn.confNotifications[*ntfn.TxID] = confSet + n.confNotifications[*ntfn.TxID] = confSet } confSet.ntfns[ntfn.ConfID] = ntfn @@ -257,7 +257,7 @@ func (tcn *TxConfNotifier) Register( // client. Log.Debugf("Attempting to dispatch conf for txid=%v "+ "on registration since rescan has finished", ntfn.TxID) - return nil, tcn.dispatchConfDetails(ntfn, confSet.details) + return nil, n.dispatchConfDetails(ntfn, confSet.details) // A rescan is already in progress, return here to prevent dispatching // another. When the scan returns, this notifications details will be @@ -274,7 +274,7 @@ func (tcn *TxConfNotifier) Register( // If the provided or cached height hint indicates that the transaction // is to be confirmed at a height greater than the conf notifier's // current height, we'll refrain from spawning a historical dispatch. 
- if startHeight > tcn.currentHeight { + if startHeight > n.currentHeight { Log.Debugf("Height hint is above current height, not dispatching "+ "historical rescan for txid=%v ", ntfn.TxID) // Set the rescan status to complete, which will allow the conf @@ -294,7 +294,7 @@ func (tcn *TxConfNotifier) Register( TxID: ntfn.TxID, PkScript: ntfn.PkScript, StartHeight: startHeight, - EndHeight: tcn.currentHeight, + EndHeight: n.currentHeight, } // Set this confSet's status to pending, ensuring subsequent @@ -310,23 +310,23 @@ func (tcn *TxConfNotifier) Register( // // NOTE: The notification should be registered first to ensure notifications are // dispatched correctly. -func (tcn *TxConfNotifier) UpdateConfDetails(txid chainhash.Hash, +func (n *TxNotifier) UpdateConfDetails(txid chainhash.Hash, details *TxConfirmation) error { select { - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting default: } // Ensure we hold the lock throughout handling the notification to // prevent the notifier from advancing its height underneath us. - tcn.Lock() - defer tcn.Unlock() + n.Lock() + defer n.Unlock() // First, we'll determine whether we have an active notification for // this transaction with the given ID. - confSet, ok := tcn.confNotifications[txid] + confSet, ok := n.confNotifications[txid] if !ok { return fmt.Errorf("no notification found with TxID %v", txid) } @@ -354,7 +354,7 @@ func (tcn *TxConfNotifier) UpdateConfDetails(txid chainhash.Hash, return nil } - if details.BlockHeight > tcn.currentHeight { + if details.BlockHeight > n.currentHeight { Log.Debugf("Conf details for txid=%v found above current "+ "height, waiting to dispatch at tip", txid) return nil @@ -362,7 +362,7 @@ func (tcn *TxConfNotifier) UpdateConfDetails(txid chainhash.Hash, Log.Debugf("Updating conf details for txid=%v details", txid) - err := tcn.hintCache.CommitConfirmHint(details.BlockHeight, txid) + err := n.hintCache.CommitConfirmHint(details.BlockHeight, txid) if err != nil { // The error is not fatal, so we should not return an error to // the caller. @@ -374,7 +374,7 @@ func (tcn *TxConfNotifier) UpdateConfDetails(txid chainhash.Hash, // notifications that have not yet been delivered. confSet.details = details for _, ntfn := range confSet.ntfns { - err = tcn.dispatchConfDetails(ntfn, details) + err = n.dispatchConfDetails(ntfn, details) if err != nil { return err } @@ -386,7 +386,7 @@ func (tcn *TxConfNotifier) UpdateConfDetails(txid chainhash.Hash, // dispatchConfDetails attempts to cache and dispatch details to a particular // client if the transaction has sufficiently confirmed. If the provided details // are nil, this method will be a no-op. -func (tcn *TxConfNotifier) dispatchConfDetails( +func (n *TxNotifier) dispatchConfDetails( ntfn *ConfNtfn, details *TxConfirmation) error { // If no details are provided, return early as we can't dispatch. @@ -401,7 +401,7 @@ func (tcn *TxConfNotifier) dispatchConfDetails( // confirmations. If it has, we'll dispatch a confirmation // notification to the caller. confHeight := details.BlockHeight + ntfn.NumConfirmations - 1 - if confHeight <= tcn.currentHeight { + if confHeight <= n.currentHeight { Log.Infof("Dispatching %v conf notification for %v", ntfn.NumConfirmations, ntfn.TxID) @@ -410,15 +410,15 @@ func (tcn *TxConfNotifier) dispatchConfDetails( // confirmed. 
select { case ntfn.Event.Updates <- 0: - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting } select { case ntfn.Event.Confirmed <- details: ntfn.dispatched = true - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting } } else { Log.Debugf("Queueing %v conf notification for %v at tip ", @@ -427,33 +427,33 @@ func (tcn *TxConfNotifier) dispatchConfDetails( // Otherwise, we'll keep track of the notification // request by the height at which we should dispatch the // confirmation notification. - ntfnSet, exists := tcn.ntfnsByConfirmHeight[confHeight] + ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight] if !exists { ntfnSet = make(map[*ConfNtfn]struct{}) - tcn.ntfnsByConfirmHeight[confHeight] = ntfnSet + n.ntfnsByConfirmHeight[confHeight] = ntfnSet } ntfnSet[ntfn] = struct{}{} // We'll also send an update to the client of how many // confirmations are left for the transaction to be // confirmed. - numConfsLeft := confHeight - tcn.currentHeight + numConfsLeft := confHeight - n.currentHeight select { case ntfn.Event.Updates <- numConfsLeft: - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting } } // As a final check, we'll also watch the transaction if it's // still possible for it to get reorged out of the chain. blockHeight := details.BlockHeight - reorgSafeHeight := blockHeight + tcn.reorgSafetyLimit - if reorgSafeHeight > tcn.currentHeight { - txSet, exists := tcn.txsByInitialHeight[blockHeight] + reorgSafeHeight := blockHeight + n.reorgSafetyLimit + if reorgSafeHeight > n.currentHeight { + txSet, exists := n.txsByInitialHeight[blockHeight] if !exists { txSet = make(map[chainhash.Hash]struct{}) - tcn.txsByInitialHeight[blockHeight] = txSet + n.txsByInitialHeight[blockHeight] = txSet } txSet[*ntfn.TxID] = struct{}{} } @@ -466,25 +466,25 @@ func (tcn *TxConfNotifier) dispatchConfDetails( // Also, if any watched transactions now have the required number of // confirmations as a result of this block being connected, this dispatches // notifications. -func (tcn *TxConfNotifier) ConnectTip(blockHash *chainhash.Hash, +func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, blockHeight uint32, txns []*btcutil.Tx) error { select { - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting default: } - tcn.Lock() - defer tcn.Unlock() + n.Lock() + defer n.Unlock() - if blockHeight != tcn.currentHeight+1 { + if blockHeight != n.currentHeight+1 { return fmt.Errorf("Received blocks out of order: "+ "current height=%d, new height=%d", - tcn.currentHeight, blockHeight) + n.currentHeight, blockHeight) } - tcn.currentHeight++ - tcn.reorgDepth = 0 + n.currentHeight++ + n.reorgDepth = 0 // Record any newly confirmed transactions by their confirmed height so // that notifications get dispatched when the transactions reach their @@ -496,7 +496,7 @@ func (tcn *TxConfNotifier) ConnectTip(blockHash *chainhash.Hash, // Check if we have any pending notifications for this txid. If // none are found, we can proceed to the next transaction. - confSet, ok := tcn.confNotifications[*txHash] + confSet, ok := n.confNotifications[*txHash] if !ok { continue } @@ -529,20 +529,20 @@ func (tcn *TxConfNotifier) ConnectTip(blockHash *chainhash.Hash, // confirmations so that we can notify them when // expected. 
confHeight := blockHeight + ntfn.NumConfirmations - 1 - ntfnSet, exists := tcn.ntfnsByConfirmHeight[confHeight] + ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight] if !exists { ntfnSet = make(map[*ConfNtfn]struct{}) - tcn.ntfnsByConfirmHeight[confHeight] = ntfnSet + n.ntfnsByConfirmHeight[confHeight] = ntfnSet } ntfnSet[ntfn] = struct{}{} // We'll also note the initial confirmation height in // order to correctly handle dispatching notifications // when the transaction gets reorged out of the chain. - txSet, exists := tcn.txsByInitialHeight[blockHeight] + txSet, exists := n.txsByInitialHeight[blockHeight] if !exists { txSet = make(map[chainhash.Hash]struct{}) - tcn.txsByInitialHeight[blockHeight] = txSet + n.txsByInitialHeight[blockHeight] = txSet } txSet[*txHash] = struct{}{} } @@ -556,11 +556,11 @@ func (tcn *TxConfNotifier) ConnectTip(blockHash *chainhash.Hash, // this map doesn't tell us whether the transaction has confirmed or // not, we'll need to look at txsByInitialHeight to determine so. var txsToUpdateHints []chainhash.Hash - for confirmedTx := range tcn.txsByInitialHeight[tcn.currentHeight] { + for confirmedTx := range n.txsByInitialHeight[n.currentHeight] { txsToUpdateHints = append(txsToUpdateHints, confirmedTx) } out: - for maybeUnconfirmedTx, confSet := range tcn.confNotifications { + for maybeUnconfirmedTx, confSet := range n.confNotifications { // We shouldn't update the confirm hints if we still have a // pending rescan in progress. We'll skip writing any for // notification sets that haven't reached rescanComplete. @@ -568,7 +568,7 @@ out: continue } - for height, confirmedTxs := range tcn.txsByInitialHeight { + for height, confirmedTxs := range n.txsByInitialHeight { // Skip the transactions that confirmed at the new block // height as those have already been added. if height == blockHeight { @@ -585,14 +585,14 @@ out: } if len(txsToUpdateHints) > 0 { - err := tcn.hintCache.CommitConfirmHint( - tcn.currentHeight, txsToUpdateHints..., + err := n.hintCache.CommitConfirmHint( + n.currentHeight, txsToUpdateHints..., ) if err != nil { // The error is not fatal, so we should not return an // error to the caller. Log.Errorf("Unable to update confirm hint to %d for "+ - "%v: %v", tcn.currentHeight, txsToUpdateHints, + "%v: %v", n.currentHeight, txsToUpdateHints, err) } } @@ -600,9 +600,9 @@ out: // Next, we'll dispatch an update to all of the notification clients for // our watched transactions with the number of confirmations left at // this new height. - for _, txHashes := range tcn.txsByInitialHeight { + for _, txHashes := range n.txsByInitialHeight { for txHash := range txHashes { - confSet := tcn.confNotifications[txHash] + confSet := n.confNotifications[txHash] for _, ntfn := range confSet.ntfns { txConfHeight := confSet.details.BlockHeight + ntfn.NumConfirmations - 1 @@ -619,8 +619,8 @@ out: select { case ntfn.Event.Updates <- numConfsLeft: - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting } } } @@ -628,8 +628,8 @@ out: // Then, we'll dispatch notifications for all the transactions that have // become confirmed at this new block height. 
- for ntfn := range tcn.ntfnsByConfirmHeight[blockHeight] { - confSet := tcn.confNotifications[*ntfn.TxID] + for ntfn := range n.ntfnsByConfirmHeight[blockHeight] { + confSet := n.confNotifications[*ntfn.TxID] Log.Infof("Dispatching %v conf notification for %v", ntfn.NumConfirmations, ntfn.TxID) @@ -637,21 +637,21 @@ out: select { case ntfn.Event.Confirmed <- confSet.details: ntfn.dispatched = true - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting } } - delete(tcn.ntfnsByConfirmHeight, tcn.currentHeight) + delete(n.ntfnsByConfirmHeight, n.currentHeight) // Clear entries from confNotifications and confTxsByInitialHeight. We // assume that reorgs deeper than the reorg safety limit do not happen, // so we can clear out entries for the block that is now mature. - if tcn.currentHeight >= tcn.reorgSafetyLimit { - matureBlockHeight := tcn.currentHeight - tcn.reorgSafetyLimit - for txHash := range tcn.txsByInitialHeight[matureBlockHeight] { - delete(tcn.confNotifications, txHash) + if n.currentHeight >= n.reorgSafetyLimit { + matureBlockHeight := n.currentHeight - n.reorgSafetyLimit + for txHash := range n.txsByInitialHeight[matureBlockHeight] { + delete(n.confNotifications, txHash) } - delete(tcn.txsByInitialHeight, matureBlockHeight) + delete(n.txsByInitialHeight, matureBlockHeight) } return nil @@ -661,47 +661,47 @@ out: // a chain reorganization. If any watched transactions were included in this // block, internal structures are updated to ensure a confirmation notification // is not sent unless the transaction is included in the new chain. -func (tcn *TxConfNotifier) DisconnectTip(blockHeight uint32) error { +func (n *TxNotifier) DisconnectTip(blockHeight uint32) error { select { - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting default: } - tcn.Lock() - defer tcn.Unlock() + n.Lock() + defer n.Unlock() - if blockHeight != tcn.currentHeight { + if blockHeight != n.currentHeight { return fmt.Errorf("Received blocks out of order: "+ "current height=%d, disconnected height=%d", - tcn.currentHeight, blockHeight) + n.currentHeight, blockHeight) } - tcn.currentHeight-- - tcn.reorgDepth++ + n.currentHeight-- + n.reorgDepth++ // Rewind the height hint for all watched transactions. var txs []chainhash.Hash - for tx := range tcn.confNotifications { + for tx := range n.confNotifications { txs = append(txs, tx) } - err := tcn.hintCache.CommitConfirmHint(tcn.currentHeight, txs...) + err := n.hintCache.CommitConfirmHint(n.currentHeight, txs...) if err != nil { Log.Errorf("Unable to update confirm hint to %d for %v: %v", - tcn.currentHeight, txs, err) + n.currentHeight, txs, err) return err } // We'll go through all of our watched transactions and attempt to drain // their notification channels to ensure sending notifications to the // clients is always non-blocking. - for initialHeight, txHashes := range tcn.txsByInitialHeight { + for initialHeight, txHashes := range n.txsByInitialHeight { for txHash := range txHashes { // If the transaction has been reorged out of the chain, // we'll make sure to remove the cached confirmation // details to prevent notifying clients with old // information. - confSet := tcn.confNotifications[txHash] + confSet := n.confNotifications[txHash] if initialHeight == blockHeight { confSet.details = nil } @@ -712,8 +712,8 @@ func (tcn *TxConfNotifier) DisconnectTip(blockHeight uint32) error { // Updates channel are always non-blocking. 
select { case <-ntfn.Event.Updates: - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting default: } @@ -722,7 +722,7 @@ func (tcn *TxConfNotifier) DisconnectTip(blockHeight uint32) error { // disconnected. If it was, we'll need to // dispatch a reorg notification to the client. if initialHeight == blockHeight { - err := tcn.dispatchConfReorg( + err := n.dispatchConfReorg( ntfn, blockHeight, ) if err != nil { @@ -735,7 +735,7 @@ func (tcn *TxConfNotifier) DisconnectTip(blockHeight uint32) error { // Finally, we can remove the transactions we're currently watching that // were included in this block height. - delete(tcn.txsByInitialHeight, blockHeight) + delete(n.txsByInitialHeight, blockHeight) return nil } @@ -744,16 +744,16 @@ func (tcn *TxConfNotifier) DisconnectTip(blockHeight uint32) error { // confirmation notification was already delivered. // // NOTE: This must be called with the TxNotifier's lock held. -func (tcn *TxConfNotifier) dispatchConfReorg( - ntfn *ConfNtfn, heightDisconnected uint32) error { +func (n *TxNotifier) dispatchConfReorg(ntfn *ConfNtfn, + heightDisconnected uint32) error { // If the transaction's confirmation notification has yet to be // dispatched, we'll need to clear its entry within the - // ntfnsByConfirmHeight index to prevent from notifiying the client once + // ntfnsByConfirmHeight index to prevent from notifying the client once // the notifier reaches the confirmation height. if !ntfn.dispatched { confHeight := heightDisconnected + ntfn.NumConfirmations - 1 - ntfnSet, exists := tcn.ntfnsByConfirmHeight[confHeight] + ntfnSet, exists := n.ntfnsByConfirmHeight[confHeight] if exists { delete(ntfnSet, ntfn) } @@ -765,8 +765,8 @@ func (tcn *TxConfNotifier) dispatchConfReorg( // ensure sends to the Confirmed channel are always non-blocking. select { case <-ntfn.Event.Confirmed: - case <-tcn.quit: - return ErrTxConfNotifierExiting + case <-n.quit: + return ErrTxNotifierExiting default: } @@ -775,24 +775,24 @@ func (tcn *TxConfNotifier) dispatchConfReorg( // Send a negative confirmation notification to the client indicating // how many blocks have been disconnected successively. select { - case ntfn.Event.NegativeConf <- int32(tcn.reorgDepth): - case <-tcn.quit: - return ErrTxConfNotifierExiting + case ntfn.Event.NegativeConf <- int32(n.reorgDepth): + case <-n.quit: + return ErrTxNotifierExiting } return nil } -// TearDown is to be called when the owner of the TxConfNotifier is exiting. -// This closes the event channels of all registered notifications that have -// not been dispatched yet. -func (tcn *TxConfNotifier) TearDown() { - tcn.Lock() - defer tcn.Unlock() +// TearDown is to be called when the owner of the TxNotifier is exiting. This +// closes the event channels of all registered notifications that have not been +// dispatched yet. +func (n *TxNotifier) TearDown() { + n.Lock() + defer n.Unlock() - close(tcn.quit) + close(n.quit) - for _, confSet := range tcn.confNotifications { + for _, confSet := range n.confNotifications { for _, ntfn := range confSet.ntfns { if ntfn.dispatched { continue diff --git a/chainntnfs/txnotifier_test.go b/chainntnfs/txnotifier_test.go index 04f13ef5..ded8f9ee 100644 --- a/chainntnfs/txnotifier_test.go +++ b/chainntnfs/txnotifier_test.go @@ -96,8 +96,8 @@ func newMockHintCache() *mockHintCache { } } -// TestTxConfFutureDispatch tests that the TxConfNotifier dispatches -// registered notifications when the transaction confirms after registration. 
+// TestTxConfFutureDispatch tests that the TxNotifier dispatches registered +// notifications when the transaction confirms after registration. func TestTxConfFutureDispatch(t *testing.T) { t.Parallel() @@ -113,10 +113,10 @@ func TestTxConfFutureDispatch(t *testing.T) { ) hintCache := newMockHintCache() - tcn := chainntnfs.NewTxConfNotifier(10, 100, hintCache) + n := chainntnfs.NewTxNotifier(10, 100, hintCache) - // Create the test transactions and register them with the - // TxConfNotifier before including them in a block to receive future + // Create the test transactions and register them with the TxNotifier + // before including them in a block to receive future // notifications. tx1Hash := tx1.TxHash() ntfn1 := chainntnfs.ConfNtfn{ @@ -124,7 +124,7 @@ func TestTxConfFutureDispatch(t *testing.T) { NumConfirmations: tx1NumConfs, Event: chainntnfs.NewConfirmationEvent(tx1NumConfs), } - if _, err := tcn.Register(&ntfn1); err != nil { + if _, err := n.Register(&ntfn1); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -134,7 +134,7 @@ func TestTxConfFutureDispatch(t *testing.T) { NumConfirmations: tx2NumConfs, Event: chainntnfs.NewConfirmationEvent(tx2NumConfs), } - if _, err := tcn.Register(&ntfn2); err != nil { + if _, err := n.Register(&ntfn2); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -156,13 +156,13 @@ func TestTxConfFutureDispatch(t *testing.T) { default: } - // Include the transactions in a block and add it to the TxConfNotifier. + // Include the transactions in a block and add it to the TxNotifier. // This should confirm tx1, but not tx2. block1 := btcutil.NewBlock(&wire.MsgBlock{ Transactions: []*wire.MsgTx{&tx1, &tx2, &tx3}, }) - err := tcn.ConnectTip( + err := n.ConnectTip( block1.Hash(), 11, block1.Transactions(), ) if err != nil { @@ -219,13 +219,13 @@ func TestTxConfFutureDispatch(t *testing.T) { default: } - // Create a new block and add it to the TxConfNotifier at the next - // height. This should confirm tx2. + // Create a new block and add it to the TxNotifier at the next height. + // This should confirm tx2. block2 := btcutil.NewBlock(&wire.MsgBlock{ Transactions: []*wire.MsgTx{&tx3}, }) - err = tcn.ConnectTip(block2.Hash(), 12, block2.Transactions()) + err = n.ConnectTip(block2.Hash(), 12, block2.Transactions()) if err != nil { t.Fatalf("Failed to connect block: %v", err) } @@ -269,9 +269,8 @@ func TestTxConfFutureDispatch(t *testing.T) { } } -// TestTxConfHistoricalDispatch tests that the TxConfNotifier dispatches -// registered notifications when the transaction is confirmed before -// registration. +// TestTxConfHistoricalDispatch tests that the TxNotifier dispatches registered +// notifications when the transaction is confirmed before registration. func TestTxConfHistoricalDispatch(t *testing.T) { t.Parallel() @@ -287,9 +286,9 @@ func TestTxConfHistoricalDispatch(t *testing.T) { ) hintCache := newMockHintCache() - tcn := chainntnfs.NewTxConfNotifier(10, 100, hintCache) + n := chainntnfs.NewTxNotifier(10, 100, hintCache) - // Create the test transactions at a height before the TxConfNotifier's + // Create the test transactions at a height before the TxNotifier's // starting height so that they are confirmed once registering them. 
tx1Hash := tx1.TxHash() ntfn1 := chainntnfs.ConfNtfn{ @@ -298,7 +297,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { NumConfirmations: tx1NumConfs, Event: chainntnfs.NewConfirmationEvent(tx1NumConfs), } - if _, err := tcn.Register(&ntfn1); err != nil { + if _, err := n.Register(&ntfn1); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -309,7 +308,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { NumConfirmations: tx2NumConfs, Event: chainntnfs.NewConfirmationEvent(tx2NumConfs), } - if _, err := tcn.Register(&ntfn2); err != nil { + if _, err := n.Register(&ntfn2); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -320,7 +319,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { BlockHeight: 9, TxIndex: 1, } - err := tcn.UpdateConfDetails(tx1Hash, &txConf1) + err := n.UpdateConfDetails(tx1Hash, &txConf1) if err != nil { t.Fatalf("unable to update conf details: %v", err) } @@ -353,7 +352,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { BlockHeight: 9, TxIndex: 2, } - err = tcn.UpdateConfDetails(tx2Hash, &txConf2) + err = n.UpdateConfDetails(tx2Hash, &txConf2) if err != nil { t.Fatalf("unable to update conf details: %v", err) } @@ -375,13 +374,13 @@ func TestTxConfHistoricalDispatch(t *testing.T) { default: } - // Create a new block and add it to the TxConfNotifier at the next - // height. This should confirm tx2. + // Create a new block and add it to the TxNotifier at the next height. + // This should confirm tx2. block := btcutil.NewBlock(&wire.MsgBlock{ Transactions: []*wire.MsgTx{&tx3}, }) - err = tcn.ConnectTip(block.Hash(), 11, block.Transactions()) + err = n.ConnectTip(block.Hash(), 11, block.Transactions()) if err != nil { t.Fatalf("Failed to connect block: %v", err) } @@ -420,7 +419,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { } } -// TestTxConfChainReorg tests that TxConfNotifier dispatches Confirmed and +// TestTxConfChainReorg tests that TxNotifier dispatches Confirmed and // NegativeConf notifications appropriately when there is a chain // reorganization. func TestTxConfChainReorg(t *testing.T) { @@ -439,7 +438,7 @@ func TestTxConfChainReorg(t *testing.T) { ) hintCache := newMockHintCache() - tcn := chainntnfs.NewTxConfNotifier(7, 100, hintCache) + n := chainntnfs.NewTxNotifier(7, 100, hintCache) // Tx 1 will be confirmed in block 9 and requires 2 confs. 
tx1Hash := tx1.TxHash() @@ -448,11 +447,11 @@ func TestTxConfChainReorg(t *testing.T) { NumConfirmations: tx1NumConfs, Event: chainntnfs.NewConfirmationEvent(tx1NumConfs), } - if _, err := tcn.Register(&ntfn1); err != nil { + if _, err := n.Register(&ntfn1); err != nil { t.Fatalf("unable to register ntfn: %v", err) } - if err := tcn.UpdateConfDetails(*ntfn1.TxID, nil); err != nil { + if err := n.UpdateConfDetails(*ntfn1.TxID, nil); err != nil { t.Fatalf("unable to deliver conf details: %v", err) } @@ -463,11 +462,11 @@ func TestTxConfChainReorg(t *testing.T) { NumConfirmations: tx2NumConfs, Event: chainntnfs.NewConfirmationEvent(tx2NumConfs), } - if _, err := tcn.Register(&ntfn2); err != nil { + if _, err := n.Register(&ntfn2); err != nil { t.Fatalf("unable to register ntfn: %v", err) } - if err := tcn.UpdateConfDetails(*ntfn2.TxID, nil); err != nil { + if err := n.UpdateConfDetails(*ntfn2.TxID, nil); err != nil { t.Fatalf("unable to deliver conf details: %v", err) } @@ -478,11 +477,11 @@ func TestTxConfChainReorg(t *testing.T) { NumConfirmations: tx3NumConfs, Event: chainntnfs.NewConfirmationEvent(tx3NumConfs), } - if _, err := tcn.Register(&ntfn3); err != nil { + if _, err := n.Register(&ntfn3); err != nil { t.Fatalf("unable to register ntfn: %v", err) } - if err := tcn.UpdateConfDetails(*ntfn3.TxID, nil); err != nil { + if err := n.UpdateConfDetails(*ntfn3.TxID, nil); err != nil { t.Fatalf("unable to deliver conf details: %v", err) } @@ -490,11 +489,11 @@ func TestTxConfChainReorg(t *testing.T) { block1 := btcutil.NewBlock(&wire.MsgBlock{ Transactions: []*wire.MsgTx{&tx1}, }) - err := tcn.ConnectTip(nil, 8, block1.Transactions()) + err := n.ConnectTip(nil, 8, block1.Transactions()) if err != nil { t.Fatalf("Failed to connect block: %v", err) } - err = tcn.ConnectTip(nil, 9, nil) + err = n.ConnectTip(nil, 9, nil) if err != nil { t.Fatalf("Failed to connect block: %v", err) } @@ -502,7 +501,7 @@ func TestTxConfChainReorg(t *testing.T) { block2 := btcutil.NewBlock(&wire.MsgBlock{ Transactions: []*wire.MsgTx{&tx2, &tx3}, }) - err = tcn.ConnectTip(nil, 10, block2.Transactions()) + err = n.ConnectTip(nil, 10, block2.Transactions()) if err != nil { t.Fatalf("Failed to connect block: %v", err) } @@ -559,17 +558,17 @@ func TestTxConfChainReorg(t *testing.T) { // The block that included tx2 and tx3 is disconnected and two next // blocks without them are connected. - err = tcn.DisconnectTip(10) + err = n.DisconnectTip(10) if err != nil { t.Fatalf("Failed to connect block: %v", err) } - err = tcn.ConnectTip(nil, 10, nil) + err = n.ConnectTip(nil, 10, nil) if err != nil { t.Fatalf("Failed to connect block: %v", err) } - err = tcn.ConnectTip(nil, 11, nil) + err = n.ConnectTip(nil, 11, nil) if err != nil { t.Fatalf("Failed to connect block: %v", err) } @@ -617,12 +616,12 @@ func TestTxConfChainReorg(t *testing.T) { }) block4 := btcutil.NewBlock(&wire.MsgBlock{}) - err = tcn.ConnectTip(block3.Hash(), 12, block3.Transactions()) + err = n.ConnectTip(block3.Hash(), 12, block3.Transactions()) if err != nil { t.Fatalf("Failed to connect block: %v", err) } - err = tcn.ConnectTip(block4.Hash(), 13, block4.Transactions()) + err = n.ConnectTip(block4.Hash(), 13, block4.Transactions()) if err != nil { t.Fatalf("Failed to connect block: %v", err) } @@ -701,9 +700,9 @@ func TestTxConfHeightHintCache(t *testing.T) { tx2Height = 203 ) - // Initialize our TxConfNotifier instance backed by a height hint cache. + // Initialize our TxNotifier instance backed by a height hint cache. 
hintCache := newMockHintCache() - tcn := chainntnfs.NewTxConfNotifier( + n := chainntnfs.NewTxNotifier( startingHeight, 100, hintCache, ) @@ -724,10 +723,10 @@ func TestTxConfHeightHintCache(t *testing.T) { Event: chainntnfs.NewConfirmationEvent(2), } - if _, err := tcn.Register(ntfn1); err != nil { + if _, err := n.Register(ntfn1); err != nil { t.Fatalf("unable to register tx1: %v", err) } - if _, err := tcn.Register(ntfn2); err != nil { + if _, err := n.Register(ntfn2); err != nil { t.Fatalf("unable to register tx2: %v", err) } @@ -754,7 +753,7 @@ func TestTxConfHeightHintCache(t *testing.T) { Transactions: []*wire.MsgTx{&txDummy}, }) - err = tcn.ConnectTip( + err = n.ConnectTip( block1.Hash(), txDummyHeight, block1.Transactions(), ) if err != nil { @@ -781,10 +780,10 @@ func TestTxConfHeightHintCache(t *testing.T) { // Now, update the conf details reporting that the neither txn was found // in the historical dispatch. - if err := tcn.UpdateConfDetails(tx1Hash, nil); err != nil { + if err := n.UpdateConfDetails(tx1Hash, nil); err != nil { t.Fatalf("unable to update conf details: %v", err) } - if err := tcn.UpdateConfDetails(tx2Hash, nil); err != nil { + if err := n.UpdateConfDetails(tx2Hash, nil); err != nil { t.Fatalf("unable to update conf details: %v", err) } @@ -794,7 +793,7 @@ func TestTxConfHeightHintCache(t *testing.T) { Transactions: []*wire.MsgTx{&tx1}, }) - err = tcn.ConnectTip( + err = n.ConnectTip( block2.Hash(), tx1Height, block2.Transactions(), ) if err != nil { @@ -828,7 +827,7 @@ func TestTxConfHeightHintCache(t *testing.T) { Transactions: []*wire.MsgTx{&tx2}, }) - err = tcn.ConnectTip( + err = n.ConnectTip( block3.Hash(), tx2Height, block3.Transactions(), ) if err != nil { @@ -858,7 +857,7 @@ func TestTxConfHeightHintCache(t *testing.T) { // Finally, we'll attempt do disconnect the last block in order to // simulate a chain reorg. - if err := tcn.DisconnectTip(tx2Height); err != nil { + if err := n.DisconnectTip(tx2Height); err != nil { t.Fatalf("Failed to disconnect block: %v", err) } @@ -894,20 +893,20 @@ func TestTxConfTearDown(t *testing.T) { ) hintCache := newMockHintCache() - tcn := chainntnfs.NewTxConfNotifier(10, 100, hintCache) + n := chainntnfs.NewTxNotifier(10, 100, hintCache) - // Create the test transactions and register them with the - // TxConfNotifier to receive notifications. + // Create the test transactions and register them with the TxNotifier to + // receive notifications. tx1Hash := tx1.TxHash() ntfn1 := chainntnfs.ConfNtfn{ TxID: &tx1Hash, NumConfirmations: 1, Event: chainntnfs.NewConfirmationEvent(1), } - if _, err := tcn.Register(&ntfn1); err != nil { + if _, err := n.Register(&ntfn1); err != nil { t.Fatalf("unable to register ntfn: %v", err) } - if err := tcn.UpdateConfDetails(*ntfn1.TxID, nil); err != nil { + if err := n.UpdateConfDetails(*ntfn1.TxID, nil); err != nil { t.Fatalf("unable to update conf details: %v", err) } @@ -917,20 +916,20 @@ func TestTxConfTearDown(t *testing.T) { NumConfirmations: 2, Event: chainntnfs.NewConfirmationEvent(2), } - if _, err := tcn.Register(&ntfn2); err != nil { + if _, err := n.Register(&ntfn2); err != nil { t.Fatalf("unable to register ntfn: %v", err) } - if err := tcn.UpdateConfDetails(*ntfn2.TxID, nil); err != nil { + if err := n.UpdateConfDetails(*ntfn2.TxID, nil); err != nil { t.Fatalf("unable to update conf details: %v", err) } - // Include the transactions in a block and add it to the TxConfNotifier. + // Include the transactions in a block and add it to the TxNotifier. 
// This should confirm tx1, but not tx2. block := btcutil.NewBlock(&wire.MsgBlock{ Transactions: []*wire.MsgTx{&tx1, &tx2}, }) - err := tcn.ConnectTip(block.Hash(), 11, block.Transactions()) + err := n.ConnectTip(block.Hash(), 11, block.Transactions()) if err != nil { t.Fatalf("Failed to connect block: %v", err) } @@ -966,10 +965,10 @@ func TestTxConfTearDown(t *testing.T) { // The notification channels should be closed for notifications that // have not been dispatched yet, so we should not expect to receive any // more updates. - tcn.TearDown() + n.TearDown() // tx1 should not receive any more updates because it has already been - // confirmed and the TxConfNotifier has been shut down. + // confirmed and the TxNotifier has been shut down. select { case <-ntfn1.Event.Updates: t.Fatal("Received unexpected confirmation update for tx1") @@ -979,7 +978,7 @@ func TestTxConfTearDown(t *testing.T) { } // tx2 should not receive any more updates after the notifications - // channels have been closed and the TxConfNotifier shut down. + // channels have been closed and the TxNotifier shut down. select { case _, more := <-ntfn2.Event.Updates: if more { From f65401b439edd62727600ef0ad58e4e842a40489 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 05/23] chainntnfs/txnotifier: rename Register -> RegisterConf --- chainntnfs/bitcoindnotify/bitcoind.go | 2 +- chainntnfs/btcdnotify/btcd.go | 2 +- chainntnfs/neutrinonotify/neutrino.go | 2 +- chainntnfs/txnotifier.go | 15 +++++++-------- chainntnfs/txnotifier_test.go | 24 ++++++++++++------------ 5 files changed, 22 insertions(+), 23 deletions(-) diff --git a/chainntnfs/bitcoindnotify/bitcoind.go b/chainntnfs/bitcoindnotify/bitcoind.go index 4263f477..fa6812d4 100644 --- a/chainntnfs/bitcoindnotify/bitcoind.go +++ b/chainntnfs/bitcoindnotify/bitcoind.go @@ -963,7 +963,7 @@ func (b *BitcoindNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, // for `dispatch` will be returned if we are required to perform a // manual scan for the confirmation. Otherwise the notifier will begin // watching at tip for the transaction to confirm. - dispatch, err := b.txNotifier.Register(ntfn) + dispatch, err := b.txNotifier.RegisterConf(ntfn) if err != nil { return nil, err } diff --git a/chainntnfs/btcdnotify/btcd.go b/chainntnfs/btcdnotify/btcd.go index 1c357a5e..3fe740a7 100644 --- a/chainntnfs/btcdnotify/btcd.go +++ b/chainntnfs/btcdnotify/btcd.go @@ -1023,7 +1023,7 @@ func (b *BtcdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, _ []byte, // for `dispatch` will be returned if we are required to perform a // manual scan for the confirmation. Otherwise the notifier will begin // watching at tip for the transaction to confirm. - dispatch, err := b.txNotifier.Register(ntfn) + dispatch, err := b.txNotifier.RegisterConf(ntfn) if err != nil { return nil, err } diff --git a/chainntnfs/neutrinonotify/neutrino.go b/chainntnfs/neutrinonotify/neutrino.go index 4c43b328..8e854442 100644 --- a/chainntnfs/neutrinonotify/neutrino.go +++ b/chainntnfs/neutrinonotify/neutrino.go @@ -932,7 +932,7 @@ func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, // for `dispatch` will be returned if we are required to perform a // manual scan for the confirmation. Otherwise the notifier will begin // watching at tip for the transaction to confirm. 
- dispatch, err := n.txNotifier.Register(ntfn) + dispatch, err := n.txNotifier.RegisterConf(ntfn) if err != nil { return nil, err } diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index 2db2d7e5..f9743264 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -192,19 +192,18 @@ func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32, } } -// Register handles a new notification request. The client will be notified when -// the transaction gets a sufficient number of confirmations on the blockchain. -// The registration succeeds if no error is returned. If the returned -// HistoricalConfDispatch is non-nil, the caller is responsible for attempting -// to manually rescan blocks for the txid between the start and end heights. +// RegisterConf handles a new notification request. The client will be notified +// when the transaction gets a sufficient number of confirmations on the +// blockchain. The registration succeeds if no error is returned. If the +// returned HistoricalConfDispatch is non-nil, the caller is responsible for +// attempting to manually rescan blocks for the txid between the start and end +// heights. // // NOTE: If the transaction has already been included in a block on the chain, // the confirmation details must be provided with the UpdateConfDetails method, // otherwise we will wait for the transaction to confirm even though it already // has. -func (n *TxNotifier) Register( - ntfn *ConfNtfn) (*HistoricalConfDispatch, error) { - +func (n *TxNotifier) RegisterConf(ntfn *ConfNtfn) (*HistoricalConfDispatch, error) { select { case <-n.quit: return nil, ErrTxNotifierExiting diff --git a/chainntnfs/txnotifier_test.go b/chainntnfs/txnotifier_test.go index ded8f9ee..6675709b 100644 --- a/chainntnfs/txnotifier_test.go +++ b/chainntnfs/txnotifier_test.go @@ -124,7 +124,7 @@ func TestTxConfFutureDispatch(t *testing.T) { NumConfirmations: tx1NumConfs, Event: chainntnfs.NewConfirmationEvent(tx1NumConfs), } - if _, err := n.Register(&ntfn1); err != nil { + if _, err := n.RegisterConf(&ntfn1); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -134,7 +134,7 @@ func TestTxConfFutureDispatch(t *testing.T) { NumConfirmations: tx2NumConfs, Event: chainntnfs.NewConfirmationEvent(tx2NumConfs), } - if _, err := n.Register(&ntfn2); err != nil { + if _, err := n.RegisterConf(&ntfn2); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -297,7 +297,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { NumConfirmations: tx1NumConfs, Event: chainntnfs.NewConfirmationEvent(tx1NumConfs), } - if _, err := n.Register(&ntfn1); err != nil { + if _, err := n.RegisterConf(&ntfn1); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -308,7 +308,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { NumConfirmations: tx2NumConfs, Event: chainntnfs.NewConfirmationEvent(tx2NumConfs), } - if _, err := n.Register(&ntfn2); err != nil { + if _, err := n.RegisterConf(&ntfn2); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -447,7 +447,7 @@ func TestTxConfChainReorg(t *testing.T) { NumConfirmations: tx1NumConfs, Event: chainntnfs.NewConfirmationEvent(tx1NumConfs), } - if _, err := n.Register(&ntfn1); err != nil { + if _, err := n.RegisterConf(&ntfn1); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -462,7 +462,7 @@ func TestTxConfChainReorg(t *testing.T) { NumConfirmations: tx2NumConfs, Event: chainntnfs.NewConfirmationEvent(tx2NumConfs), } - if _, err := n.Register(&ntfn2); err != nil { + if _, err := 
n.RegisterConf(&ntfn2); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -477,7 +477,7 @@ func TestTxConfChainReorg(t *testing.T) { NumConfirmations: tx3NumConfs, Event: chainntnfs.NewConfirmationEvent(tx3NumConfs), } - if _, err := n.Register(&ntfn3); err != nil { + if _, err := n.RegisterConf(&ntfn3); err != nil { t.Fatalf("unable to register ntfn: %v", err) } @@ -723,14 +723,14 @@ func TestTxConfHeightHintCache(t *testing.T) { Event: chainntnfs.NewConfirmationEvent(2), } - if _, err := n.Register(ntfn1); err != nil { + if _, err := n.RegisterConf(ntfn1); err != nil { t.Fatalf("unable to register tx1: %v", err) } - if _, err := n.Register(ntfn2); err != nil { + if _, err := n.RegisterConf(ntfn2); err != nil { t.Fatalf("unable to register tx2: %v", err) } - // Both transactions should not have a height hint set, as Register + // Both transactions should not have a height hint set, as RegisterConf // should not alter the cache state. _, err := hintCache.QueryConfirmHint(tx1Hash) if err != chainntnfs.ErrConfirmHintNotFound { @@ -903,7 +903,7 @@ func TestTxConfTearDown(t *testing.T) { NumConfirmations: 1, Event: chainntnfs.NewConfirmationEvent(1), } - if _, err := n.Register(&ntfn1); err != nil { + if _, err := n.RegisterConf(&ntfn1); err != nil { t.Fatalf("unable to register ntfn: %v", err) } if err := n.UpdateConfDetails(*ntfn1.TxID, nil); err != nil { @@ -916,7 +916,7 @@ func TestTxConfTearDown(t *testing.T) { NumConfirmations: 2, Event: chainntnfs.NewConfirmationEvent(2), } - if _, err := n.Register(&ntfn2); err != nil { + if _, err := n.RegisterConf(&ntfn2); err != nil { t.Fatalf("unable to register ntfn: %v", err) } if err := n.UpdateConfDetails(*ntfn2.TxID, nil); err != nil { From 405e8f0fadf35ddc59045fb12489ee13ac3d0af3 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 06/23] chainntnfs/txnotifier: rename hintCache -> confirmHintCache --- chainntnfs/txnotifier.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index f9743264..6e7dd44d 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -123,10 +123,10 @@ type TxNotifier struct { // at which the transaction will have sufficient confirmations. ntfnsByConfirmHeight map[uint32]map[*ConfNtfn]struct{} - // hintCache is a cache used to maintain the latest height hints for + // confirmHintCache is a cache used to maintain the latest height hints for // transactions. Each height hint represents the earliest height at // which the transactions could have been confirmed within the chain. - hintCache ConfirmHintCache + confirmHintCache ConfirmHintCache // quit is closed in order to signal that the notifier is gracefully // exiting. @@ -179,7 +179,7 @@ func newConfNtfnSet() *confNtfnSet { // NewTxNotifier creates a TxNotifier. The current height of the blockchain is // accepted as a parameter. 
func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32, - hintCache ConfirmHintCache) *TxNotifier { + confirmHintCache ConfirmHintCache) *TxNotifier { return &TxNotifier{ currentHeight: startHeight, @@ -187,7 +187,7 @@ func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32, confNotifications: make(map[chainhash.Hash]*confNtfnSet), txsByInitialHeight: make(map[uint32]map[chainhash.Hash]struct{}), ntfnsByConfirmHeight: make(map[uint32]map[*ConfNtfn]struct{}), - hintCache: hintCache, + confirmHintCache: confirmHintCache, quit: make(chan struct{}), } } @@ -221,7 +221,7 @@ func (n *TxNotifier) RegisterConf(ntfn *ConfNtfn) (*HistoricalConfDispatch, erro // // TODO(conner): verify that all submitted height hints are identical. startHeight := ntfn.HeightHint - hint, err := n.hintCache.QueryConfirmHint(*ntfn.TxID) + hint, err := n.confirmHintCache.QueryConfirmHint(*ntfn.TxID) if err == nil { if hint > startHeight { Log.Debugf("Using height hint %d retrieved "+ @@ -361,7 +361,7 @@ func (n *TxNotifier) UpdateConfDetails(txid chainhash.Hash, Log.Debugf("Updating conf details for txid=%v details", txid) - err := n.hintCache.CommitConfirmHint(details.BlockHeight, txid) + err := n.confirmHintCache.CommitConfirmHint(details.BlockHeight, txid) if err != nil { // The error is not fatal, so we should not return an error to // the caller. @@ -584,7 +584,7 @@ out: } if len(txsToUpdateHints) > 0 { - err := n.hintCache.CommitConfirmHint( + err := n.confirmHintCache.CommitConfirmHint( n.currentHeight, txsToUpdateHints..., ) if err != nil { @@ -684,7 +684,7 @@ func (n *TxNotifier) DisconnectTip(blockHeight uint32) error { txs = append(txs, tx) } - err := n.hintCache.CommitConfirmHint(n.currentHeight, txs...) + err := n.confirmHintCache.CommitConfirmHint(n.currentHeight, txs...) if err != nil { Log.Errorf("Unable to update confirm hint to %d for %v: %v", n.currentHeight, txs, err) From f4128c9afb5fa3952a639cb707c977ba78fab200 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 9 Oct 2018 16:24:30 -0700 Subject: [PATCH 07/23] chainntnfs/txnotifier: move rescanState & confNtfnSet decl to top --- chainntnfs/txnotifier.go | 90 ++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 41 deletions(-) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index 6e7dd44d..981b784a 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -19,6 +19,55 @@ var ( ErrTxMaxConfs = errors.New("too many confirmations requested") ) +// rescanState indicates the progression of a registration before the notifier +// can begin dispatching confirmations at tip. +type rescanState byte + +const ( + // rescanNotStarted is the initial state, denoting that a historical + // dispatch may be required. + rescanNotStarted rescanState = iota + + // rescanPending indicates that a dispatch has already been made, and we + // are waiting for its completion. No other rescans should be dispatched + // while in this state. + rescanPending + + // rescanComplete signals either that a rescan was dispatched and has + // completed, or that we began watching at tip immediately. In either + // case, the notifier can only dispatch notifications from tip when in + // this state. + rescanComplete +) + +// confNtfnSet holds all known, registered confirmation notifications for a +// single txid. If duplicates notifications are requested, only one historical +// dispatch will be spawned to ensure redundant scans are not permitted. 
A +// single conf detail will be constructed and dispatched to all interested +// clients. +type confNtfnSet struct { + // ntfns keeps tracks of all the active client notification requests for + // a transaction. + ntfns map[uint64]*ConfNtfn + + // rescanStatus represents the current rescan state for the transaction. + rescanStatus rescanState + + // details serves as a cache of the confirmation details of a + // transaction that we'll use to determine if a transaction has already + // confirmed at the time of registration. + details *TxConfirmation +} + +// newConfNtfnSet constructs a fresh confNtfnSet for a group of clients +// interested in a notification for a particular txid. +func newConfNtfnSet() *confNtfnSet { + return &confNtfnSet{ + ntfns: make(map[uint64]*ConfNtfn), + rescanStatus: rescanNotStarted, + } +} + // ConfNtfn represents a notifier client's request to receive a notification // once the target transaction gets sufficient confirmations. The client is // asynchronously notified via the ConfirmationEvent channels. @@ -135,47 +184,6 @@ type TxNotifier struct { sync.Mutex } -// rescanState indicates the progression of a registration before the notifier -// can begin dispatching confirmations at tip. -type rescanState uint8 - -const ( - // rescanNotStarted is the initial state, denoting that a historical - // dispatch may be required. - rescanNotStarted rescanState = iota - - // rescanPending indicates that a dispatch has already been made, and we - // are waiting for its completion. No other rescans should be dispatched - // while in this state. - rescanPending - - // rescanComplete signals either that a rescan was dispatched and has - // completed, or that we began watching at tip immediately. In either - // case, the notifier can only dispatch notifications from tip when in - // this state. - rescanComplete -) - -// confNtfnSet holds all known, registered confirmation notifications for a -// single txid. If duplicates notifications are requested, only one historical -// dispatch will be spawned to ensure redundant scans are not permitted. A -// single conf detail will be constructed and dispatched to all interested -// clients. -type confNtfnSet struct { - ntfns map[uint64]*ConfNtfn - rescanStatus rescanState - details *TxConfirmation -} - -// newConfNtfnSet constructs a fresh confNtfnSet for a group of clients -// interested in a notification for a particular txid. -func newConfNtfnSet() *confNtfnSet { - return &confNtfnSet{ - ntfns: make(map[uint64]*ConfNtfn), - rescanStatus: rescanNotStarted, - } -} - // NewTxNotifier creates a TxNotifier. The current height of the blockchain is // accepted as a parameter. func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32, From 87123d5e237ae6c29f39cf3d2e20d0d844231777 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 08/23] chainntnfs: extend SpendEvent with reorg channel In this commit, we add a new channel within the SpendEvent struct that will be sent upon whenever the spending transaction of the registered outpoint gets reorged out of the chain. This will pave the road for successfully handling a funding transaction getting reorged out of the chain among other things. 
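As an illustrative aside, not part of this patch: a client holding one of these SpendEvent values could consume the new Reorg channel alongside Spend roughly as sketched below. The package name spendclient, the quit channel, and the handleSpend/onSpend/onReorg names are hypothetical placeholders; the sketch only assumes the Spend, Reorg, and Cancel fields described above.

package spendclient

import "github.com/lightningnetwork/lnd/chainntnfs"

// handleSpend is a minimal, illustrative sketch of a client loop consuming a
// SpendEvent. onSpend and onReorg stand in for caller-specific logic; this is
// not part of the patch itself.
func handleSpend(event *chainntnfs.SpendEvent, quit <-chan struct{},
	onSpend func(*chainntnfs.SpendDetail), onReorg func()) {

	// Cancel the registration once we no longer care about the outpoint.
	if event.Cancel != nil {
		defer event.Cancel()
	}

	for {
		select {
		case spend, ok := <-event.Spend:
			if !ok {
				// The notifier is shutting down.
				return
			}
			// The watched outpoint was spent on-chain.
			onSpend(spend)

		case _, ok := <-event.Reorg:
			if !ok {
				return
			}
			// The spending transaction was reorged out of the
			// chain, so any state derived from the earlier spend
			// should be unwound. A fresh Spend notification will
			// follow if the outpoint is spent again.
			onReorg()

		case <-quit:
			return
		}
	}
}

The design choice here is that the client, rather than the notifier, decides how to unwind state after a reorg, and simply keeps listening for the next Spend notification.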
--- chainntnfs/interface.go | 50 ++++++++++++++++++++++++++++++++++++---- chainntnfs/txnotifier.go | 8 ------- 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/chainntnfs/interface.go b/chainntnfs/interface.go index 5acec0a2..f8f386d5 100644 --- a/chainntnfs/interface.go +++ b/chainntnfs/interface.go @@ -159,19 +159,39 @@ type ConfirmationEvent struct { // Confirmed is a channel that will be sent upon once the transaction // has been fully confirmed. The struct sent will contain all the // details of the channel's confirmation. - Confirmed chan *TxConfirmation // MUST be buffered. + // + // NOTE: This channel must be buffered. + Confirmed chan *TxConfirmation // Updates is a channel that will sent upon, at every incremental // confirmation, how many confirmations are left to declare the // transaction as fully confirmed. - Updates chan uint32 // MUST be buffered. + // + // NOTE: This channel must be buffered with the number of required + // confirmations. + Updates chan uint32 // TODO(roasbeef): all goroutines on ln channel updates should also // have a struct chan that's closed if funding gets re-org out. Need // to sync, to request another confirmation event ntfn, then re-open // channel after confs. - NegativeConf chan int32 // MUST be buffered. + // NegativeConf is a channel that will be sent upon if the transaction + // confirms, but is later reorged out of the chain. The integer sent + // through the channel represents the reorg depth. + // + // NOTE: This channel must be buffered. + NegativeConf chan int32 +} + +// NewConfirmationEvent constructs a new ConfirmationEvent with newly opened +// channels. +func NewConfirmationEvent(numConfs uint32) *ConfirmationEvent { + return &ConfirmationEvent{ + Confirmed: make(chan *TxConfirmation, 1), + Updates: make(chan uint32, numConfs), + NegativeConf: make(chan int32, 1), + } } // SpendDetail contains details pertaining to a spent output. This struct itself @@ -196,7 +216,16 @@ type SpendDetail struct { type SpendEvent struct { // Spend is a receive only channel which will be sent upon once the // target outpoint has been spent. - Spend <-chan *SpendDetail // MUST be buffered. + // + // NOTE: This channel must be buffered. + Spend chan *SpendDetail + + // Reorg is a channel that will be sent upon once we detect the spending + // transaction of the outpoint in question has been reorged out of the + // chain. + // + // NOTE: This channel must be buffered. + Reorg chan struct{} // Cancel is a closure that should be executed by the caller in the // case that they wish to prematurely abandon their registered spend @@ -204,6 +233,15 @@ type SpendEvent struct { Cancel func() } +// NewSpendEvent constructs a new SpendEvent with newly opened channels. +func NewSpendEvent(cancel func()) *SpendEvent { + return &SpendEvent{ + Spend: make(chan *SpendDetail, 1), + Reorg: make(chan struct{}, 1), + Cancel: cancel, + } +} + // BlockEpoch represents metadata concerning each new block connected to the // main chain. type BlockEpoch struct { @@ -225,7 +263,9 @@ type BlockEpoch struct { type BlockEpochEvent struct { // Epochs is a receive only channel that will be sent upon each time a // new block is connected to the end of the main chain. - Epochs <-chan *BlockEpoch // MUST be buffered. + // + // NOTE: This channel must be buffered. + Epochs <-chan *BlockEpoch // Cancel is a closure that should be executed by the caller in the // case that they wish to abandon their registered spend notification. 
diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index 981b784a..bba85353 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -126,14 +126,6 @@ type HistoricalConfDispatch struct { EndHeight uint32 } -// NewConfirmationEvent constructs a new ConfirmationEvent with newly opened -// channels. -func NewConfirmationEvent(numConfs uint32) *ConfirmationEvent { - return &ConfirmationEvent{ - Confirmed: make(chan *TxConfirmation, 1), - Updates: make(chan uint32, numConfs), - NegativeConf: make(chan int32, 1), - } } // TxNotifier is used to register transaction confirmation notifications and From fc7a33b64fcc4ff7608ac100302f80ac60e2ef2d Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 09/23] chainntnfs/txnotifier: add fields/structs to track spend notifications In this commit, we introduce the required fields for the TxNotifier to properly carry its duties in notifying its registered clients about the spend of an outpoint. These are not yet used, but will be throughout the some of the following commits. --- chainntnfs/txnotifier.go | 137 +++++++++++++++++++++++++++++++++------ 1 file changed, 117 insertions(+), 20 deletions(-) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index bba85353..356a739a 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" ) @@ -68,6 +69,31 @@ func newConfNtfnSet() *confNtfnSet { } } +// spendNtfnSet holds all known, registered spend notifications for an outpoint. +// If duplicate notifications are requested, only one historical dispatch will +// be spawned to ensure redundant scans are not permitted. +type spendNtfnSet struct { + // ntfns keeps tracks of all the active client notification requests for + // an outpoint. + ntfns map[uint64]*SpendNtfn + + // rescanStatus represents the current rescan state for the outpoint. + rescanStatus rescanState + + // details serves as a cache of the spend details for an outpoint that + // we'll use to determine if an outpoint has already been spent at the + // time of registration. + details *SpendDetail +} + +// newSpendNtfnSet constructs a new spend notification set. +func newSpendNtfnSet() *spendNtfnSet { + return &spendNtfnSet{ + ntfns: make(map[uint64]*SpendNtfn), + rescanStatus: rescanNotStarted, + } +} + // ConfNtfn represents a notifier client's request to receive a notification // once the target transaction gets sufficient confirmations. The client is // asynchronously notified via the ConfirmationEvent channels. @@ -126,28 +152,78 @@ type HistoricalConfDispatch struct { EndHeight uint32 } +// SpendNtfn represents a client's request to receive a notification once an +// outpoint has been spent on-chain. The client is asynchronously notified via +// the SpendEvent channels. +type SpendNtfn struct { + // SpendID uniquely identies the spend notification request for the + // specified outpoint. + SpendID uint64 + + // OutPoint is the outpoint for which a client has requested a spend + // notification for. + OutPoint wire.OutPoint + + // PkScript is the script of the outpoint. This is needed in order to + // match compact filters when attempting a historical rescan to + // determine if the outpoint has already been spent. + PkScript []byte + + // Event contains references to the channels that the notifications are + // to be sent over. 
+ Event *SpendEvent + + // HeightHint is the earliest height in the chain that we expect to find + // the spending transaction of the specified outpoint. This value will + // be overridden by the spend hint cache if it contains an entry for it. + HeightHint uint32 + + // dispatched signals whether a spend notification has been dispatched + // to the client. + dispatched bool } -// TxNotifier is used to register transaction confirmation notifications and -// dispatch them as the transactions confirm. A client can request to be -// notified when a particular transaction has sufficient on-chain confirmations -// (or be notified immediately if the tx already does), and the TxNotifier -// will watch changes to the blockchain in order to satisfy these requests. +// HistoricalSpendDispatch parameterizes a manual rescan to determine the +// spending details (if any) of an outpoint. The parameters include the start +// and end block heights specifying the range of blocks to scan. +type HistoricalSpendDispatch struct { + // OutPoint is the outpoint for which we should attempt to find the + // spending transaction. + OutPoint wire.OutPoint + + // PkScript is the script of the outpoint. This is needed in order to + // match compact filters when attempting a historical rescan. + PkScript []byte + + // StartHeight specifies the block height at which to begin the + // historical rescan. + StartHeight uint32 + + // EndHeight specifies the last block height (inclusive) that the + // historical rescan should consider. + EndHeight uint32 +} + +// TxNotifier is a struct responsible for delivering transaction notifications +// to subscribers. These notifications can be of two different types: +// transaction confirmations and/or outpoint spends. The TxNotifier will watch +// the blockchain as new blocks come in, in order to satisfy its client +// requests. type TxNotifier struct { // currentHeight is the height of the tracked blockchain. It is used to // determine the number of confirmations a tx has and ensure blocks are // connected and disconnected in order. currentHeight uint32 - // reorgSafetyLimit is the chain depth beyond which it is assumed a block - // will not be reorganized out of the chain. This is used to determine when - // to prune old confirmation requests so that reorgs are handled correctly. - // The coinbase maturity period is a reasonable value to use. + // reorgSafetyLimit is the chain depth beyond which it is assumed a + // block will not be reorganized out of the chain. This is used to + // determine when to prune old notification requests so that reorgs are + // handled correctly. The coinbase maturity period is a reasonable value + // to use. reorgSafetyLimit uint32 - // reorgDepth is the depth of a chain organization that this system is being - // informed of. This is incremented as long as a sequence of blocks are - // disconnected without being interrupted by a new block. + // reorgDepth is the depth of a chain organization that this system is + // being informed of. This is incremented as long as a sequence of + // blocks are disconnected without being interrupted by a new block. reorgDepth uint32 // confNotifications is an index of notification requests by transaction @@ -156,19 +232,34 @@ type TxNotifier struct { // txsByInitialHeight is an index of watched transactions by the height // that they are included at in the blockchain. This is tracked so that - // incorrect notifications are not sent if a transaction is reorganized - // out of the chain and so that negative confirmations can be recognized.
+ // incorrect notifications are not sent if a transaction is reorged out + // of the chain and so that negative confirmations can be recognized. txsByInitialHeight map[uint32]map[chainhash.Hash]struct{} - // ntfnsByConfirmHeight is an index of notification requests by the height - // at which the transaction will have sufficient confirmations. + // ntfnsByConfirmHeight is an index of notification requests by the + // height at which the transaction will have sufficient confirmations. ntfnsByConfirmHeight map[uint32]map[*ConfNtfn]struct{} - // confirmHintCache is a cache used to maintain the latest height hints for - // transactions. Each height hint represents the earliest height at + // spendNotifications is an index of all active notification requests + // per outpoint. + spendNotifications map[wire.OutPoint]*spendNtfnSet + + // opsBySpendHeight is an index that keeps tracks of the spending height + // of an outpoint we are currently tracking notifications for. This is + // used in order to recover from the spending transaction of an outpoint + // being reorged out of the chain. + opsBySpendHeight map[uint32]map[wire.OutPoint]struct{} + + // confirmHintCache is a cache used to maintain the latest height hints + // for transactions. Each height hint represents the earliest height at // which the transactions could have been confirmed within the chain. confirmHintCache ConfirmHintCache + // spendHintCache is a cache used to maintain the latest height hints + // for outpoints. Each height hint represents the earliest height at + // which the outpoints could have been spent within the chain. + spendHintCache SpendHintCache + // quit is closed in order to signal that the notifier is gracefully // exiting. quit chan struct{} @@ -177,9 +268,12 @@ type TxNotifier struct { } // NewTxNotifier creates a TxNotifier. The current height of the blockchain is -// accepted as a parameter. +// accepted as a parameter. The different hint caches (confirm and spend) are +// used as an optimization in order to retrieve a better starting point when +// dispatching a recan for a historical event in the chain. func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32, - confirmHintCache ConfirmHintCache) *TxNotifier { + confirmHintCache ConfirmHintCache, + spendHintCache SpendHintCache) *TxNotifier { return &TxNotifier{ currentHeight: startHeight, @@ -187,7 +281,10 @@ func NewTxNotifier(startHeight uint32, reorgSafetyLimit uint32, confNotifications: make(map[chainhash.Hash]*confNtfnSet), txsByInitialHeight: make(map[uint32]map[chainhash.Hash]struct{}), ntfnsByConfirmHeight: make(map[uint32]map[*ConfNtfn]struct{}), + spendNotifications: make(map[wire.OutPoint]*spendNtfnSet), + opsBySpendHeight: make(map[uint32]map[wire.OutPoint]struct{}), confirmHintCache: confirmHintCache, + spendHintCache: spendHintCache, quit: make(chan struct{}), } } From 4d7fa9ecc48ddd13ae5f8f91f740d75434bf12f6 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 10/23] chainntnfs/txnotifier: allow registration of spend notifications In this commit, we introduce the registration logic for spend notifications to the TxNotifier. Most of this logic was taken from the different existing ChainNotifier implementations, however, it features some useful additions in order to make the ChainNotifier a bit more robust. Some of these additions include the following: 1. 
RegisterSpend will now return a HistoricalSpendDispatch struct, which includes the details for successfully determining if an outpoint was spent in the past. A HistoricalSpendDispatch will only be returned upon the first registration of an outpoint. This is done as, previously, if multiple clients registered for the same outpoint, then multiple historical rescans would also be dispatched, incurring a toll on the backend itself. 2. UpdateSpendDetails will now be used to determine when a historical rescan has completed, no matter if a spending transaction was found or not. This is needed in order to responsibly update the spend hints for outpoints at tip, otherwise we'd attempt to update them even though we haven't yet determined if they have been spent or not. This will dispatch notifications to all currently registered clients for the same outpoint. In the event that another client registers later on, then the spending details are cached in memory in order to prevent further historical rescans. --- chainntnfs/txnotifier.go | 305 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 301 insertions(+), 4 deletions(-) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index 356a739a..a4a0ad19 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -557,6 +557,300 @@ func (n *TxNotifier) dispatchConfDetails( return nil } +// RegisterSpend handles a new spend notification request. The client will be +// notified once the outpoint is detected as spent within the chain. +// +// The registration succeeds if no error is returned. If the returned +// HistoricalSpendDisaptch is non-nil, the caller is responsible for attempting +// to determine whether the outpoint has been spent between the start and end +// heights. +// +// NOTE: If the outpoint has already been spent within the chain before the +// notifier's current tip, the spend details must be provided with the +// UpdateSpendDetails method, otherwise we will wait for the outpoint to +// be spent at tip, even though it already has. +func (n *TxNotifier) RegisterSpend(ntfn *SpendNtfn) (*HistoricalSpendDispatch, error) { + select { + case <-n.quit: + return nil, ErrTxNotifierExiting + default: + } + + // Before proceeding to register the notification, we'll query our spend + // hint cache to determine whether a better one exists. + startHeight := ntfn.HeightHint + hint, err := n.spendHintCache.QuerySpendHint(ntfn.OutPoint) + if err == nil { + if hint > startHeight { + Log.Debugf("Using height hint %d retrieved from cache "+ + "for %v", startHeight, ntfn.OutPoint) + startHeight = hint + } + } else if err != ErrSpendHintNotFound { + Log.Errorf("Unable to query spend hint for %v: %v", + ntfn.OutPoint, err) + } + + n.Lock() + defer n.Unlock() + + Log.Infof("New spend subscription: spend_id=%d, outpoint=%v, "+ + "height_hint=%d", ntfn.SpendID, ntfn.OutPoint, ntfn.HeightHint) + + // Keep track of the notification request so that we can properly + // dispatch a spend notification later on. + spendSet, ok := n.spendNotifications[ntfn.OutPoint] + if !ok { + // If this is the first registration for the outpoint, we'll + // construct a spendNtfnSet to coalesce all notifications. + spendSet = newSpendNtfnSet() + n.spendNotifications[ntfn.OutPoint] = spendSet + } + spendSet.ntfns[ntfn.SpendID] = ntfn + + // We'll now let the caller know whether a historical rescan is needed + // depending on the current rescan status. 
+ switch spendSet.rescanStatus { + + // If the spending details for this outpoint have already been + // determined and cached, then we can use them to immediately dispatch + // the spend notification to the client. + case rescanComplete: + return nil, n.dispatchSpendDetails(ntfn, spendSet.details) + + // If there is an active rescan to determine whether the outpoint has + // been spent, then we won't trigger another one. + case rescanPending: + return nil, nil + + // Otherwise, we'll fall through and let the caller know that a rescan + // should be dispatched to determine whether the outpoint has already + // been spent. + case rescanNotStarted: + } + + // However, if the spend hint, either provided by the caller or + // retrieved from the cache, is found to be at a later height than the + // TxNotifier is aware of, then we'll refrain from dispatching a + // historical rescan and wait for the spend to come in at tip. + if startHeight > n.currentHeight { + Log.Debugf("Spend hint of %d for %v is above current height %d", + startHeight, ntfn.OutPoint, n.currentHeight) + + // We'll also set the rescan status as complete to ensure that + // spend hints for this outpoint get updated upon + // connected/disconnected blocks. + spendSet.rescanStatus = rescanComplete + return nil, nil + } + + // We'll set the rescan status to pending to ensure subsequent + // notifications don't also attempt a historical dispatch. + spendSet.rescanStatus = rescanPending + + return &HistoricalSpendDispatch{ + OutPoint: ntfn.OutPoint, + PkScript: ntfn.PkScript, + StartHeight: startHeight, + EndHeight: n.currentHeight, + }, nil +} + +// CancelSpend cancels an existing request for a spend notification of an +// outpoint. The request is identified by its spend ID. +func (n *TxNotifier) CancelSpend(op wire.OutPoint, spendID uint64) { + select { + case <-n.quit: + return + default: + } + + n.Lock() + defer n.Unlock() + + Log.Infof("Canceling spend notification: spend_id=%d, outpoint=%v", + spendID, op) + + spendSet, ok := n.spendNotifications[op] + if !ok { + return + } + ntfn, ok := spendSet.ntfns[spendID] + if !ok { + return + } + + // We'll close all the notification channels to let the client know + // their cancel request has been fulfilled. + close(ntfn.Event.Spend) + close(ntfn.Event.Reorg) + delete(spendSet.ntfns, spendID) +} + +// ProcessRelevantSpendTx processes a transaction provided externally. This will +// check whether the transaction is relevant to the notifier if it spends any +// outputs for which we currently have registered notifications for. If it is +// relevant, spend notifications will be dispatched to the caller. +func (n *TxNotifier) ProcessRelevantSpendTx(tx *wire.MsgTx, txHeight int32) error { + select { + case <-n.quit: + return ErrTxNotifierExiting + default: + } + + // Ensure we hold the lock throughout handling the notification to + // prevent the notifier from advancing its height underneath us. + n.Lock() + defer n.Unlock() + + // Grab the set of active registered outpoints to determine if the + // transaction spends any of them. + spendNtfns := n.spendNotifications + + // We'll check if this transaction spends an output that has an existing + // spend notification for it. + for i, txIn := range tx.TxIn { + // If this input doesn't spend an existing registered outpoint, + // we'll go on to the next. 
+ prevOut := txIn.PreviousOutPoint + if _, ok := spendNtfns[prevOut]; !ok { + continue + } + + // Otherwise, we'll create a spend summary and send off the + // details to the notification subscribers. + txHash := tx.TxHash() + details := &SpendDetail{ + SpentOutPoint: &prevOut, + SpenderTxHash: &txHash, + SpendingTx: tx, + SpenderInputIndex: uint32(i), + SpendingHeight: txHeight, + } + if err := n.updateSpendDetails(prevOut, details); err != nil { + return err + } + } + + return nil +} + +// UpdateSpendDetails attempts to update the spend details for all active spend +// notification requests for an outpoint. This method should be used once a +// historical scan of the chain has finished. If the historical scan did not +// find a spending transaction for the outpoint, the spend details may be nil. +// +// NOTE: A notification request for the outpoint must be registered first to +// ensure notifications are delivered. +func (n *TxNotifier) UpdateSpendDetails(op wire.OutPoint, + details *SpendDetail) error { + + select { + case <-n.quit: + return ErrTxNotifierExiting + default: + } + + // Ensure we hold the lock throughout handling the notification to + // prevent the notifier from advancing its height underneath us. + n.Lock() + defer n.Unlock() + + return n.updateSpendDetails(op, details) +} + +// updateSpendDetails attempts to update the spend details for all active spend +// notification requests for an outpoint. This method should be used once a +// historical scan of the chain has finished. If the historical scan did not +// find a spending transaction for the outpoint, the spend details may be nil. +// +// NOTE: This method must be called with the TxNotifier's lock held. +func (n *TxNotifier) updateSpendDetails(op wire.OutPoint, + details *SpendDetail) error { + + // Mark the ongoing historical rescan for this outpoint as finished. + // This will allow us to update the spend hints for this outpoint at + // tip. + spendSet, ok := n.spendNotifications[op] + if !ok { + return fmt.Errorf("no notifications found for outpoint %v", op) + } + + // If the spend details have already been found either at tip, then the + // notifications should have already been dispatched, so we can exit + // early to prevent sending duplicate notifications. + if spendSet.details != nil { + return nil + } + + // Since the historical rescan has completed for this outpoint, we'll + // mark its rescan status as complete in order to ensure that the + // TxNotifier can properly update its spend hints upon + // connected/disconnected blocks. + spendSet.rescanStatus = rescanComplete + + // If the historical rescan was not able to find a spending transaction + // for this outpoint, then we can track the spend at tip. + if details == nil { + return nil + } + + // If the historical rescan found the spending transaction for this + // outpoint, but it's at a later height than the notifier (this can + // happen due to latency with the backend during a reorg), then we'll + // defer handling the notification until the notifier has caught up to + // such height. + if uint32(details.SpendingHeight) > n.currentHeight { + return nil + } + + // Now that we've determined the outpoint has been spent, we'll commit + // its spending height as its hint in the cache and dispatch + // notifications to all of its respective clients. + err := n.spendHintCache.CommitSpendHint( + uint32(details.SpendingHeight), op, + ) + if err != nil { + // The error is not fatal as this is an optimistic optimization, + // so we'll avoid returning an error. 
+ Log.Debugf("Unable to update spend hint to %d for %v: %v", + details.SpendingHeight, op, err) + } + + spendSet.details = details + for _, ntfn := range spendSet.ntfns { + err := n.dispatchSpendDetails(ntfn, spendSet.details) + if err != nil { + return err + } + } + + return nil +} + +// dispatchSpendDetails dispatches a spend notification to the client. +// +// NOTE: This must be called with the TxNotifier's lock held. +func (n *TxNotifier) dispatchSpendDetails(ntfn *SpendNtfn, details *SpendDetail) error { + // If there are no spend details to dispatch or if the notification has + // already been dispatched, then we can skip dispatching to this client. + if details == nil || ntfn.dispatched { + return nil + } + + Log.Infof("Dispatching spend notification for outpoint=%v at height=%d", + ntfn.OutPoint, n.currentHeight) + + select { + case ntfn.Event.Spend <- details: + ntfn.dispatched = true + case <-n.quit: + return ErrTxNotifierExiting + } + + return nil +} + // ConnectTip handles a new block extending the current chain. This checks each // transaction in the block to see if any watched transactions are included. // Also, if any watched transactions now have the required number of @@ -890,13 +1184,16 @@ func (n *TxNotifier) TearDown() { for _, confSet := range n.confNotifications { for _, ntfn := range confSet.ntfns { - if ntfn.dispatched { - continue - } - close(ntfn.Event.Confirmed) close(ntfn.Event.Updates) close(ntfn.Event.NegativeConf) } } + + for _, spendSet := range n.spendNotifications { + for _, ntfn := range spendSet.ntfns { + close(ntfn.Event.Spend) + close(ntfn.Event.Reorg) + } + } } From e6b2755a57ed47c6e3ca465b5285ac66e697d8bc Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 11/23] chainntnfs/txnotifier: watch for spends at tip In this commit, we add support to allow the TxNotifier to properly determine whether a new block extending the chain contains a transaction that spends a registered outpoint. In the event that it does, spend notifications will be dispatched to all active registered clients for such outpoint. --- chainntnfs/txnotifier.go | 110 ++++++++++++++++++++++++++++++++------- 1 file changed, 90 insertions(+), 20 deletions(-) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index a4a0ad19..04fc81a9 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -851,13 +851,22 @@ func (n *TxNotifier) dispatchSpendDetails(ntfn *SpendNtfn, details *SpendDetail) return nil } -// ConnectTip handles a new block extending the current chain. This checks each -// transaction in the block to see if any watched transactions are included. -// Also, if any watched transactions now have the required number of -// confirmations as a result of this block being connected, this dispatches -// notifications. -func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, - blockHeight uint32, txns []*btcutil.Tx) error { +// ConnectTip handles a new block extending the current chain. It will go +// through every transaction and determine if it is relevant to any of its +// clients. A transaction can be relevant in either of the following two ways: +// +// 1. One of the inputs in the transaction spends an outpoint for which we +// currently have an active spend registration for. +// +// 2. The transaction is a transaction for which we currently have an active +// confirmation registration for. 
+// +// In the event that the transaction is relevant, a confirmation/spend +// notification will be dispatched to the relevant clients. Confirmation +// notifications will only be dispatched for transactions that have met the +// required number of confirmations required by the client. +func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, blockHeight uint32, + txns []*btcutil.Tx) error { select { case <-n.quit: @@ -876,14 +885,58 @@ func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, n.currentHeight++ n.reorgDepth = 0 - // Record any newly confirmed transactions by their confirmed height so - // that notifications get dispatched when the transactions reach their - // required number of confirmations. We'll also watch these transactions - // at the height they were included in the chain so reorgs can be - // handled correctly. + // First, we'll iterate over all the transactions found in this block to + // determine if it includes any relevant transactions to the TxNotifier. for _, tx := range txns { txHash := tx.Hash() + // In order to determine if this transaction is relevant to the + // notifier, we'll check its inputs for any outstanding spend + // notifications. + for i, txIn := range tx.MsgTx().TxIn { + prevOut := txIn.PreviousOutPoint + spendSet, ok := n.spendNotifications[prevOut] + if !ok { + continue + } + + // If we have any, we'll record its spend height so that + // notifications get dispatched to the respective + // clients. + spendDetails := &SpendDetail{ + SpentOutPoint: &prevOut, + SpenderTxHash: txHash, + SpendingTx: tx.MsgTx(), + SpenderInputIndex: uint32(i), + SpendingHeight: int32(blockHeight), + } + + // TODO(wilmer): cancel pending historical rescans if any? + spendSet.rescanStatus = rescanComplete + spendSet.details = spendDetails + for _, ntfn := range spendSet.ntfns { + // In the event that this notification was aware + // that the spending transaction of its outpoint + // was reorged out of the chain, we'll consume + // the reorg notification if it hasn't been + // done yet already. + select { + case <-ntfn.Event.Reorg: + default: + } + } + + // We'll note the outpoints spending height in order to + // correctly handle dispatching notifications when the + // spending transactions gets reorged out of the chain. + opSet, exists := n.opsBySpendHeight[blockHeight] + if !exists { + opSet = make(map[wire.OutPoint]struct{}) + n.opsBySpendHeight[blockHeight] = opSet + } + opSet[prevOut] = struct{}{} + } + // Check if we have any pending notifications for this txid. If // none are found, we can proceed to the next transaction. confSet, ok := n.confNotifications[*txHash] @@ -903,6 +956,7 @@ func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, TxIndex: uint32(tx.Index()), } + // TODO(wilmer): cancel pending historical rescans if any? confSet.rescanStatus = rescanComplete confSet.details = details for _, ntfn := range confSet.ntfns { @@ -1031,17 +1085,33 @@ out: return ErrTxNotifierExiting } } - delete(n.ntfnsByConfirmHeight, n.currentHeight) + delete(n.ntfnsByConfirmHeight, blockHeight) - // Clear entries from confNotifications and confTxsByInitialHeight. We - // assume that reorgs deeper than the reorg safety limit do not happen, - // so we can clear out entries for the block that is now mature. 
- if n.currentHeight >= n.reorgSafetyLimit { - matureBlockHeight := n.currentHeight - n.reorgSafetyLimit - for txHash := range n.txsByInitialHeight[matureBlockHeight] { - delete(n.confNotifications, txHash) + // We'll also dispatch spend notifications for all the outpoints that + // were spent at this new block height. + for op := range n.opsBySpendHeight[blockHeight] { + spendSet := n.spendNotifications[op] + for _, ntfn := range spendSet.ntfns { + err := n.dispatchSpendDetails(ntfn, spendSet.details) + if err != nil { + return err + } + } + } + + // Finally, we'll clear the entries from our set of notifications for + // transactions and outpoints that are no longer under the risk of being + // reorged out of the chain. + if blockHeight >= n.reorgSafetyLimit { + matureBlockHeight := blockHeight - n.reorgSafetyLimit + for tx := range n.txsByInitialHeight[matureBlockHeight] { + delete(n.confNotifications, tx) } delete(n.txsByInitialHeight, matureBlockHeight) + for op := range n.opsBySpendHeight[matureBlockHeight] { + delete(n.spendNotifications, op) + } + delete(n.opsBySpendHeight, matureBlockHeight) } return nil From f27e73fcb88a1d0c55cc5da572a2be425df73aac Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 12/23] chainntnfs/txnotifier: detect reorgs for spending transactions of registered outpoints In this commit, we introduce support to the TxNotifier to detect spending transactions of registered outpoints being reorged out of the chain. In the event that a reorg does occur, we'll consume the Spend notification if it hasn't been consumed yet, and dispatch a Reorg notification instead. --- chainntnfs/txnotifier.go | 66 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 5 deletions(-) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index 04fc81a9..13c45ff7 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -1118,9 +1118,11 @@ out: } // DisconnectTip handles the tip of the current chain being disconnected during -// a chain reorganization. If any watched transactions were included in this -// block, internal structures are updated to ensure a confirmation notification -// is not sent unless the transaction is included in the new chain. +// a chain reorganization. If any watched transactions or spending transactions +// for registered outpoints were included in this block, internal structures are +// updated to ensure confirmation/spend notifications are consumed (if not +// already), and reorg notifications are dispatched instead. Confirmation/spend +// notifications will be dispatched again upon block inclusion. func (n *TxNotifier) DisconnectTip(blockHeight uint32) error { select { case <-n.quit: @@ -1193,9 +1195,34 @@ func (n *TxNotifier) DisconnectTip(blockHeight uint32) error { } } - // Finally, we can remove the transactions we're currently watching that - // were included in this block height. + // We'll also go through our watched outpoints and attempt to drain + // their dispatched notifications to ensure dispatching notifications to + // clients later on is always non-blocking. We're only interested in + // outpoints whose spending transaction was included at the height being + // disconnected. + for op := range n.opsBySpendHeight[blockHeight] { + // Since the spending transaction is being reorged out of the + // chain, we'll need to clear out the spending details of the + // outpoint. 
+ spendSet := n.spendNotifications[op] + spendSet.details = nil + + // For all requests which have had a spend notification + // dispatched, we'll attempt to drain it and send a reorg + // notification instead. + for _, ntfn := range spendSet.ntfns { + if err := n.dispatchSpendReorg(ntfn); err != nil { + return err + } + } + } + + // Finally, we can remove the transactions that were confirmed and the + // outpoints that were spent at the height being disconnected. We'll + // still continue to track them until they have been confirmed/spent and + // are no longer under the risk of being reorged out of the chain again. delete(n.txsByInitialHeight, blockHeight) + delete(n.opsBySpendHeight, blockHeight) return nil } @@ -1243,6 +1270,35 @@ func (n *TxNotifier) dispatchConfReorg(ntfn *ConfNtfn, return nil } +// dispatchSpendReorg dispatches a reorg notification to the client if a spend +// notiification was already delivered. +// +// NOTE: This must be called with the TxNotifier's lock held. +func (n *TxNotifier) dispatchSpendReorg(ntfn *SpendNtfn) error { + if !ntfn.dispatched { + return nil + } + + // Attempt to drain the spend notification to ensure sends to the Spend + // channel are always non-blocking. + select { + case <-ntfn.Event.Spend: + default: + } + + // Send a reorg notification to the client in order for them to + // correctly handle reorgs. + select { + case ntfn.Event.Reorg <- struct{}{}: + case <-n.quit: + return ErrTxNotifierExiting + } + + ntfn.dispatched = false + + return nil +} + // TearDown is to be called when the owner of the TxNotifier is exiting. This // closes the event channels of all registered notifications that have not been // dispatched yet. From 2935392f51fca2a70054367c1d36e1a0559722a0 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 13/23] chainntnfs/txnotifier: correctly update confirm/spend hints on chain updates In this commit, we address an issue w.r.t. updating the confirm hints for transactions and spend hints for outpoints on chain updates. Previously, upon a block being disconnected, we'd attempt to commit a new height hint for all outstanding confirmation notifications. This is not correct because we'll end up modifying the height hint for things that have confirmed at a previous height than the one being disconnected. This would cause issues on restart when attempting a historical dispatch, as we would start scanning at a height above which the transaction actually confirmed in. This has been addressed by only updating the hints for outstanding notifications that are still unconfirmed/unspent and for notifications that were confirmed/spent within the block being connected/disconnected. --- chainntnfs/txnotifier.go | 158 ++++++++++++++++++++++++--------------- 1 file changed, 98 insertions(+), 60 deletions(-) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index 13c45ff7..50a243b1 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -992,54 +992,11 @@ func (n *TxNotifier) ConnectTip(blockHash *chainhash.Hash, blockHeight uint32, } } - // In order to update the height hint for all the required transactions - // under one database transaction, we'll gather the set of unconfirmed - // transactions along with the ones that confirmed at the current - // height. To do so, we'll iterate over the confNotifications map, which - // contains the transactions we currently have notifications for. 
Since - // this map doesn't tell us whether the transaction has confirmed or - // not, we'll need to look at txsByInitialHeight to determine so. - var txsToUpdateHints []chainhash.Hash - for confirmedTx := range n.txsByInitialHeight[n.currentHeight] { - txsToUpdateHints = append(txsToUpdateHints, confirmedTx) - } -out: - for maybeUnconfirmedTx, confSet := range n.confNotifications { - // We shouldn't update the confirm hints if we still have a - // pending rescan in progress. We'll skip writing any for - // notification sets that haven't reached rescanComplete. - if confSet.rescanStatus != rescanComplete { - continue - } - - for height, confirmedTxs := range n.txsByInitialHeight { - // Skip the transactions that confirmed at the new block - // height as those have already been added. - if height == blockHeight { - continue - } - - // If the transaction was found within the set of - // confirmed transactions at this height, we'll skip it. - if _, ok := confirmedTxs[maybeUnconfirmedTx]; ok { - continue out - } - } - txsToUpdateHints = append(txsToUpdateHints, maybeUnconfirmedTx) - } - - if len(txsToUpdateHints) > 0 { - err := n.confirmHintCache.CommitConfirmHint( - n.currentHeight, txsToUpdateHints..., - ) - if err != nil { - // The error is not fatal, so we should not return an - // error to the caller. - Log.Errorf("Unable to update confirm hint to %d for "+ - "%v: %v", n.currentHeight, txsToUpdateHints, - err) - } - } + // Now that we've determined which transactions were confirmed and which + // outpoints were spent within the new block, we can update their + // entries in their respective caches, along with all of our unconfirmed + // transactions and unspent outpoints. + n.updateHints(blockHeight) // Next, we'll dispatch an update to all of the notification clients for // our watched transactions with the number of confirmations left at @@ -1141,18 +1098,10 @@ func (n *TxNotifier) DisconnectTip(blockHeight uint32) error { n.currentHeight-- n.reorgDepth++ - // Rewind the height hint for all watched transactions. - var txs []chainhash.Hash - for tx := range n.confNotifications { - txs = append(txs, tx) - } - - err := n.confirmHintCache.CommitConfirmHint(n.currentHeight, txs...) - if err != nil { - Log.Errorf("Unable to update confirm hint to %d for %v: %v", - n.currentHeight, txs, err) - return err - } + // With the block disconnected, we'll update the confirm and spend hints + // for our transactions and outpoints to reflect the new height, except + // for those that have confirmed/spent at previous heights. + n.updateHints(blockHeight) // We'll go through all of our watched transactions and attempt to drain // their notification channels to ensure sending notifications to the @@ -1227,6 +1176,95 @@ func (n *TxNotifier) DisconnectTip(blockHeight uint32) error { return nil } +// updateHints attempts to update the confirm and spend hints for all relevant +// transactions and outpoints respectively. The height parameter is used to +// determine which transactions and outpoints we should update based on whether +// a new block is being connected/disconnected. +// +// NOTE: This must be called with the TxNotifier's lock held and after its +// height has already been reflected by a block being connected/disconnected. +func (n *TxNotifier) updateHints(height uint32) { + // TODO(wilmer): update under one database transaction. 
+ // + // To update the height hint for all the required transactions under one + // database transaction, we'll gather the set of unconfirmed + // transactions along with the ones that confirmed at the height being + // connected/disconnected. + txsToUpdateHints := n.unconfirmedTxs() + for confirmedTx := range n.txsByInitialHeight[height] { + txsToUpdateHints = append(txsToUpdateHints, confirmedTx) + } + err := n.confirmHintCache.CommitConfirmHint( + n.currentHeight, txsToUpdateHints..., + ) + if err != nil { + // The error is not fatal as this is an optimistic optimization, + // so we'll avoid returning an error. + Log.Debugf("Unable to update confirm hints to %d for "+ + "%v: %v", n.currentHeight, txsToUpdateHints, err) + } + + // Similarly, to update the height hint for all the required outpoints + // under one database transaction, we'll gather the set of unspent + // outpoints along with the ones that were spent at the height being + // connected/disconnected. + opsToUpdateHints := n.unspentOutPoints() + for spentOp := range n.opsBySpendHeight[height] { + opsToUpdateHints = append(opsToUpdateHints, spentOp) + } + err = n.spendHintCache.CommitSpendHint( + n.currentHeight, opsToUpdateHints..., + ) + if err != nil { + // The error is not fatal as this is an optimistic optimization, + // so we'll avoid returning an error. + Log.Debugf("Unable to update spend hints to %d for "+ + "%v: %v", n.currentHeight, opsToUpdateHints, err) + } +} + +// unconfirmedTxs returns the set of transactions that are still seen as +// unconfirmed by the TxNotifier. +// +// NOTE: This method must be called with the TxNotifier's lock held. +func (n *TxNotifier) unconfirmedTxs() []chainhash.Hash { + var unconfirmedTxs []chainhash.Hash + for tx, confNtfnSet := range n.confNotifications { + // If the notification is already aware of its confirmation + // details, or it's in the process of learning them, we'll skip + // it as we can't yet determine if it's confirmed or not. + if confNtfnSet.rescanStatus != rescanComplete || + confNtfnSet.details != nil { + continue + } + + unconfirmedTxs = append(unconfirmedTxs, tx) + } + + return unconfirmedTxs +} + +// unspentOutPoints returns the set of outpoints that are still seen as unspent +// by the TxNotifier. +// +// NOTE: This method must be called with the TxNotifier's lock held. +func (n *TxNotifier) unspentOutPoints() []wire.OutPoint { + var unspentOps []wire.OutPoint + for op, spendNtfnSet := range n.spendNotifications { + // If the notification is already aware of its spend details, or + // it's in the process of learning them, we'll skip it as we + // can't yet determine if it's unspent or not. + if spendNtfnSet.rescanStatus != rescanComplete || + spendNtfnSet.details != nil { + continue + } + + unspentOps = append(unspentOps, op) + } + + return unspentOps +} + // dispatchConfReorg dispatches a reorg notification to the client if the // confirmation notification was already delivered. 
// From 1fe3d598365528901515d2a6bac64f51953dcfe1 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 14/23] chainntnfs/txnotifier_test: extend tests to handle spend notifications --- chainntnfs/txnotifier_test.go | 1135 +++++++++++++++++++++++++++++---- 1 file changed, 1013 insertions(+), 122 deletions(-) diff --git a/chainntnfs/txnotifier_test.go b/chainntnfs/txnotifier_test.go index 6675709b..98a62f2f 100644 --- a/chainntnfs/txnotifier_test.go +++ b/chainntnfs/txnotifier_test.go @@ -10,7 +10,10 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" ) -var zeroHash chainhash.Hash +var ( + zeroHash chainhash.Hash + zeroOutPoint wire.OutPoint +) type mockHintCache struct { mu sync.Mutex @@ -96,9 +99,9 @@ func newMockHintCache() *mockHintCache { } } -// TestTxConfFutureDispatch tests that the TxNotifier dispatches registered -// notifications when the transaction confirms after registration. -func TestTxConfFutureDispatch(t *testing.T) { +// TestTxNotifierFutureConfDispatch tests that the TxNotifier dispatches +// registered notifications when a transaction confirms after registration. +func TestTxNotifierFutureConfDispatch(t *testing.T) { t.Parallel() const ( @@ -113,7 +116,7 @@ func TestTxConfFutureDispatch(t *testing.T) { ) hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier(10, 100, hintCache) + n := chainntnfs.NewTxNotifier(10, 100, hintCache, hintCache) // Create the test transactions and register them with the TxNotifier // before including them in a block to receive future @@ -192,7 +195,7 @@ func TestTxConfFutureDispatch(t *testing.T) { BlockHeight: 11, TxIndex: 0, } - assertEqualTxConf(t, txConf, &expectedConf) + assertConfDetails(t, txConf, &expectedConf) default: t.Fatalf("Expected confirmation for tx1") } @@ -263,15 +266,16 @@ func TestTxConfFutureDispatch(t *testing.T) { BlockHeight: 11, TxIndex: 1, } - assertEqualTxConf(t, txConf, &expectedConf) + assertConfDetails(t, txConf, &expectedConf) default: t.Fatalf("Expected confirmation for tx2") } } -// TestTxConfHistoricalDispatch tests that the TxNotifier dispatches registered -// notifications when the transaction is confirmed before registration. -func TestTxConfHistoricalDispatch(t *testing.T) { +// TestTxNotifierHistoricalConfDispatch tests that the TxNotifier dispatches +// registered notifications when the transaction is confirmed before +// registration. +func TestTxNotifierHistoricalConfDispatch(t *testing.T) { t.Parallel() const ( @@ -286,7 +290,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { ) hintCache := newMockHintCache() - n := chainntnfs.NewTxNotifier(10, 100, hintCache) + n := chainntnfs.NewTxNotifier(10, 100, hintCache, hintCache) // Create the test transactions at a height before the TxNotifier's // starting height so that they are confirmed once registering them. @@ -338,7 +342,7 @@ func TestTxConfHistoricalDispatch(t *testing.T) { // A confirmation notification for tx1 should also be dispatched. select { case txConf := <-ntfn1.Event.Confirmed: - assertEqualTxConf(t, txConf, &txConf1) + assertConfDetails(t, txConf, &txConf1) default: t.Fatalf("Expected confirmation for tx1") } @@ -413,16 +417,547 @@ func TestTxConfHistoricalDispatch(t *testing.T) { // its required number of confirmations. 
select { case txConf := <-ntfn2.Event.Confirmed: - assertEqualTxConf(t, txConf, &txConf2) + assertConfDetails(t, txConf, &txConf2) default: t.Fatalf("Expected confirmation for tx2") } } -// TestTxConfChainReorg tests that TxNotifier dispatches Confirmed and -// NegativeConf notifications appropriately when there is a chain -// reorganization. -func TestTxConfChainReorg(t *testing.T) { +// TestTxNotifierFutureSpendDispatch tests that the TxNotifier dispatches +// registered notifications when an outpoint is spent after registration. +func TestTxNotifierFutureSpendDispatch(t *testing.T) { + t.Parallel() + + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier(10, 100, hintCache, hintCache) + + // We'll start off by registering for a spend notification of an + // outpoint. + ntfn := &chainntnfs.SpendNtfn{ + OutPoint: zeroOutPoint, + Event: chainntnfs.NewSpendEvent(nil), + } + if _, err := n.RegisterSpend(ntfn); err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + + // We should not receive a notification as the outpoint has not been + // spent yet. + select { + case <-ntfn.Event.Spend: + t.Fatal("received unexpected spend notification") + default: + } + + // Construct the details of the spending transaction of the outpoint + // above. We'll include it in the next block, which should trigger a + // spend notification. + spendTx := wire.NewMsgTx(2) + spendTx.AddTxIn(&wire.TxIn{PreviousOutPoint: zeroOutPoint}) + spendTxHash := spendTx.TxHash() + block := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{spendTx}, + }) + err := n.ConnectTip(block.Hash(), 11, block.Transactions()) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + + expectedSpendDetails := &chainntnfs.SpendDetail{ + SpentOutPoint: &ntfn.OutPoint, + SpenderTxHash: &spendTxHash, + SpendingTx: spendTx, + SpenderInputIndex: 0, + SpendingHeight: 11, + } + + // Ensure that the details of the notification match as expected. + select { + case spendDetails := <-ntfn.Event.Spend: + assertSpendDetails(t, spendDetails, expectedSpendDetails) + default: + t.Fatal("expected to receive spend details") + } + + // Finally, we'll ensure that if the spending transaction has also been + // spent, then we don't receive another spend notification. + prevOut := wire.OutPoint{Hash: spendTxHash, Index: 0} + spendOfSpend := wire.NewMsgTx(2) + spendOfSpend.AddTxIn(&wire.TxIn{PreviousOutPoint: prevOut}) + block = btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{spendOfSpend}, + }) + err = n.ConnectTip(block.Hash(), 12, block.Transactions()) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + + select { + case <-ntfn.Event.Spend: + t.Fatal("received unexpected spend notification") + default: + } +} + +// TestTxNotifierHistoricalSpendDispatch tests that the TxNotifier dispatches +// registered notifications when an outpoint is spent before registration. +func TestTxNotifierHistoricalSpendDispatch(t *testing.T) { + t.Parallel() + + const startingHeight = 10 + + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache) + + // We'll start by constructing the spending details of the outpoint + // below. 
+ spentOutpoint := zeroOutPoint + spendTx := wire.NewMsgTx(2) + spendTx.AddTxIn(&wire.TxIn{PreviousOutPoint: zeroOutPoint}) + spendTxHash := spendTx.TxHash() + + expectedSpendDetails := &chainntnfs.SpendDetail{ + SpentOutPoint: &spentOutpoint, + SpenderTxHash: &spendTxHash, + SpendingTx: spendTx, + SpenderInputIndex: 0, + SpendingHeight: startingHeight - 1, + } + + // We'll register for a spend notification of the outpoint and ensure + // that a notification isn't dispatched. + ntfn := &chainntnfs.SpendNtfn{ + OutPoint: spentOutpoint, + Event: chainntnfs.NewSpendEvent(nil), + } + if _, err := n.RegisterSpend(ntfn); err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + + select { + case <-ntfn.Event.Spend: + t.Fatal("received unexpected spend notification") + default: + } + + // Because we're interested in testing the case of a historical spend, + // we'll hand off the spending details of the outpoint to the notifier + // as it is not possible for it to view historical events in the chain. + // By doing this, we replicate the functionality of the ChainNotifier. + err := n.UpdateSpendDetails(ntfn.OutPoint, expectedSpendDetails) + if err != nil { + t.Fatalf("unable to update spend details: %v", err) + } + + // Now that we have the spending details, we should receive a spend + // notification. We'll ensure that the details match as intended. + select { + case spendDetails := <-ntfn.Event.Spend: + assertSpendDetails(t, spendDetails, expectedSpendDetails) + default: + t.Fatalf("expected to receive spend details") + } + + // Finally, we'll ensure that if the spending transaction has also been + // spent, then we don't receive another spend notification. + prevOut := wire.OutPoint{Hash: spendTxHash, Index: 0} + spendOfSpend := wire.NewMsgTx(2) + spendOfSpend.AddTxIn(&wire.TxIn{PreviousOutPoint: prevOut}) + block := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{spendOfSpend}, + }) + err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions()) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + + select { + case <-ntfn.Event.Spend: + t.Fatal("received unexpected spend notification") + default: + } +} + +// TestTxNotifierMultipleHistoricalRescans ensures that we don't attempt to +// request multiple historical confirmation rescans per transactions. +func TestTxNotifierMultipleHistoricalConfRescans(t *testing.T) { + t.Parallel() + + const startingHeight = 10 + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache) + + // The first registration for a transaction in the notifier should + // request a historical confirmation rescan as it does not have a + // historical view of the chain. + confNtfn1 := &chainntnfs.ConfNtfn{ + ConfID: 0, + TxID: &zeroHash, + Event: chainntnfs.NewConfirmationEvent(1), + } + historicalConfDispatch1, err := n.RegisterConf(confNtfn1) + if err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + if historicalConfDispatch1 == nil { + t.Fatal("expected to receive historical dispatch request") + } + + // We'll register another confirmation notification for the same + // transaction. This should not request a historical confirmation rescan + // since the first one is still pending. 
+ confNtfn2 := &chainntnfs.ConfNtfn{ + ConfID: 1, + TxID: &zeroHash, + Event: chainntnfs.NewConfirmationEvent(1), + } + historicalConfDispatch2, err := n.RegisterConf(confNtfn2) + if err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + if historicalConfDispatch2 != nil { + t.Fatal("received unexpected historical rescan request") + } + + // Finally, we'll mark the ongoing historical rescan as complete and + // register another notification. We should also expect not to see a + // historical rescan request since the confirmation details should be + // cached. + confDetails := &chainntnfs.TxConfirmation{ + BlockHeight: startingHeight - 1, + } + if err := n.UpdateConfDetails(*confNtfn2.TxID, confDetails); err != nil { + t.Fatalf("unable to update conf details: %v", err) + } + + confNtfn3 := &chainntnfs.ConfNtfn{ + ConfID: 2, + TxID: &zeroHash, + Event: chainntnfs.NewConfirmationEvent(1), + } + historicalConfDispatch3, err := n.RegisterConf(confNtfn3) + if err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + if historicalConfDispatch3 != nil { + t.Fatal("received unexpected historical rescan request") + } +} + +// TestTxNotifierMultipleHistoricalRescans ensures that we don't attempt to +// request multiple historical spend rescans per outpoints. +func TestTxNotifierMultipleHistoricalSpendRescans(t *testing.T) { + t.Parallel() + + const startingHeight = 10 + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache) + + // The first registration for an outpoint in the notifier should request + // a historical spend rescan as it does not have a historical view of + // the chain. + ntfn1 := &chainntnfs.SpendNtfn{ + SpendID: 0, + OutPoint: zeroOutPoint, + Event: chainntnfs.NewSpendEvent(nil), + } + historicalDispatch1, err := n.RegisterSpend(ntfn1) + if err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + if historicalDispatch1 == nil { + t.Fatal("expected to receive historical dispatch request") + } + + // We'll register another spend notification for the same outpoint. This + // should not request a historical spend rescan since the first one is + // still pending. + ntfn2 := &chainntnfs.SpendNtfn{ + SpendID: 1, + OutPoint: zeroOutPoint, + Event: chainntnfs.NewSpendEvent(nil), + } + historicalDispatch2, err := n.RegisterSpend(ntfn2) + if err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + if historicalDispatch2 != nil { + t.Fatal("received unexpected historical rescan request") + } + + // Finally, we'll mark the ongoing historical rescan as complete and + // register another notification. We should also expect not to see a + // historical rescan request since the confirmation details should be + // cached. 
+ spendDetails := &chainntnfs.SpendDetail{ + SpentOutPoint: &ntfn2.OutPoint, + SpenderTxHash: &zeroHash, + SpendingTx: wire.NewMsgTx(2), + SpenderInputIndex: 0, + SpendingHeight: startingHeight - 1, + } + err = n.UpdateSpendDetails(ntfn2.OutPoint, spendDetails) + if err != nil { + t.Fatalf("unable to update spend details: %v", err) + } + + ntfn3 := &chainntnfs.SpendNtfn{ + SpendID: 2, + OutPoint: zeroOutPoint, + Event: chainntnfs.NewSpendEvent(nil), + } + historicalDispatch3, err := n.RegisterSpend(ntfn3) + if err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + if historicalDispatch3 != nil { + t.Fatal("received unexpected historical rescan request") + } +} + +// TestTxNotifierMultipleHistoricalNtfns ensures that the TxNotifier will only +// request one rescan for a transaction/outpoint when having multiple client +// registrations. Once the rescan has completed and retrieved the +// confirmation/spend details, a notification should be dispatched to _all_ +// clients. +func TestTxNotifierMultipleHistoricalNtfns(t *testing.T) { + t.Parallel() + + const ( + numNtfns = 5 + startingHeight = 10 + ) + + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache) + + // We'll start off by registered 5 clients for a confirmation + // notification on the same transaction. + confNtfns := make([]*chainntnfs.ConfNtfn, numNtfns) + for i := uint64(0); i < numNtfns; i++ { + confNtfns[i] = &chainntnfs.ConfNtfn{ + ConfID: i, + TxID: &zeroHash, + Event: chainntnfs.NewConfirmationEvent(1), + } + if _, err := n.RegisterConf(confNtfns[i]); err != nil { + t.Fatalf("unable to register conf ntfn #%d: %v", i, err) + } + } + + // Ensure none of them have received the confirmation details. + for i, ntfn := range confNtfns { + select { + case <-ntfn.Event.Confirmed: + t.Fatalf("request #%d received unexpected confirmation "+ + "notification", i) + default: + } + } + + // We'll assume a historical rescan was dispatched and found the + // following confirmation details. We'll let the notifier know so that + // it can stop watching at tip. + expectedConfDetails := &chainntnfs.TxConfirmation{ + BlockHeight: startingHeight - 1, + } + err := n.UpdateConfDetails(*confNtfns[0].TxID, expectedConfDetails) + if err != nil { + t.Fatalf("unable to update conf details: %v", err) + } + + // With the confirmation details retrieved, each client should now have + // been notified of the confirmation. + for i, ntfn := range confNtfns { + select { + case confDetails := <-ntfn.Event.Confirmed: + assertConfDetails(t, confDetails, expectedConfDetails) + default: + t.Fatalf("request #%d expected to received "+ + "confirmation notification", i) + } + } + + // In order to ensure that the confirmation details are properly cached, + // we'll register another client for the same transaction. We should not + // see a historical rescan request and the confirmation notification + // should come through immediately. 
+	extraConfNtfn := &chainntnfs.ConfNtfn{
+		ConfID: numNtfns + 1,
+		TxID:   &zeroHash,
+		Event:  chainntnfs.NewConfirmationEvent(1),
+	}
+	historicalConfRescan, err := n.RegisterConf(extraConfNtfn)
+	if err != nil {
+		t.Fatalf("unable to register conf ntfn: %v", err)
+	}
+	if historicalConfRescan != nil {
+		t.Fatal("received unexpected historical rescan request")
+	}
+
+	select {
+	case confDetails := <-extraConfNtfn.Event.Confirmed:
+		assertConfDetails(t, confDetails, expectedConfDetails)
+	default:
+		t.Fatal("expected to receive confirmation notification")
+	}
+
+	// Similarly, we'll do the same thing but for spend notifications.
+	spendNtfns := make([]*chainntnfs.SpendNtfn, numNtfns)
+	for i := uint64(0); i < numNtfns; i++ {
+		spendNtfns[i] = &chainntnfs.SpendNtfn{
+			SpendID:  i,
+			OutPoint: zeroOutPoint,
+			Event:    chainntnfs.NewSpendEvent(nil),
+		}
+		if _, err := n.RegisterSpend(spendNtfns[i]); err != nil {
+			t.Fatalf("unable to register spend ntfn #%d: %v", i, err)
+		}
+	}
+
+	// Ensure none of them have received the spend details.
+	for i, ntfn := range spendNtfns {
+		select {
+		case <-ntfn.Event.Spend:
+			t.Fatalf("request #%d received unexpected spend "+
+				"notification", i)
+		default:
+		}
+	}
+
+	// We'll assume a historical rescan was dispatched and found the
+	// following spend details. We'll let the notifier know so that it can
+	// stop watching at tip.
+	expectedSpendDetails := &chainntnfs.SpendDetail{
+		SpentOutPoint:     &spendNtfns[0].OutPoint,
+		SpenderTxHash:     &zeroHash,
+		SpendingTx:        wire.NewMsgTx(2),
+		SpenderInputIndex: 0,
+		SpendingHeight:    startingHeight - 1,
+	}
+	err = n.UpdateSpendDetails(spendNtfns[0].OutPoint, expectedSpendDetails)
+	if err != nil {
+		t.Fatalf("unable to update spend details: %v", err)
+	}
+
+	// With the spend details retrieved, each client should now have been
+	// notified of the spend.
+	for i, ntfn := range spendNtfns {
+		select {
+		case spendDetails := <-ntfn.Event.Spend:
+			assertSpendDetails(t, spendDetails, expectedSpendDetails)
+		default:
+			t.Fatalf("request #%d expected to receive spend "+
+				"notification", i)
+		}
+	}
+
+	// Finally, in order to ensure that the spend details are properly
+	// cached, we'll register another client for the same outpoint. We
+	// should not see a historical rescan request and the spend notification
+	// should come through immediately.
+	extraSpendNtfn := &chainntnfs.SpendNtfn{
+		SpendID:  numNtfns + 1,
+		OutPoint: zeroOutPoint,
+		Event:    chainntnfs.NewSpendEvent(nil),
+	}
+	historicalSpendRescan, err := n.RegisterSpend(extraSpendNtfn)
+	if err != nil {
+		t.Fatalf("unable to register spend ntfn: %v", err)
+	}
+	if historicalSpendRescan != nil {
+		t.Fatal("received unexpected historical rescan request")
+	}
+
+	select {
+	case spendDetails := <-extraSpendNtfn.Event.Spend:
+		assertSpendDetails(t, spendDetails, expectedSpendDetails)
+	default:
+		t.Fatal("expected to receive spend notification")
+	}
+}
+
+// TestTxNotifierCancelSpend ensures that a spend notification is not
+// dispatched after a client has canceled their intent to receive one.
+func TestTxNotifierCancelSpend(t *testing.T) {
+	t.Parallel()
+
+	const startingHeight = 10
+	hintCache := newMockHintCache()
+	n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
+
+	// We'll register two notification requests. Only the second one will be
+	// canceled.
+	ntfn1 := &chainntnfs.SpendNtfn{
+		SpendID:  0,
+		OutPoint: zeroOutPoint,
+		Event:    chainntnfs.NewSpendEvent(nil),
+	}
+	if _, err := n.RegisterSpend(ntfn1); err != nil {
+		t.Fatalf("unable to register spend ntfn: %v", err)
+	}
+
+	ntfn2 := &chainntnfs.SpendNtfn{
+		SpendID:  1,
+		OutPoint: zeroOutPoint,
+		Event:    chainntnfs.NewSpendEvent(nil),
+	}
+	if _, err := n.RegisterSpend(ntfn2); err != nil {
+		t.Fatalf("unable to register spend ntfn: %v", err)
+	}
+
+	// Construct the spending details of the outpoint and create a dummy
+	// block containing it.
+	spendTx := wire.NewMsgTx(2)
+	spendTx.AddTxIn(&wire.TxIn{PreviousOutPoint: ntfn1.OutPoint})
+	spendTxHash := spendTx.TxHash()
+	expectedSpendDetails := &chainntnfs.SpendDetail{
+		SpentOutPoint:     &ntfn1.OutPoint,
+		SpenderTxHash:     &spendTxHash,
+		SpendingTx:        spendTx,
+		SpenderInputIndex: 0,
+		SpendingHeight:    startingHeight + 1,
+	}
+
+	block := btcutil.NewBlock(&wire.MsgBlock{
+		Transactions: []*wire.MsgTx{spendTx},
+	})
+
+	// Before extending the notifier's tip with the dummy block above, we'll
+	// cancel the second request.
+	n.CancelSpend(ntfn2.OutPoint, ntfn2.SpendID)
+
+	err := n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions())
+	if err != nil {
+		t.Fatalf("unable to connect block: %v", err)
+	}
+
+	// The first request should still be active, so we should receive a
+	// spend notification with the correct spending details.
+	select {
+	case spendDetails := <-ntfn1.Event.Spend:
+		assertSpendDetails(t, spendDetails, expectedSpendDetails)
+	default:
+		t.Fatalf("expected to receive spend notification")
+	}
+
+	// The second one, however, should not have. The event's Spend channel
+	// must have also been closed to indicate to the caller that the
+	// TxNotifier can no longer fulfill their canceled request.
+	select {
+	case _, ok := <-ntfn2.Event.Spend:
+		if ok {
+			t.Fatal("expected Spend channel to be closed")
+		}
+	default:
+		t.Fatal("expected Spend channel to be closed")
+	}
+}
+
+// TestTxNotifierConfReorg ensures that clients are notified of a reorg when a
+// transaction for which they registered a confirmation notification has been
+// reorged out of the chain.
+func TestTxNotifierConfReorg(t *testing.T) {
 	t.Parallel()
 
 	const (
@@ -438,7 +973,7 @@ func TestTxConfChainReorg(t *testing.T) {
 	)
 
 	hintCache := newMockHintCache()
-	n := chainntnfs.NewTxNotifier(7, 100, hintCache)
+	n := chainntnfs.NewTxNotifier(7, 100, hintCache, hintCache)
 
 	// Tx 1 will be confirmed in block 9 and requires 2 confs.
 	tx1Hash := tx1.TxHash()
@@ -649,7 +1184,7 @@ func TestTxConfChainReorg(t *testing.T) {
 			BlockHeight: 12,
 			TxIndex:     0,
 		}
-		assertEqualTxConf(t, txConf, &expectedConf)
+		assertConfDetails(t, txConf, &expectedConf)
 	default:
 		t.Fatalf("Expected confirmation for tx2")
 	}
@@ -679,18 +1214,221 @@ func TestTxConfChainReorg(t *testing.T) {
 			BlockHeight: 12,
 			TxIndex:     1,
 		}
-		assertEqualTxConf(t, txConf, &expectedConf)
+		assertConfDetails(t, txConf, &expectedConf)
 	default:
 		t.Fatalf("Expected confirmation for tx3")
 	}
 }
 
-// TestTxConfHeightHintCache ensures that the height hints for transactions are
-// kept track of correctly with each new block connected/disconnected. This test
-// also asserts that the height hints are not updated until the simulated
+// TestTxNotifierSpendReorg ensures that clients are notified of a reorg when
+// the spending transaction of an outpoint for which they registered a spend
+// notification has been reorged out of the chain.
+func TestTxNotifierSpendReorg(t *testing.T) { + t.Parallel() + + const startingHeight = 10 + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache) + + // We'll have two outpoints that will be spent throughout the test. The + // first will be spent and will not experience a reorg, while the second + // one will. + op1 := zeroOutPoint + op1.Index = 1 + spendTx1 := wire.NewMsgTx(2) + spendTx1.AddTxIn(&wire.TxIn{PreviousOutPoint: op1}) + spendTxHash1 := spendTx1.TxHash() + expectedSpendDetails1 := &chainntnfs.SpendDetail{ + SpentOutPoint: &op1, + SpenderTxHash: &spendTxHash1, + SpendingTx: spendTx1, + SpenderInputIndex: 0, + SpendingHeight: startingHeight + 1, + } + + op2 := zeroOutPoint + op2.Index = 2 + spendTx2 := wire.NewMsgTx(2) + spendTx2.AddTxIn(&wire.TxIn{PreviousOutPoint: zeroOutPoint}) + spendTx2.AddTxIn(&wire.TxIn{PreviousOutPoint: op2}) + spendTxHash2 := spendTx2.TxHash() + + // The second outpoint will experience a reorg and get re-spent at a + // different height, so we'll need to construct the spend details for + // before and after the reorg. + expectedSpendDetails2BeforeReorg := chainntnfs.SpendDetail{ + SpentOutPoint: &op2, + SpenderTxHash: &spendTxHash2, + SpendingTx: spendTx2, + SpenderInputIndex: 1, + SpendingHeight: startingHeight + 2, + } + + // The spend details after the reorg will be exactly the same, except + // for the spend confirming at the next height. + expectedSpendDetails2AfterReorg := expectedSpendDetails2BeforeReorg + expectedSpendDetails2AfterReorg.SpendingHeight++ + + // We'll register for a spend notification for each outpoint above. + ntfn1 := &chainntnfs.SpendNtfn{ + SpendID: 78, + OutPoint: op1, + Event: chainntnfs.NewSpendEvent(nil), + } + if _, err := n.RegisterSpend(ntfn1); err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + + ntfn2 := &chainntnfs.SpendNtfn{ + SpendID: 21, + OutPoint: op2, + Event: chainntnfs.NewSpendEvent(nil), + } + if _, err := n.RegisterSpend(ntfn2); err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } + + // We'll extend the chain by connecting a new block at tip. This block + // will only contain the spending transaction of the first outpoint. + block1 := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{spendTx1}, + }) + err := n.ConnectTip( + block1.Hash(), startingHeight+1, block1.Transactions(), + ) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + + // We should receive a spend notification for the first outpoint with + // its correct spending details. + select { + case spendDetails := <-ntfn1.Event.Spend: + assertSpendDetails(t, spendDetails, expectedSpendDetails1) + default: + t.Fatal("expected to receive spend details") + } + + // We should not, however, receive one for the second outpoint as it has + // yet to be spent. + select { + case <-ntfn2.Event.Spend: + t.Fatal("received unexpected spend notification") + default: + } + + // Now, we'll extend the chain again, this time with a block containing + // the spending transaction of the second outpoint. + block2 := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{spendTx2}, + }) + err = n.ConnectTip( + block2.Hash(), startingHeight+2, block2.Transactions(), + ) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + + // We should not receive another spend notification for the first + // outpoint. 
+ select { + case <-ntfn1.Event.Spend: + t.Fatal("received unexpected spend notification") + default: + } + + // We should receive one for the second outpoint. + select { + case spendDetails := <-ntfn2.Event.Spend: + assertSpendDetails( + t, spendDetails, &expectedSpendDetails2BeforeReorg, + ) + default: + t.Fatal("expected to receive spend details") + } + + // Now, to replicate a chain reorg, we'll disconnect the block that + // contained the spending transaction of the second outpoint. + if err := n.DisconnectTip(startingHeight + 2); err != nil { + t.Fatalf("unable to disconnect block: %v", err) + } + + // No notifications should be dispatched for the first outpoint as it + // was spent at a previous height. + select { + case <-ntfn1.Event.Spend: + t.Fatal("received unexpected spend notification") + case <-ntfn1.Event.Reorg: + t.Fatal("received unexpected spend reorg notification") + default: + } + + // We should receive a reorg notification for the second outpoint. + select { + case <-ntfn2.Event.Spend: + t.Fatal("received unexpected spend notification") + case <-ntfn2.Event.Reorg: + default: + t.Fatal("expected spend reorg notification") + } + + // We'll now extend the chain with an empty block, to ensure that we can + // properly detect when an outpoint has been re-spent at a later height. + emptyBlock := btcutil.NewBlock(&wire.MsgBlock{}) + err = n.ConnectTip( + emptyBlock.Hash(), startingHeight+2, emptyBlock.Transactions(), + ) + if err != nil { + t.Fatalf("unable to disconnect block: %v", err) + } + + // We shouldn't receive notifications for either of the outpoints. + select { + case <-ntfn1.Event.Spend: + t.Fatal("received unexpected spend notification") + case <-ntfn1.Event.Reorg: + t.Fatal("received unexpected spend reorg notification") + case <-ntfn2.Event.Spend: + t.Fatal("received unexpected spend notification") + case <-ntfn2.Event.Reorg: + t.Fatal("received unexpected spend reorg notification") + default: + } + + // Finally, extend the chain with another block containing the same + // spending transaction of the second outpoint. + err = n.ConnectTip( + block2.Hash(), startingHeight+3, block2.Transactions(), + ) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + + // We should now receive a spend notification once again for the second + // outpoint containing the new spend details. + select { + case spendDetails := <-ntfn2.Event.Spend: + assertSpendDetails( + t, spendDetails, &expectedSpendDetails2AfterReorg, + ) + default: + t.Fatalf("expected to receive spend notification") + } + + // Once again, we should not receive one for the first outpoint. + select { + case <-ntfn1.Event.Spend: + t.Fatal("received unexpected spend notification") + default: + } +} + +// TestTxNotifierConfirmHintCache ensures that the height hints for transactions +// are kept track of correctly with each new block connected/disconnected. This +// test also asserts that the height hints are not updated until the simulated // historical dispatches have returned, and we know the transactions aren't // already in the chain. -func TestTxConfHeightHintCache(t *testing.T) { +func TestTxNotifierConfirmHintCache(t *testing.T) { t.Parallel() const ( @@ -702,9 +1440,7 @@ func TestTxConfHeightHintCache(t *testing.T) { // Initialize our TxNotifier instance backed by a height hint cache. 
 	hintCache := newMockHintCache()
-	n := chainntnfs.NewTxNotifier(
-		startingHeight, 100, hintCache,
-	)
+	n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
 
 	// Create two test transactions and register them for notifications.
 	tx1 := wire.MsgTx{Version: 1}
@@ -884,129 +1620,284 @@ func TestTxConfHeightHintCache(t *testing.T) {
 	}
 }
 
-func TestTxConfTearDown(t *testing.T) {
+// TestTxNotifierSpendHintCache ensures that the height hints for outpoints are
+// kept track of correctly with each new block connected/disconnected. This test
+// also asserts that the height hints are not updated until the simulated
+// historical dispatches have returned, and we know the outpoints haven't
+// already been spent in the chain.
+func TestTxNotifierSpendHintCache(t *testing.T) {
 	t.Parallel()
 
-	var (
-		tx1 = wire.MsgTx{Version: 1}
-		tx2 = wire.MsgTx{Version: 2}
+	const (
+		startingHeight = 200
+		dummyHeight    = 201
+		op1Height      = 202
+		op2Height      = 203
 	)
 
+	// Initialize our TxNotifier instance backed by a height hint cache.
 	hintCache := newMockHintCache()
-	n := chainntnfs.NewTxNotifier(10, 100, hintCache)
+	n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache)
 
-	// Create the test transactions and register them with the TxNotifier to
-	// receive notifications.
-	tx1Hash := tx1.TxHash()
-	ntfn1 := chainntnfs.ConfNtfn{
-		TxID: &tx1Hash,
+	// Create two test outpoints and register them for spend notifications.
+	op1 := wire.OutPoint{Hash: zeroHash, Index: 1}
+	ntfn1 := &chainntnfs.SpendNtfn{
+		OutPoint: op1,
+		Event:    chainntnfs.NewSpendEvent(nil),
+	}
+	op2 := wire.OutPoint{Hash: zeroHash, Index: 2}
+	ntfn2 := &chainntnfs.SpendNtfn{
+		OutPoint: op2,
+		Event:    chainntnfs.NewSpendEvent(nil),
+	}
+
+	if _, err := n.RegisterSpend(ntfn1); err != nil {
+		t.Fatalf("unable to register spend for op1: %v", err)
+	}
+	if _, err := n.RegisterSpend(ntfn2); err != nil {
+		t.Fatalf("unable to register spend for op2: %v", err)
+	}
+
+	// Both outpoints should not have a spend hint set upon registration, as
+	// we must first determine whether they have already been spent in the
+	// chain.
+	_, err := hintCache.QuerySpendHint(op1)
+	if err != chainntnfs.ErrSpendHintNotFound {
+		t.Fatalf("unexpected error when querying for height hint "+
+			"expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
+			err)
+	}
+	_, err = hintCache.QuerySpendHint(op2)
+	if err != chainntnfs.ErrSpendHintNotFound {
+		t.Fatalf("unexpected error when querying for height hint "+
+			"expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
+			err)
+	}
+
+	// Create a new empty block and extend the chain.
+	emptyBlock := btcutil.NewBlock(&wire.MsgBlock{})
+	err = n.ConnectTip(
+		emptyBlock.Hash(), dummyHeight, emptyBlock.Transactions(),
+	)
+	if err != nil {
+		t.Fatalf("unable to connect block: %v", err)
+	}
+
+	// Since we haven't called UpdateSpendDetails on any of the test
+	// outpoints, this implies that there is still a pending historical
+	// rescan for them, so their spend hints should not be created/updated.
+	_, err = hintCache.QuerySpendHint(op1)
+	if err != chainntnfs.ErrSpendHintNotFound {
+		t.Fatalf("unexpected error when querying for height hint "+
+			"expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
+			err)
+	}
+	_, err = hintCache.QuerySpendHint(op2)
+	if err != chainntnfs.ErrSpendHintNotFound {
+		t.Fatalf("unexpected error when querying for height hint "+
+			"expected: %v, got %v", chainntnfs.ErrSpendHintNotFound,
+			err)
+	}
+
+	// Now, we'll simulate that their historical rescans have finished by
+	// calling UpdateSpendDetails. This should allow their spend hints to be
+	// updated upon every block connected/disconnected.
+	if err := n.UpdateSpendDetails(ntfn1.OutPoint, nil); err != nil {
+		t.Fatalf("unable to update spend details: %v", err)
+	}
+	if err := n.UpdateSpendDetails(ntfn2.OutPoint, nil); err != nil {
+		t.Fatalf("unable to update spend details: %v", err)
+	}
+
+	// We'll create a new block that only contains the spending transaction
+	// of the first outpoint.
+	spendTx1 := wire.NewMsgTx(2)
+	spendTx1.AddTxIn(&wire.TxIn{PreviousOutPoint: ntfn1.OutPoint})
+	block1 := btcutil.NewBlock(&wire.MsgBlock{
+		Transactions: []*wire.MsgTx{spendTx1},
+	})
+	err = n.ConnectTip(block1.Hash(), op1Height, block1.Transactions())
+	if err != nil {
+		t.Fatalf("unable to connect block: %v", err)
+	}
+
+	// Both outpoints should have their spend hints reflect the height of
+	// the new block being connected due to the first outpoint being spent
+	// at this height, and the second outpoint still being unspent.
+	op1Hint, err := hintCache.QuerySpendHint(ntfn1.OutPoint)
+	if err != nil {
+		t.Fatalf("unable to query for spend hint of op1: %v", err)
+	}
+	if op1Hint != op1Height {
+		t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
+	}
+	op2Hint, err := hintCache.QuerySpendHint(ntfn2.OutPoint)
+	if err != nil {
+		t.Fatalf("unable to query for spend hint of op2: %v", err)
+	}
+	if op2Hint != op1Height {
+		t.Fatalf("expected hint %d, got %d", op1Height, op2Hint)
+	}
+
+	// Then, we'll create another block that spends the second outpoint.
+	spendTx2 := wire.NewMsgTx(2)
+	spendTx2.AddTxIn(&wire.TxIn{PreviousOutPoint: ntfn2.OutPoint})
+	block2 := btcutil.NewBlock(&wire.MsgBlock{
+		Transactions: []*wire.MsgTx{spendTx2},
+	})
+	err = n.ConnectTip(block2.Hash(), op2Height, block2.Transactions())
+	if err != nil {
+		t.Fatalf("unable to connect block: %v", err)
+	}
+
+	// Only the second outpoint should have its spend hint updated due to
+	// being spent within the new block. The first outpoint's spend hint
+	// should remain the same as it's already been spent before.
+	op1Hint, err = hintCache.QuerySpendHint(ntfn1.OutPoint)
+	if err != nil {
+		t.Fatalf("unable to query for spend hint of op1: %v", err)
+	}
+	if op1Hint != op1Height {
+		t.Fatalf("expected hint %d, got %d", op1Height, op1Hint)
+	}
+	op2Hint, err = hintCache.QuerySpendHint(ntfn2.OutPoint)
+	if err != nil {
+		t.Fatalf("unable to query for spend hint of op2: %v", err)
+	}
+	if op2Hint != op2Height {
+		t.Fatalf("expected hint %d, got %d", op2Height, op2Hint)
+	}
+
+	// Finally, we'll attempt to disconnect the last block in order to
+	// simulate a chain reorg.
+	if err := n.DisconnectTip(op2Height); err != nil {
+		t.Fatalf("unable to disconnect block: %v", err)
+	}
+
+	// This should update the second outpoint's spend hint within the cache
+	// to the previous height, as that's where its spending transaction was
+	// included within the chain. The first outpoint's spend hint should
+	// remain the same.
+ op1Hint, err = hintCache.QuerySpendHint(ntfn1.OutPoint) + if err != nil { + t.Fatalf("unable to query for spend hint of op1: %v", err) + } + if op1Hint != op1Height { + t.Fatalf("expected hint %d, got %d", op1Height, op1Hint) + } + op2Hint, err = hintCache.QuerySpendHint(ntfn2.OutPoint) + if err != nil { + t.Fatalf("unable to query for spend hint of op2: %v", err) + } + if op2Hint != op1Height { + t.Fatalf("expected hint %d, got %d", op1Height, op2Hint) + } +} + +// TestTxNotifierTearDown ensures that the TxNotifier properly alerts clients +// that it is shutting down and will be unable to deliver notifications. +func TestTxNotifierTearDown(t *testing.T) { + t.Parallel() + + hintCache := newMockHintCache() + n := chainntnfs.NewTxNotifier(10, 100, hintCache, hintCache) + + // To begin the test, we'll register for a confirmation and spend + // notification. + confNtfn := &chainntnfs.ConfNtfn{ + TxID: &zeroHash, NumConfirmations: 1, Event: chainntnfs.NewConfirmationEvent(1), } - if _, err := n.RegisterConf(&ntfn1); err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - if err := n.UpdateConfDetails(*ntfn1.TxID, nil); err != nil { - t.Fatalf("unable to update conf details: %v", err) + if _, err := n.RegisterConf(confNtfn); err != nil { + t.Fatalf("unable to register conf ntfn: %v", err) } - tx2Hash := tx2.TxHash() - ntfn2 := chainntnfs.ConfNtfn{ - TxID: &tx2Hash, - NumConfirmations: 2, - Event: chainntnfs.NewConfirmationEvent(2), + spendNtfn := &chainntnfs.SpendNtfn{ + OutPoint: zeroOutPoint, + Event: chainntnfs.NewSpendEvent(nil), } - if _, err := n.RegisterConf(&ntfn2); err != nil { - t.Fatalf("unable to register ntfn: %v", err) - } - if err := n.UpdateConfDetails(*ntfn2.TxID, nil); err != nil { - t.Fatalf("unable to update conf details: %v", err) + if _, err := n.RegisterSpend(spendNtfn); err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) } - // Include the transactions in a block and add it to the TxNotifier. - // This should confirm tx1, but not tx2. - block := btcutil.NewBlock(&wire.MsgBlock{ - Transactions: []*wire.MsgTx{&tx1, &tx2}, - }) - - err := n.ConnectTip(block.Hash(), 11, block.Transactions()) - if err != nil { - t.Fatalf("Failed to connect block: %v", err) - } - - // We do not care about the correctness of the notifications since they - // are tested in other methods, but we'll still attempt to retrieve them - // for the sake of not being able to later once the notification - // channels are closed. - select { - case <-ntfn1.Event.Updates: - default: - t.Fatal("Expected confirmation update for tx1") - } - - select { - case <-ntfn1.Event.Confirmed: - default: - t.Fatalf("Expected confirmation for tx1") - } - - select { - case <-ntfn2.Event.Updates: - default: - t.Fatal("Expected confirmation update for tx2") - } - - select { - case txConf := <-ntfn2.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx2: %v", txConf) - default: - } - - // The notification channels should be closed for notifications that - // have not been dispatched yet, so we should not expect to receive any - // more updates. + // With the notifications registered, we'll now tear down the notifier. + // The notification channels should be closed for notifications, whether + // they have been dispatched or not, so we should not expect to receive + // any more updates. n.TearDown() - // tx1 should not receive any more updates because it has already been - // confirmed and the TxNotifier has been shut down. 
select { - case <-ntfn1.Event.Updates: - t.Fatal("Received unexpected confirmation update for tx1") - case txConf := <-ntfn1.Event.Confirmed: - t.Fatalf("Received unexpected confirmation for tx1: %v", txConf) + case _, ok := <-confNtfn.Event.Confirmed: + if ok { + t.Fatal("expected closed Confirmed channel for conf ntfn") + } + case _, ok := <-confNtfn.Event.Updates: + if ok { + t.Fatal("expected closed Updates channel for conf ntfn") + } + case _, ok := <-confNtfn.Event.NegativeConf: + if ok { + t.Fatal("expected closed NegativeConf channel for conf ntfn") + } + case _, ok := <-spendNtfn.Event.Spend: + if ok { + t.Fatal("expected closed Spend channel for spend ntfn") + } + case _, ok := <-spendNtfn.Event.Reorg: + if ok { + t.Fatalf("expected closed Reorg channel for spend ntfn") + } default: + t.Fatalf("expected closed notification channels for all ntfns") } - // tx2 should not receive any more updates after the notifications - // channels have been closed and the TxNotifier shut down. - select { - case _, more := <-ntfn2.Event.Updates: - if more { - t.Fatal("Expected closed Updates channel for tx2") - } - case _, more := <-ntfn2.Event.Confirmed: - if more { - t.Fatalf("Expected closed Confirmed channel for tx2") - } - default: - t.Fatalf("Expected closed notification channels for tx2") + // Now that the notifier is torn down, we should no longer be able to + // register notification requests. + if _, err := n.RegisterConf(confNtfn); err == nil { + t.Fatal("expected confirmation registration to fail") + } + if _, err := n.RegisterSpend(spendNtfn); err == nil { + t.Fatal("expected spend registration to fail") } } -func assertEqualTxConf(t *testing.T, - actualConf, expectedConf *chainntnfs.TxConfirmation) { +func assertConfDetails(t *testing.T, result, expected *chainntnfs.TxConfirmation) { + t.Helper() - if actualConf.BlockHeight != expectedConf.BlockHeight { + if result.BlockHeight != expected.BlockHeight { t.Fatalf("Incorrect block height in confirmation details: "+ - "expected %d, got %d", - expectedConf.BlockHeight, actualConf.BlockHeight) + "expected %d, got %d", expected.BlockHeight, + result.BlockHeight) } - if !actualConf.BlockHash.IsEqual(expectedConf.BlockHash) { + if !result.BlockHash.IsEqual(expected.BlockHash) { t.Fatalf("Incorrect block hash in confirmation details: "+ - "expected %d, got %d", expectedConf.BlockHash, actualConf.BlockHash) + "expected %d, got %d", expected.BlockHash, + result.BlockHash) } - if actualConf.TxIndex != expectedConf.TxIndex { + if result.TxIndex != expected.TxIndex { t.Fatalf("Incorrect tx index in confirmation details: "+ - "expected %d, got %d", expectedConf.TxIndex, actualConf.TxIndex) + "expected %d, got %d", expected.TxIndex, result.TxIndex) + } +} + +func assertSpendDetails(t *testing.T, result, expected *chainntnfs.SpendDetail) { + t.Helper() + + if *result.SpentOutPoint != *expected.SpentOutPoint { + t.Fatalf("expected spent outpoint %v, got %v", + expected.SpentOutPoint, result.SpentOutPoint) + } + if !result.SpenderTxHash.IsEqual(expected.SpenderTxHash) { + t.Fatalf("expected spender tx hash %v, got %v", + expected.SpenderTxHash, result.SpenderTxHash) + } + if result.SpenderInputIndex != expected.SpenderInputIndex { + t.Fatalf("expected spender input index %d, got %d", + expected.SpenderInputIndex, result.SpenderInputIndex) + } + if result.SpendingHeight != expected.SpendingHeight { + t.Fatalf("expected spending height %d, got %d", + expected.SpendingHeight, result.SpendingHeight) } } From 180dffd1541e9fc4cc870d65e6dba7230fe121e0 Mon Sep 
17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 15/23] chainntnfs/bitcoindnotify: handle spend notification registration w/ TxNotifier In this commit, we modify the logic within RegisterSpendNtfn for the BitcoindNotifier to account for the recent changes made to the TxNotifier. Since it is now able to handle spend notification registration and dispatch, we can bypass all the current logic within the BitcoindNotifier and interact directly with the TxNotifier instead. The most notable changes include the following: 1. We'll only attempt a historical rescan if the TxNotifier tells us so. 2. We'll dispatch the historical rescan within the main goroutine to prevent WaitGroup panics, due to the asynchronous nature of the notifier. --- chainntnfs/bitcoindnotify/bitcoind.go | 364 +++++++++------------- chainntnfs/bitcoindnotify/bitcoind_dev.go | 1 + 2 files changed, 150 insertions(+), 215 deletions(-) diff --git a/chainntnfs/bitcoindnotify/bitcoind.go b/chainntnfs/bitcoindnotify/bitcoind.go index fa6812d4..7072808f 100644 --- a/chainntnfs/bitcoindnotify/bitcoind.go +++ b/chainntnfs/bitcoindnotify/bitcoind.go @@ -12,7 +12,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/btcsuite/btcwallet/chain" - "github.com/btcsuite/btcwallet/wtxmgr" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/queue" ) @@ -144,6 +143,7 @@ func (b *BitcoindNotifier) Start() error { b.txNotifier = chainntnfs.NewTxNotifier( uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache, + b.spendHintCache, ) b.bestBlock = chainntnfs.BlockEpoch{ @@ -259,6 +259,8 @@ out: // included in the active chain. We'll do this // in a goroutine to prevent blocking // potentially long rescans. + // + // TODO(wilmer): add retry logic if rescan fails? b.wg.Add(1) go func() { defer b.wg.Done() @@ -286,6 +288,25 @@ out: } }() + case *chainntnfs.HistoricalSpendDispatch: + // In order to ensure we don't block the caller + // on what may be a long rescan, we'll launch a + // goroutine to do so in the background. + // + // TODO(wilmer): add retry logic if rescan fails? + b.wg.Add(1) + go func() { + defer b.wg.Done() + + err := b.dispatchSpendDetailsManually(msg) + if err != nil { + chainntnfs.Log.Errorf("Rescan to "+ + "determine the spend "+ + "details of %v failed: %v", + msg.OutPoint, err) + } + }() + case *blockEpochRegistration: chainntnfs.Log.Infof("New block epoch subscription") b.blockEpochClients[msg.epochID] = msg @@ -383,7 +404,23 @@ out: b.bestBlock = newBestBlock case chain.RelevantTx: - b.handleRelevantTx(item, b.bestBlock.Height) + // We only care about notifying on confirmed + // spends, so if this is a mempool spend, we can + // ignore it and wait for the spend to appear in + // on-chain. + if item.Block == nil { + continue + } + + tx := &item.TxRecord.MsgTx + err := b.txNotifier.ProcessRelevantSpendTx( + tx, item.Block.Height, + ) + if err != nil { + chainntnfs.Log.Errorf("Unable to "+ + "process transaction %v: %v", + tx.TxHash(), err) + } } case <-b.quit: @@ -393,55 +430,6 @@ out: b.wg.Done() } -// handleRelevantTx notifies any clients of a relevant transaction. -func (b *BitcoindNotifier) handleRelevantTx(tx chain.RelevantTx, bestHeight int32) { - msgTx := tx.TxRecord.MsgTx - - // We only care about notifying on confirmed spends, so in case this is - // a mempool spend, we can continue, and wait for the spend to appear - // in chain. 
- if tx.Block == nil { - return - } - - // First, check if this transaction spends an output - // that has an existing spend notification for it. - for i, txIn := range msgTx.TxIn { - prevOut := txIn.PreviousOutPoint - - // If this transaction indeed does spend an - // output which we have a registered - // notification for, then create a spend - // summary, finally sending off the details to - // the notification subscriber. - if clients, ok := b.spendNotifications[prevOut]; ok { - spenderSha := msgTx.TxHash() - spendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &prevOut, - SpenderTxHash: &spenderSha, - SpendingTx: &msgTx, - SpenderInputIndex: uint32(i), - } - spendDetails.SpendingHeight = tx.Block.Height - - for _, ntfn := range clients { - chainntnfs.Log.Infof("Dispatching confirmed "+ - "spend notification for outpoint=%v "+ - "at height %v", ntfn.targetOutpoint, - spendDetails.SpendingHeight) - ntfn.spendChan <- spendDetails - - // Close spendChan to ensure that any calls to - // Cancel will not block. This is safe to do - // since the channel is buffered, and the - // message can still be read by the receiver. - close(ntfn.spendChan) - } - delete(b.spendNotifications, prevOut) - } - } -} - // historicalConfDetails looks up whether a transaction is already included in a // block in the active chain and, if so, returns details about the confirmation. func (b *BitcoindNotifier) historicalConfDetails(txid *chainhash.Hash, @@ -717,167 +705,120 @@ type spendCancel struct { func (b *BitcoindNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) { - // Before proceeding to register the notification, we'll query our - // height hint cache to determine whether a better one exists. - if hint, err := b.spendHintCache.QuerySpendHint(*outpoint); err == nil { - if hint > heightHint { - chainntnfs.Log.Debugf("Using height hint %d retrieved "+ - "from cache for %v", hint, outpoint) - heightHint = hint - } + // First, we'll construct a spend notification request and hand it off + // to the txNotifier. + spendID := atomic.AddUint64(&b.spendClientCounter, 1) + cancel := func() { + b.txNotifier.CancelSpend(*outpoint, spendID) } - // Construct a notification request for the outpoint and send it to the - // main event loop. - ntfn := &spendNotification{ - targetOutpoint: outpoint, - spendChan: make(chan *chainntnfs.SpendDetail, 1), - spendID: atomic.AddUint64(&b.spendClientCounter, 1), + ntfn := &chainntnfs.SpendNtfn{ + SpendID: spendID, + OutPoint: *outpoint, + PkScript: pkScript, + Event: chainntnfs.NewSpendEvent(cancel), + HeightHint: heightHint, } - select { - case <-b.quit: - return nil, ErrChainNotifierShuttingDown - case b.notificationRegistry <- ntfn: - } - - if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil { - return nil, err - } - - // The following conditional checks to ensure that when a spend - // notification is registered, the output hasn't already been spent. If - // the output is no longer in the UTXO set, the chain will be rescanned - // from the point where the output was added. The rescan will dispatch - // the notification. - txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true) + historicalDispatch, err := b.txNotifier.RegisterSpend(ntfn) if err != nil { return nil, err } - // If the output is unspent, then we'll write it to the cache with the - // given height hint. This allows us to increase the height hint as the - // chain extends and the output remains unspent. 
+ // If the txNotifier didn't return any details to perform a historical + // scan of the chain, then we can return early as there's nothing left + // for us to do. + if historicalDispatch == nil { + return ntfn.Event, nil + } + + // We'll then request the backend to notify us when it has detected the + // outpoint as spent. + if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil { + return nil, err + } + + // In addition to the check above, we'll also check the backend's UTXO + // set to determine whether the outpoint has been spent. If it hasn't, + // we can return to the caller as well. + txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true) + if err != nil { + return nil, err + } if txOut != nil { - err := b.spendHintCache.CommitSpendHint(heightHint, *outpoint) + // We'll let the txNotifier know the outpoint is still unspent + // in order to begin updating its spend hint. + err := b.txNotifier.UpdateSpendDetails(*outpoint, nil) if err != nil { - // The error is not fatal, so we should not return an - // error to the caller. - chainntnfs.Log.Error("Unable to update spend hint to "+ - "%d for %v: %v", heightHint, *outpoint, err) - } - } else { - // Otherwise, we'll determine when the output was spent. - // - // First, we'll attempt to retrieve the transaction's block hash - // using the backend's transaction index. - tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash) - if err != nil { - // Avoid returning an error if the transaction was not - // found to proceed with fallback methods. - jsonErr, ok := err.(*btcjson.RPCError) - if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo { - return nil, fmt.Errorf("unable to query for "+ - "txid %v: %v", outpoint.Hash, err) - } + return nil, err } - var blockHash *chainhash.Hash - if tx != nil && tx.BlockHash != "" { - // If we're able to retrieve a valid block hash from the - // transaction, then we'll use it as our rescan starting - // point. - blockHash, err = chainhash.NewHashFromStr(tx.BlockHash) - if err != nil { - return nil, err - } - } else { - // Otherwise, we'll attempt to retrieve the hash for the - // block at the heightHint. - blockHash, err = b.chainConn.GetBlockHash( - int64(heightHint), - ) - if err != nil { - return nil, fmt.Errorf("unable to retrieve "+ - "hash for block with height %d: %v", - heightHint, err) - } - } + return ntfn.Event, nil + } - // We'll only scan old blocks if the transaction has actually - // been included within a block. Otherwise, we'll encounter an - // error when scanning for blocks. This can happens in the case - // of a race condition, wherein the output itself is unspent, - // and only arrives in the mempool after the getxout call. - if blockHash != nil { - // Rescan all the blocks until the current one. - startHeight, err := b.chainConn.GetBlockHeight( - blockHash, - ) - if err != nil { - return nil, err - } - - _, endHeight, err := b.chainConn.GetBestBlock() - if err != nil { - return nil, err - } - - // In order to ensure we don't block the caller on what - // may be a long rescan, we'll launch a goroutine to do - // so in the background. - b.wg.Add(1) - go func() { - defer b.wg.Done() - - err := b.dispatchSpendDetailsManually( - *outpoint, startHeight, endHeight, - ) - if err != nil { - chainntnfs.Log.Errorf("Rescan for spend "+ - "notification txout(%x) "+ - "failed: %v", outpoint, err) - } - }() + // Otherwise, we'll determine when the output was spent by scanning the + // chain. 
We'll begin by determining where to start our historical + // rescan. + // + // As a minimal optimization, we'll query the backend's transaction + // index (if enabled) to determine if we have a better rescan starting + // height. We can do this as the GetRawTransaction call will return the + // hash of the block it was included in within the chain. + tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash) + if err != nil { + // Avoid returning an error if the transaction was not found to + // proceed with fallback methods. + jsonErr, ok := err.(*btcjson.RPCError) + if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo { + return nil, fmt.Errorf("unable to query for "+ + "txid %v: %v", outpoint.Hash, err) } } - return &chainntnfs.SpendEvent{ - Spend: ntfn.spendChan, - Cancel: func() { - cancel := &spendCancel{ - op: *outpoint, - spendID: ntfn.spendID, - } + // If the transaction index was enabled, we'll use the block's hash to + // retrieve its height and check whether it provides a better starting + // point for our rescan. + if tx != nil { + // If the transaction containing the outpoint hasn't confirmed + // on-chain, then there's no need to perform a rescan. + if tx.BlockHash == "" { + return ntfn.Event, nil + } - // Submit spend cancellation to notification dispatcher. - select { - case b.notificationCancels <- cancel: - // Cancellation is being handled, drain the - // spend chan until it is closed before yielding - // to the caller. - for { - select { - case _, ok := <-ntfn.spendChan: - if !ok { - return - } - case <-b.quit: - return - } - } - case <-b.quit: - } - }, - }, nil + blockHash, err := chainhash.NewHashFromStr(tx.BlockHash) + if err != nil { + return nil, err + } + blockHeight, err := b.chainConn.GetBlockHeight(blockHash) + if err != nil { + return nil, err + } + + if uint32(blockHeight) > historicalDispatch.StartHeight { + historicalDispatch.StartHeight = uint32(blockHeight) + } + } + + // Now that we've determined the starting point of our rescan, we can + // dispatch it. + select { + case b.notificationRegistry <- historicalDispatch: + return ntfn.Event, nil + case <-b.quit: + return nil, ErrChainNotifierShuttingDown + } } // disaptchSpendDetailsManually attempts to manually scan the chain within the // given height range for a transaction that spends the given outpoint. If one // is found, it's spending details are sent to the notifier dispatcher, which // will then dispatch the notification to all of its clients. -func (b *BitcoindNotifier) dispatchSpendDetailsManually(op wire.OutPoint, - startHeight, endHeight int32) error { +func (b *BitcoindNotifier) dispatchSpendDetailsManually( + historicalDispatchDetails *chainntnfs.HistoricalSpendDispatch) error { + + op := historicalDispatchDetails.OutPoint + startHeight := historicalDispatchDetails.StartHeight + endHeight := historicalDispatchDetails.EndHeight // Begin scanning blocks at every height to determine if the outpoint // was spent. @@ -890,6 +831,7 @@ func (b *BitcoindNotifier) dispatchSpendDetailsManually(op wire.OutPoint, default: } + // First, we'll fetch the block for the current height. blockHash, err := b.chainConn.GetBlockHash(int64(height)) if err != nil { return fmt.Errorf("unable to retrieve hash for block "+ @@ -901,38 +843,30 @@ func (b *BitcoindNotifier) dispatchSpendDetailsManually(op wire.OutPoint, "%v: %v", blockHash, err) } + // Then, we'll manually go over every transaction in it and + // determine whether it spends the outpoint in question. 
for _, tx := range block.Transactions { - for _, in := range tx.TxIn { - if in.PreviousOutPoint != op { + for i, txIn := range tx.TxIn { + if txIn.PreviousOutPoint != op { continue } - // If this transaction input spends the - // outpoint, we'll gather the details of the - // spending transaction and dispatch a spend - // notification to our clients. - relTx := chain.RelevantTx{ - TxRecord: &wtxmgr.TxRecord{ - MsgTx: *tx, - Hash: tx.TxHash(), - Received: block.Header.Timestamp, - }, - Block: &wtxmgr.BlockMeta{ - Block: wtxmgr.Block{ - Hash: *blockHash, - Height: height, - }, - Time: block.Header.Timestamp, - }, + // If it does, we'll construct its spend details + // and hand them over to the TxNotifier so that + // it can properly notify its registered + // clients. + txHash := tx.TxHash() + details := &chainntnfs.SpendDetail{ + SpentOutPoint: &op, + SpenderTxHash: &txHash, + SpendingTx: tx, + SpenderInputIndex: uint32(i), + SpendingHeight: int32(height), } - select { - case b.notificationRegistry <- relTx: - case <-b.quit: - return ErrChainNotifierShuttingDown - } - - return nil + return b.txNotifier.UpdateSpendDetails( + op, details, + ) } } } diff --git a/chainntnfs/bitcoindnotify/bitcoind_dev.go b/chainntnfs/bitcoindnotify/bitcoind_dev.go index 8189ecf4..ceeeb8ab 100644 --- a/chainntnfs/bitcoindnotify/bitcoind_dev.go +++ b/chainntnfs/bitcoindnotify/bitcoind_dev.go @@ -31,6 +31,7 @@ func (b *BitcoindNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Has b.txNotifier = chainntnfs.NewTxNotifier( uint32(bestHeight), reorgSafetyLimit, b.confirmHintCache, + b.spendHintCache, ) if generateBlocks != nil { From 0927f35dc121dc832bea07863d339cd25be5cc2c Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 16/23] chainntnfs/bitcoindnotify: remove old spend notification handling logic In this commit, we remove the old spend notification logic within the BitcoindNotifier as it's been phased out by the TxNotifier. --- chainntnfs/bitcoindnotify/bitcoind.go | 78 --------------------------- 1 file changed, 78 deletions(-) diff --git a/chainntnfs/bitcoindnotify/bitcoind.go b/chainntnfs/bitcoindnotify/bitcoind.go index 7072808f..f4f51821 100644 --- a/chainntnfs/bitcoindnotify/bitcoind.go +++ b/chainntnfs/bitcoindnotify/bitcoind.go @@ -68,8 +68,6 @@ type BitcoindNotifier struct { notificationCancels chan interface{} notificationRegistry chan interface{} - spendNotifications map[wire.OutPoint]map[uint64]*spendNotification - txNotifier *chainntnfs.TxNotifier blockEpochClients map[uint64]*blockEpochRegistration @@ -106,8 +104,6 @@ func New(chainConn *chain.BitcoindConn, spendHintCache chainntnfs.SpendHintCache blockEpochClients: make(map[uint64]*blockEpochRegistration), - spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification), - spendHintCache: spendHintCache, confirmHintCache: confirmHintCache, @@ -173,11 +169,6 @@ func (b *BitcoindNotifier) Stop() error { // Notify all pending clients of our shutdown by closing the related // notification channels. 
- for _, spendClients := range b.spendNotifications { - for _, spendClient := range spendClients { - close(spendClient.spendChan) - } - } for _, epochClient := range b.blockEpochClients { close(epochClient.cancelChan) epochClient.wg.Wait() @@ -204,19 +195,6 @@ out: select { case cancelMsg := <-b.notificationCancels: switch msg := cancelMsg.(type) { - case *spendCancel: - chainntnfs.Log.Infof("Cancelling spend "+ - "notification for out_point=%v, "+ - "spend_id=%v", msg.op, msg.spendID) - - // Before we attempt to close the spendChan, - // ensure that the notification hasn't already - // yet been dispatched. - if outPointClients, ok := b.spendNotifications[msg.op]; ok { - close(outPointClients[msg.spendID].spendChan) - delete(b.spendNotifications[msg.op], msg.spendID) - } - case *epochCancel: chainntnfs.Log.Infof("Cancelling epoch "+ "notification, epoch_id=%v", msg.epochID) @@ -244,16 +222,6 @@ out: } case registerMsg := <-b.notificationRegistry: switch msg := registerMsg.(type) { - case *spendNotification: - chainntnfs.Log.Infof("New spend subscription: "+ - "utxo=%v", msg.targetOutpoint) - op := *msg.targetOutpoint - - if _, ok := b.spendNotifications[op]; !ok { - b.spendNotifications[op] = make(map[uint64]*spendNotification) - } - b.spendNotifications[op][msg.spendID] = msg - case *chainntnfs.HistoricalConfDispatch: // Look up whether the transaction is already // included in the active chain. We'll do this @@ -326,9 +294,6 @@ out: } } msg.errorChan <- nil - - case chain.RelevantTx: - b.handleRelevantTx(msg, b.bestBlock.Height) } case ntfn := <-b.chainConn.Notifications(): @@ -618,27 +583,6 @@ func (b *BitcoindNotifier) handleBlockConnected(block chainntnfs.BlockEpoch) err chainntnfs.Log.Infof("New block: height=%v, sha=%v", block.Height, block.Hash) - // Finally, we'll update the spend height hint for all of our watched - // outpoints that have not been spent yet. This is safe to do as we do - // not watch already spent outpoints for spend notifications. - ops := make([]wire.OutPoint, 0, len(b.spendNotifications)) - for op := range b.spendNotifications { - ops = append(ops, op) - } - - if len(ops) > 0 { - err := b.spendHintCache.CommitSpendHint( - uint32(block.Height), ops..., - ) - if err != nil { - // The error is not fatal since we are connecting a - // block, and advancing the spend hint is an optimistic - // optimization. - chainntnfs.Log.Errorf("Unable to update spend hint to "+ - "%d for %v: %v", block.Height, ops, err) - } - } - // We want to set the best block before dispatching notifications so // if any subscribers make queries based on their received block epoch, // our state is fully updated in time. @@ -675,28 +619,6 @@ func (b *BitcoindNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistr } } -// spendNotification couples a target outpoint along with the channel used for -// notifications once a spend of the outpoint has been detected. -type spendNotification struct { - targetOutpoint *wire.OutPoint - - spendChan chan *chainntnfs.SpendDetail - - spendID uint64 - - heightHint uint32 -} - -// spendCancel is a message sent to the BitcoindNotifier when a client wishes -// to cancel an outstanding spend notification that has yet to be dispatched. -type spendCancel struct { - // op is the target outpoint of the notification to be cancelled. - op wire.OutPoint - - // spendID the ID of the notification to cancel. - spendID uint64 -} - // RegisterSpendNtfn registers an intent to be notified once the target // outpoint has been spent by a transaction on-chain. 
Once a spend of the target // outpoint has been detected, the details of the spending event will be sent From 88edd320d5d0ae5ad3aefb51d8115132b8bd0bbe Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 17/23] chainntnfs/btcdnotify: handle spend notification registration w/ TxNotifier In this commit, we modify the logic within RegisterSpendNtfn for the BtcdNotifier to account for the recent changes made to the TxNotifier. Since it is now able to handle spend notification registration and dispatch, we can bypass all the current logic within the BtcdNotifier and interact directly with the TxNotifier instead. The most notable change is that now we'll only attempt a historical rescan if the TxNotifier tells us so. --- chainntnfs/btcdnotify/btcd.go | 376 ++++++++++-------------------- chainntnfs/btcdnotify/btcd_dev.go | 1 + 2 files changed, 124 insertions(+), 253 deletions(-) diff --git a/chainntnfs/btcdnotify/btcd.go b/chainntnfs/btcdnotify/btcd.go index 3fe740a7..6547b398 100644 --- a/chainntnfs/btcdnotify/btcd.go +++ b/chainntnfs/btcdnotify/btcd.go @@ -168,6 +168,7 @@ func (b *BtcdNotifier) Start() error { b.txNotifier = chainntnfs.NewTxNotifier( uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache, + b.spendHintCache, ) b.bestBlock = chainntnfs.BlockEpoch{ @@ -329,6 +330,8 @@ out: // included in the active chain. We'll do this // in a goroutine to prevent blocking // potentially long rescans. + // + // TODO(wilmer): add retry logic if rescan fails? b.wg.Add(1) go func() { defer b.wg.Done() @@ -449,60 +452,23 @@ out: // partially completed. b.bestBlock = newBestBlock - // NOTE: we currently only use txUpdates for mempool spends and - // rescan spends. It might get removed entirely in the future. case item := <-b.txUpdates.ChanOut(): newSpend := item.(*txUpdate) // We only care about notifying on confirmed spends, so - // in case this is a mempool spend, we can continue, - // and wait for the spend to appear in chain. + // if this is a mempool spend, we can ignore it and wait + // for the spend to appear in on-chain. if newSpend.details == nil { continue } - spendingTx := newSpend.tx - - // First, check if this transaction spends an output - // that has an existing spend notification for it. - for i, txIn := range spendingTx.MsgTx().TxIn { - prevOut := txIn.PreviousOutPoint - - // If this transaction indeed does spend an - // output which we have a registered - // notification for, then create a spend - // summary, finally sending off the details to - // the notification subscriber. - if clients, ok := b.spendNotifications[prevOut]; ok { - spenderSha := newSpend.tx.Hash() - spendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &prevOut, - SpenderTxHash: spenderSha, - SpendingTx: spendingTx.MsgTx(), - SpenderInputIndex: uint32(i), - } - spendDetails.SpendingHeight = newSpend.details.Height - - for _, ntfn := range clients { - chainntnfs.Log.Infof("Dispatching "+ - "confirmed spend "+ - "notification for "+ - "outpoint=%v at height %v", - ntfn.targetOutpoint, - spendDetails.SpendingHeight) - ntfn.spendChan <- spendDetails - - // Close spendChan to ensure - // that any calls to Cancel - // will not block. This is safe - // to do since the channel is - // buffered, and the message - // can still be read by the - // receiver. 
- close(ntfn.spendChan) - } - delete(b.spendNotifications, prevOut) - } + tx := newSpend.tx.MsgTx() + err := b.txNotifier.ProcessRelevantSpendTx( + tx, newSpend.details.Height, + ) + if err != nil { + chainntnfs.Log.Errorf("Unable to process "+ + "transaction %v: %v", tx.TxHash(), err) } case <-b.quit: @@ -713,94 +679,12 @@ func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) error { chainntnfs.Log.Infof("New block: height=%v, sha=%v", epoch.Height, epoch.Hash) - // Define a helper struct for coalescing the spend notifications we will - // dispatch after trying to commit the spend hints. - type spendNtfnBatch struct { - details *chainntnfs.SpendDetail - clients map[uint64]*spendNotification - } - - // Scan over the list of relevant transactions and possibly dispatch - // notifications for spends. - spendBatches := make(map[wire.OutPoint]spendNtfnBatch) - for _, tx := range newBlock.txns { - mtx := tx.MsgTx() - txSha := mtx.TxHash() - - for i, txIn := range mtx.TxIn { - prevOut := txIn.PreviousOutPoint - - // If this transaction indeed does spend an output which - // we have a registered notification for, then create a - // spend summary, finally sending off the details to the - // notification subscriber. - clients, ok := b.spendNotifications[prevOut] - if !ok { - continue - } - delete(b.spendNotifications, prevOut) - - spendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &prevOut, - SpenderTxHash: &txSha, - SpendingTx: mtx, - SpenderInputIndex: uint32(i), - SpendingHeight: int32(newBlock.height), - } - - spendBatches[prevOut] = spendNtfnBatch{ - details: spendDetails, - clients: clients, - } - } - } - - // Finally, we'll update the spend height hint for all of our watched - // outpoints that have not been spent yet. This is safe to do as we do - // not watch already spent outpoints for spend notifications. - ops := make([]wire.OutPoint, 0, len(b.spendNotifications)) - for op := range b.spendNotifications { - ops = append(ops, op) - } - - if len(ops) > 0 { - err := b.spendHintCache.CommitSpendHint( - uint32(epoch.Height), ops..., - ) - if err != nil { - // The error is not fatal since we are connecting a - // block, and advancing the spend hint is an optimistic - // optimization. - chainntnfs.Log.Errorf("Unable to update spend hint to "+ - "%d for %v: %v", epoch.Height, ops, err) - } - } - - // We want to set the best block before dispatching notifications - // so if any subscribers make queries based on their received - // block epoch, our state is fully updated in time. + // We want to set the best block before dispatching notifications so if + // any subscribers make queries based on their received block epoch, our + // state is fully updated in time. b.bestBlock = epoch - - // Next we'll notify any subscribed clients of the block. b.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash) - // Finally, send off the spend details to the notification subscribers. - for _, batch := range spendBatches { - for _, ntfn := range batch.clients { - chainntnfs.Log.Infof("Dispatching spend "+ - "notification for outpoint=%v", - ntfn.targetOutpoint) - - ntfn.spendChan <- batch.details - - // Close spendChan to ensure that any calls to - // Cancel will not block. This is safe to do - // since the channel is buffered, and the - // message can still be read by the receiver. 
- close(ntfn.spendChan) - } - } - return nil } @@ -859,145 +743,131 @@ type spendCancel struct { func (b *BtcdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) { - // Before proceeding to register the notification, we'll query our - // height hint cache to determine whether a better one exists. - if hint, err := b.spendHintCache.QuerySpendHint(*outpoint); err == nil { - if hint > heightHint { - chainntnfs.Log.Debugf("Using height hint %d retrieved "+ - "from cache for %v", hint, outpoint) - heightHint = hint - } + // First, we'll construct a spend notification request and hand it off + // to the txNotifier. + spendID := atomic.AddUint64(&b.spendClientCounter, 1) + cancel := func() { + b.txNotifier.CancelSpend(*outpoint, spendID) + } + ntfn := &chainntnfs.SpendNtfn{ + SpendID: spendID, + OutPoint: *outpoint, + PkScript: pkScript, + Event: chainntnfs.NewSpendEvent(cancel), + HeightHint: heightHint, } - // Construct a notification request for the outpoint and send it to the - // main event loop. - ntfn := &spendNotification{ - targetOutpoint: outpoint, - spendChan: make(chan *chainntnfs.SpendDetail, 1), - spendID: atomic.AddUint64(&b.spendClientCounter, 1), - heightHint: heightHint, - } - - select { - case <-b.quit: - return nil, ErrChainNotifierShuttingDown - case b.notificationRegistry <- ntfn: - } - - // TODO(roasbeef): update btcd rescan logic to also use both - if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil { - return nil, err - } - - // The following conditional checks to ensure that when a spend - // notification is registered, the output hasn't already been spent. If - // the output is no longer in the UTXO set, the chain will be rescanned - // from the point where the output was added. The rescan will dispatch - // the notification. - txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true) + historicalDispatch, err := b.txNotifier.RegisterSpend(ntfn) if err != nil { return nil, err } - // If the output is unspent, then we'll write it to the cache with the - // given height hint. This allows us to increase the height hint as the - // chain extends and the output remains unspent. + // If the txNotifier didn't return any details to perform a historical + // scan of the chain, then we can return early as there's nothing left + // for us to do. + if historicalDispatch == nil { + return ntfn.Event, nil + } + + // We'll then request the backend to notify us when it has detected the + // outpoint as spent. + ops := []*wire.OutPoint{outpoint} + if err := b.chainConn.NotifySpent(ops); err != nil { + return nil, err + } + + // In addition to the check above, we'll also check the backend's UTXO + // set to determine whether the outpoint has been spent. If it hasn't, + // we can return to the caller as well. + txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true) + if err != nil { + return nil, err + } if txOut != nil { - err := b.spendHintCache.CommitSpendHint(heightHint, *outpoint) + // We'll let the txNotifier know the outpoint is still unspent + // in order to begin updating its spend hint. + err := b.txNotifier.UpdateSpendDetails(*outpoint, nil) if err != nil { - // The error is not fatal, so we should not return an - // error to the caller. - chainntnfs.Log.Error("Unable to update spend hint to "+ - "%d for %v: %v", heightHint, *outpoint, err) - } - } else { - // Otherwise, we'll determine when the output was spent. 
- // - // First, we'll attempt to retrieve the transaction's block hash - // using the backend's transaction index. - tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash) - if err != nil { - // Avoid returning an error if the transaction was not - // found to proceed with fallback methods. - jsonErr, ok := err.(*btcjson.RPCError) - if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo { - return nil, fmt.Errorf("unable to query for "+ - "txid %v: %v", outpoint.Hash, err) - } + return nil, err } - var blockHash *chainhash.Hash - if tx != nil && tx.BlockHash != "" { - // If we're able to retrieve a valid block hash from the - // transaction, then we'll use it as our rescan starting - // point. - blockHash, err = chainhash.NewHashFromStr(tx.BlockHash) - if err != nil { - return nil, err - } - } else { - // Otherwise, we'll attempt to retrieve the hash for the - // block at the heightHint. - blockHash, err = b.chainConn.GetBlockHash( - int64(heightHint), - ) - if err != nil { - return nil, err - } - } + return ntfn.Event, nil + } - // We'll only request a rescan if the transaction has actually - // been included within a block. Otherwise, we'll encounter an - // error when scanning for blocks. This can happen in the case - // of a race condition, wherein the output itself is unspent, - // and only arrives in the mempool after the getxout call. - if blockHash != nil { - ops := []*wire.OutPoint{outpoint} + // Otherwise, we'll determine when the output was spent by scanning the + // chain. We'll begin by determining where to start our historical + // rescan. + startHash, err := b.chainConn.GetBlockHash( + int64(historicalDispatch.StartHeight), + ) + if err != nil { + return nil, fmt.Errorf("unable to get block hash for height "+ + "%d: %v", historicalDispatch.StartHeight, err) + } - // In order to ensure that we don't block the caller on - // what may be a long rescan, we'll launch a new - // goroutine to handle the async result of the rescan. - asyncResult := b.chainConn.RescanAsync( - blockHash, nil, ops, - ) - go func() { - rescanErr := asyncResult.Receive() - if rescanErr != nil { - chainntnfs.Log.Errorf("Rescan for spend "+ - "notification txout(%x) "+ - "failed: %v", outpoint, rescanErr) - } - }() + // As a minimal optimization, we'll query the backend's transaction + // index (if enabled) to determine if we have a better rescan starting + // height. We can do this as the GetRawTransaction call will return the + // hash of the block it was included in within the chain. + tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash) + if err != nil { + // Avoid returning an error if the transaction was not found to + // proceed with fallback methods. + jsonErr, ok := err.(*btcjson.RPCError) + if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo { + return nil, fmt.Errorf("unable to query for "+ + "txid %v: %v", outpoint.Hash, err) } } - return &chainntnfs.SpendEvent{ - Spend: ntfn.spendChan, - Cancel: func() { - cancel := &spendCancel{ - op: *outpoint, - spendID: ntfn.spendID, - } + // If the transaction index was enabled, we'll use the block's hash to + // retrieve its height and check whether it provides a better starting + // point for our rescan. + if tx != nil { + // If the transaction containing the outpoint hasn't confirmed + // on-chain, then there's no need to perform a rescan. + if tx.BlockHash == "" { + return ntfn.Event, nil + } - // Submit spend cancellation to notification dispatcher. 
- select { - case b.notificationCancels <- cancel: - // Cancellation is being handled, drain the spend chan until it is - // closed before yielding to the caller. - for { - select { - case _, ok := <-ntfn.spendChan: - if !ok { - return - } - case <-b.quit: - return - } - } - case <-b.quit: + blockHash, err := chainhash.NewHashFromStr(tx.BlockHash) + if err != nil { + return nil, err + } + blockHeader, err := b.chainConn.GetBlockHeaderVerbose(blockHash) + if err != nil { + return nil, fmt.Errorf("unable to get header for "+ + "block %v: %v", blockHash, err) + } + + if uint32(blockHeader.Height) > historicalDispatch.StartHeight { + startHash, err = b.chainConn.GetBlockHash( + int64(blockHeader.Height), + ) + if err != nil { + return nil, fmt.Errorf("unable to get block "+ + "hash for height %d: %v", + blockHeader.Height, err) } - }, - }, nil + } + } + + // In order to ensure that we don't block the caller on what may be a + // long rescan, we'll launch a new goroutine to handle the async result + // of the rescan. We purposefully prevent from adding this goroutine to + // the WaitGroup as we cannnot wait for a quit signal due to the + // asyncResult channel not being exposed. + // + // TODO(wilmer): add retry logic if rescan fails? + asyncResult := b.chainConn.RescanAsync(startHash, nil, ops) + go func() { + if rescanErr := asyncResult.Receive(); rescanErr != nil { + chainntnfs.Log.Errorf("Rescan to determine the spend "+ + "details of %v failed: %v", outpoint, rescanErr) + } + }() + + return ntfn.Event, nil } // RegisterConfirmationsNtfn registers a notification with BtcdNotifier diff --git a/chainntnfs/btcdnotify/btcd_dev.go b/chainntnfs/btcdnotify/btcd_dev.go index 4146baa5..7e39d8a5 100644 --- a/chainntnfs/btcdnotify/btcd_dev.go +++ b/chainntnfs/btcdnotify/btcd_dev.go @@ -30,6 +30,7 @@ func (b *BtcdNotifier) UnsafeStart(bestHeight int32, bestHash *chainhash.Hash, b.txNotifier = chainntnfs.NewTxNotifier( uint32(bestHeight), reorgSafetyLimit, b.confirmHintCache, + b.spendHintCache, ) b.chainUpdates.Start() From 74139c9a3f7c026dd6d031e829fdf7b7ee7678c4 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 18/23] chainntnfs/btcdnotify: remove old spend notification handling logic In this commit, we remove the old spend notification logic within the BtcdNotifier as it's been phased out by the TxNotifier. --- chainntnfs/btcdnotify/btcd.go | 54 ----------------------------------- 1 file changed, 54 deletions(-) diff --git a/chainntnfs/btcdnotify/btcd.go b/chainntnfs/btcdnotify/btcd.go index 6547b398..c7eecfbc 100644 --- a/chainntnfs/btcdnotify/btcd.go +++ b/chainntnfs/btcdnotify/btcd.go @@ -74,8 +74,6 @@ type BtcdNotifier struct { notificationCancels chan interface{} notificationRegistry chan interface{} - spendNotifications map[wire.OutPoint]map[uint64]*spendNotification - txNotifier *chainntnfs.TxNotifier blockEpochClients map[uint64]*blockEpochRegistration @@ -114,8 +112,6 @@ func New(config *rpcclient.ConnConfig, spendHintCache chainntnfs.SpendHintCache, blockEpochClients: make(map[uint64]*blockEpochRegistration), - spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification), - chainUpdates: queue.NewConcurrentQueue(10), txUpdates: queue.NewConcurrentQueue(10), @@ -204,11 +200,6 @@ func (b *BtcdNotifier) Stop() error { // Notify all pending clients of our shutdown by closing the related // notification channels. 
- for _, spendClients := range b.spendNotifications { - for _, spendClient := range spendClients { - close(spendClient.spendChan) - } - } for _, epochClient := range b.blockEpochClients { close(epochClient.cancelChan) epochClient.wg.Wait() @@ -276,19 +267,6 @@ out: select { case cancelMsg := <-b.notificationCancels: switch msg := cancelMsg.(type) { - case *spendCancel: - chainntnfs.Log.Infof("Cancelling spend "+ - "notification for out_point=%v, "+ - "spend_id=%v", msg.op, msg.spendID) - - // Before we attempt to close the spendChan, - // ensure that the notification hasn't already - // yet been dispatched. - if outPointClients, ok := b.spendNotifications[msg.op]; ok { - close(outPointClients[msg.spendID].spendChan) - delete(b.spendNotifications[msg.op], msg.spendID) - } - case *epochCancel: chainntnfs.Log.Infof("Cancelling epoch "+ "notification, epoch_id=%v", msg.epochID) @@ -315,16 +293,6 @@ out: } case registerMsg := <-b.notificationRegistry: switch msg := registerMsg.(type) { - case *spendNotification: - chainntnfs.Log.Infof("New spend subscription: "+ - "utxo=%v", msg.targetOutpoint) - op := *msg.targetOutpoint - - if _, ok := b.spendNotifications[op]; !ok { - b.spendNotifications[op] = make(map[uint64]*spendNotification) - } - b.spendNotifications[op][msg.spendID] = msg - case *chainntnfs.HistoricalConfDispatch: // Look up whether the transaction is already // included in the active chain. We'll do this @@ -713,28 +681,6 @@ func (b *BtcdNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistratio } } -// spendNotification couples a target outpoint along with the channel used for -// notifications once a spend of the outpoint has been detected. -type spendNotification struct { - targetOutpoint *wire.OutPoint - - spendChan chan *chainntnfs.SpendDetail - - spendID uint64 - - heightHint uint32 -} - -// spendCancel is a message sent to the BtcdNotifier when a client wishes to -// cancel an outstanding spend notification that has yet to be dispatched. -type spendCancel struct { - // op is the target outpoint of the notification to be cancelled. - op wire.OutPoint - - // spendID the ID of the notification to cancel. - spendID uint64 -} - // RegisterSpendNtfn registers an intent to be notified once the target // outpoint has been spent by a transaction on-chain. Once a spend of the target // outpoint has been detected, the details of the spending event will be sent From bfd11a251e11b286f3d3a48399969a3173dbb16c Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 19/23] chainntnfs/neutrinonotify: handle spend notification registration w/ TxNotifier In this commit, we modify the logic within RegisterSpendNtfn for the NeutrinoNotifier to account for the recent changes made to the TxNotifier. Since it is now able to handle spend notification registration and dispatch, we can bypass all the current logic within the NeutrinoNotifier and interact directly with the TxNotifier instead. The most notable change is that now we'll only attempt a historical rescan if the TxNotifier tells us so. 
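As a rough illustration of the new flow, the registration path reduces to the
condensed sketch below. It is simplified from the diff that follows (error
handling and the GetUtxo-based historical rescan are elided), so it is not the
complete implementation:

    // Hand the spend request off to the TxNotifier, which now owns
    // registration, dispatch, and cancellation for the outpoint.
    spendID := atomic.AddUint64(&n.spendClientCounter, 1)
    ntfn := &chainntnfs.SpendNtfn{
            SpendID:    spendID,
            OutPoint:   *outpoint,
            HeightHint: heightHint,
            Event: chainntnfs.NewSpendEvent(func() {
                    n.txNotifier.CancelSpend(*outpoint, spendID)
            }),
    }
    historicalDispatch, err := n.txNotifier.RegisterSpend(ntfn)
    if err != nil {
            return nil, err
    }

    // Only rescan when the TxNotifier asks for it. If it returns nil,
    // the spend status is already known and will be dispatched at tip.
    if historicalDispatch == nil {
            return ntfn.Event, nil
    }

    // Otherwise, scan the range [StartHeight, EndHeight] for the spend
    // and report the result (or nil if still unspent) back through
    // n.txNotifier.UpdateSpendDetails(*outpoint, details).
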
--- chainntnfs/neutrinonotify/neutrino.go | 210 ++++++---------------- chainntnfs/neutrinonotify/neutrino_dev.go | 1 + 2 files changed, 55 insertions(+), 156 deletions(-) diff --git a/chainntnfs/neutrinonotify/neutrino.go b/chainntnfs/neutrinonotify/neutrino.go index 8e854442..9a2d5d76 100644 --- a/chainntnfs/neutrinonotify/neutrino.go +++ b/chainntnfs/neutrinonotify/neutrino.go @@ -164,6 +164,7 @@ func (n *NeutrinoNotifier) Start() error { n.txNotifier = chainntnfs.NewTxNotifier( n.bestHeight, reorgSafetyLimit, n.confirmHintCache, + n.spendHintCache, ) n.chainConn = &NeutrinoChainConn{n.p2pNode} @@ -603,68 +604,6 @@ func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error { chainntnfs.Log.Infof("New block: height=%v, sha=%v", newBlock.height, newBlock.hash) - // Create a helper struct for coalescing spend notifications triggered - // by this block. - type spendNtfnBatch struct { - details *chainntnfs.SpendDetail - clients map[uint64]*spendNotification - } - - // Scan over the list of relevant transactions and assemble the - // possible spend notifications we need to dispatch. - spendBatches := make(map[wire.OutPoint]spendNtfnBatch) - for _, tx := range newBlock.txns { - mtx := tx.MsgTx() - txSha := mtx.TxHash() - - for i, txIn := range mtx.TxIn { - prevOut := txIn.PreviousOutPoint - - // If this transaction indeed does spend an output which - // we have a registered notification for, then create a - // spend summary and add it to our batch of spend - // notifications to be delivered. - clients, ok := n.spendNotifications[prevOut] - if !ok { - continue - } - delete(n.spendNotifications, prevOut) - - spendDetails := &chainntnfs.SpendDetail{ - SpentOutPoint: &prevOut, - SpenderTxHash: &txSha, - SpendingTx: mtx, - SpenderInputIndex: uint32(i), - SpendingHeight: int32(newBlock.height), - } - - spendBatches[prevOut] = spendNtfnBatch{ - details: spendDetails, - clients: clients, - } - - } - } - - // Now, we'll update the spend height hint for all of our watched - // outpoints that have not been spent yet. This is safe to do as we do - // not watch already spent outpoints for spend notifications. - ops := make([]wire.OutPoint, 0, len(n.spendNotifications)) - for op := range n.spendNotifications { - ops = append(ops, op) - } - - if len(ops) > 0 { - err := n.spendHintCache.CommitSpendHint(newBlock.height, ops...) - if err != nil { - // The error is not fatal since we are connecting a - // block, and advancing the spend hint is an optimistic - // optimization. - chainntnfs.Log.Errorf("Unable to update spend hint to "+ - "%d for %v: %v", newBlock.height, ops, err) - } - } - // We want to set the best block before dispatching notifications // so if any subscribers make queries based on their received // block epoch, our state is fully updated in time. @@ -674,23 +613,6 @@ func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error { // of the block. n.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash) - // Finally, send off the spend details to the notification subscribers. - for _, batch := range spendBatches { - for _, ntfn := range batch.clients { - chainntnfs.Log.Infof("Dispatching spend "+ - "notification for outpoint=%v", - ntfn.targetOutpoint) - - ntfn.spendChan <- batch.details - - // Close spendChan to ensure that any calls to - // Cancel will not block. This is safe to do - // since the channel is buffered, and the - // message can still be read by the receiver. 
- close(ntfn.spendChan) - } - } - return nil } @@ -766,86 +688,59 @@ type spendCancel struct { func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) { - n.heightMtx.RLock() - currentHeight := n.bestHeight - n.heightMtx.RUnlock() - - // Before proceeding to register the notification, we'll query our - // height hint cache to determine whether a better one exists. - if hint, err := n.spendHintCache.QuerySpendHint(*outpoint); err == nil { - if hint > heightHint { - chainntnfs.Log.Debugf("Using height hint %d retrieved "+ - "from cache for %v", hint, outpoint) - heightHint = hint - } + // First, we'll construct a spend notification request and hand it off + // to the txNotifier. + spendID := atomic.AddUint64(&n.spendClientCounter, 1) + cancel := func() { + n.txNotifier.CancelSpend(*outpoint, spendID) + } + ntfn := &chainntnfs.SpendNtfn{ + SpendID: spendID, + OutPoint: *outpoint, + Event: chainntnfs.NewSpendEvent(cancel), + HeightHint: heightHint, } - // Construct a notification request for the outpoint. We'll defer - // sending it to the main event loop until after we've guaranteed that - // the outpoint has not been spent. - ntfn := &spendNotification{ - targetOutpoint: outpoint, - spendChan: make(chan *chainntnfs.SpendDetail, 1), - spendID: atomic.AddUint64(&n.spendClientCounter, 1), - heightHint: heightHint, + historicalDispatch, err := n.txNotifier.RegisterSpend(ntfn) + if err != nil { + return nil, err } - spendEvent := &chainntnfs.SpendEvent{ - Spend: ntfn.spendChan, - Cancel: func() { - cancel := &spendCancel{ - op: *outpoint, - spendID: ntfn.spendID, - } - - // Submit spend cancellation to notification dispatcher. - select { - case n.notificationCancels <- cancel: - // Cancellation is being handled, drain the - // spend chan until it is closed before yielding - // to the caller. - for { - select { - case _, ok := <-ntfn.spendChan: - if !ok { - return - } - case <-n.quit: - return - } - } - case <-n.quit: - } - }, + // If the txNotifier didn't return any details to perform a historical + // scan of the chain, then we can return early as there's nothing left + // for us to do. + if historicalDispatch == nil { + return ntfn.Event, nil } // Ensure that neutrino is caught up to the height hint before we - // attempt to fetch the utxo from the chain. If we're behind, then we + // attempt to fetch the UTXO from the chain. If we're behind, then we // may miss a notification dispatch. for { n.heightMtx.RLock() - currentHeight = n.bestHeight + currentHeight := n.bestHeight n.heightMtx.RUnlock() - if currentHeight < heightHint { - time.Sleep(time.Millisecond * 200) - continue + if currentHeight >= historicalDispatch.StartHeight { + break } - break - } - - inputToWatch := neutrino.InputWithScript{ - OutPoint: *outpoint, - PkScript: pkScript, + time.Sleep(time.Millisecond * 200) } // Before sending off the notification request, we'll attempt to see if // this output is still spent or not at this point in the chain. 
+ inputToWatch := neutrino.InputWithScript{ + OutPoint: *outpoint, + PkScript: pkScript, + } spendReport, err := n.p2pNode.GetUtxo( neutrino.WatchInputs(inputToWatch), neutrino.StartBlock(&waddrmgr.BlockStamp{ - Height: int32(heightHint), + Height: int32(historicalDispatch.StartHeight), + }), + neutrino.EndBlock(&waddrmgr.BlockStamp{ + Height: int32(historicalDispatch.EndHeight), }), ) if err != nil && !strings.Contains(err.Error(), "not found") { @@ -857,30 +752,33 @@ func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, if spendReport != nil && spendReport.SpendingTx != nil { // As a result, we'll launch a goroutine to immediately // dispatch the notification with a normal response. - go func() { - txSha := spendReport.SpendingTx.TxHash() - select { - case ntfn.spendChan <- &chainntnfs.SpendDetail{ - SpentOutPoint: outpoint, - SpenderTxHash: &txSha, - SpendingTx: spendReport.SpendingTx, - SpenderInputIndex: spendReport.SpendingInputIndex, - SpendingHeight: int32(spendReport.SpendingTxHeight), - }: - case <-n.quit: - return - } + spendingTxHash := spendReport.SpendingTx.TxHash() + spendDetails := &chainntnfs.SpendDetail{ + SpentOutPoint: outpoint, + SpenderTxHash: &spendingTxHash, + SpendingTx: spendReport.SpendingTx, + SpenderInputIndex: spendReport.SpendingInputIndex, + SpendingHeight: int32(spendReport.SpendingTxHeight), + } - }() + err := n.txNotifier.UpdateSpendDetails(*outpoint, spendDetails) + if err != nil { + return nil, err + } - return spendEvent, nil + return ntfn.Event, nil + } + + // If the output is still unspent, then we'll mark our historical rescan + // as complete and update our rescan's filter to watch for the spend of + // the outpoint in question. + if err := n.txNotifier.UpdateSpendDetails(*outpoint, nil); err != nil { + return nil, err } - // If the output is still unspent, then we'll update our rescan's - // filter, and send the request to the dispatcher goroutine. rescanUpdate := []neutrino.UpdateOption{ neutrino.AddInputs(inputToWatch), - neutrino.Rewind(currentHeight), + neutrino.Rewind(historicalDispatch.EndHeight), neutrino.DisableDisconnectedNtfns(true), } diff --git a/chainntnfs/neutrinonotify/neutrino_dev.go b/chainntnfs/neutrinonotify/neutrino_dev.go index b5ff182b..3a1b19b9 100644 --- a/chainntnfs/neutrinonotify/neutrino_dev.go +++ b/chainntnfs/neutrinonotify/neutrino_dev.go @@ -50,6 +50,7 @@ func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32, n.txNotifier = chainntnfs.NewTxNotifier( uint32(bestHeight), reorgSafetyLimit, n.confirmHintCache, + n.spendHintCache, ) n.chainConn = &NeutrinoChainConn{n.p2pNode} From deca4cfe4453b4ff71626b3fe096c07a3dad7647 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 20/23] chainntnfs/neutrinonotify: remove old spend notification handling logic In this commit, we remove the old spend notification logic within the NeutrinoNotifier as it's been phased out by the TxNotifier. 
--- chainntnfs/neutrinonotify/neutrino.go | 55 --------------------------- 1 file changed, 55 deletions(-) diff --git a/chainntnfs/neutrinonotify/neutrino.go b/chainntnfs/neutrinonotify/neutrino.go index 9a2d5d76..494ca92b 100644 --- a/chainntnfs/neutrinonotify/neutrino.go +++ b/chainntnfs/neutrinonotify/neutrino.go @@ -68,8 +68,6 @@ type NeutrinoNotifier struct { notificationCancels chan interface{} notificationRegistry chan interface{} - spendNotifications map[wire.OutPoint]map[uint64]*spendNotification - txNotifier *chainntnfs.TxNotifier blockEpochClients map[uint64]*blockEpochRegistration @@ -109,8 +107,6 @@ func New(node *neutrino.ChainService, spendHintCache chainntnfs.SpendHintCache, blockEpochClients: make(map[uint64]*blockEpochRegistration), - spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification), - p2pNode: node, rescanErr: make(chan error), @@ -196,11 +192,6 @@ func (n *NeutrinoNotifier) Stop() error { // Notify all pending clients of our shutdown by closing the related // notification channels. - for _, spendClients := range n.spendNotifications { - for _, spendClient := range spendClients { - close(spendClient.spendChan) - } - } for _, epochClient := range n.blockEpochClients { close(epochClient.cancelChan) epochClient.wg.Wait() @@ -264,19 +255,6 @@ out: select { case cancelMsg := <-n.notificationCancels: switch msg := cancelMsg.(type) { - case *spendCancel: - chainntnfs.Log.Infof("Cancelling spend "+ - "notification for out_point=%v, "+ - "spend_id=%v", msg.op, msg.spendID) - - // Before we attempt to close the spendChan, - // ensure that the notification hasn't already - // yet been dispatched. - if outPointClients, ok := n.spendNotifications[msg.op]; ok { - close(outPointClients[msg.spendID].spendChan) - delete(n.spendNotifications[msg.op], msg.spendID) - } - case *epochCancel: chainntnfs.Log.Infof("Cancelling epoch "+ "notification, epoch_id=%v", msg.epochID) @@ -304,17 +282,6 @@ out: case registerMsg := <-n.notificationRegistry: switch msg := registerMsg.(type) { - case *spendNotification: - chainntnfs.Log.Infof("New spend subscription: "+ - "utxo=%v, height_hint=%v", - msg.targetOutpoint, msg.heightHint) - op := *msg.targetOutpoint - - if _, ok := n.spendNotifications[op]; !ok { - n.spendNotifications[op] = make(map[uint64]*spendNotification) - } - n.spendNotifications[op][msg.spendID] = msg - case *chainntnfs.HistoricalConfDispatch: // Look up whether the transaction is already // included in the active chain. We'll do this @@ -659,28 +626,6 @@ func (n *NeutrinoNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistr } } -// spendNotification couples a target outpoint along with the channel used for -// notifications once a spend of the outpoint has been detected. -type spendNotification struct { - targetOutpoint *wire.OutPoint - - spendChan chan *chainntnfs.SpendDetail - - spendID uint64 - - heightHint uint32 -} - -// spendCancel is a message sent to the NeutrinoNotifier when a client wishes -// to cancel an outstanding spend notification that has yet to be dispatched. -type spendCancel struct { - // op is the target outpoint of the notification to be cancelled. - op wire.OutPoint - - // spendID the ID of the notification to cancel. - spendID uint64 -} - // RegisterSpendNtfn registers an intent to be notified once the target // outpoint has been spent by a transaction on-chain. 
Once a spend of the // target outpoint has been detected, the details of the spending event will be From 6458868ba06be40a6128c6b8da6691e48ae0ea7b Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Fri, 5 Oct 2018 02:07:55 -0700 Subject: [PATCH 21/23] chainntnfs/interface_test: add spend reorg test --- chainntnfs/interface_test.go | 158 +++++++++++++++++++++++++++++++++++ 1 file changed, 158 insertions(+) diff --git a/chainntnfs/interface_test.go b/chainntnfs/interface_test.go index 180de026..b4bfa30e 100644 --- a/chainntnfs/interface_test.go +++ b/chainntnfs/interface_test.go @@ -232,6 +232,8 @@ func checkNotificationFields(ntfn *chainntnfs.SpendDetail, outpoint *wire.OutPoint, spenderSha *chainhash.Hash, height int32, t *testing.T) { + t.Helper() + if *ntfn.SpentOutPoint != *outpoint { t.Fatalf("ntfn includes wrong output, reports "+ "%v instead of %v", @@ -756,6 +758,8 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness, // already happened. The notifier should dispatch a spend notification // immediately. checkSpends := func() { + t.Helper() + const numClients = 2 spendClients := make([]*chainntnfs.SpendEvent, numClients) for i := 0; i < numClients; i++ { @@ -1108,6 +1112,156 @@ func testReorgConf(miner *rpctest.Harness, notifier chainntnfs.TestChainNotifier } } +// testReorgSpend ensures that the different ChainNotifier implementations +// correctly handle outpoints whose spending transaction has been reorged out of +// the chain. +func testReorgSpend(miner *rpctest.Harness, + notifier chainntnfs.TestChainNotifier, t *testing.T) { + + // We'll start by creating an output and registering a spend + // notification for it. + outpoint, pkScript := chainntnfs.CreateSpendableOutput(t, miner) + _, currentHeight, err := miner.Node.GetBestBlock() + if err != nil { + t.Fatalf("unable to retrieve current height: %v", err) + } + spendIntent, err := notifier.RegisterSpendNtfn( + outpoint, pkScript, uint32(currentHeight), + ) + if err != nil { + t.Fatalf("unable to register for spend: %v", err) + } + + // Set up a new miner that we can use to cause a reorg. + miner2, err := rpctest.New(chainntnfs.NetParams, nil, []string{"--txindex"}) + if err != nil { + t.Fatalf("unable to create mining node: %v", err) + } + if err := miner2.SetUp(false, 0); err != nil { + t.Fatalf("unable to set up mining node: %v", err) + } + defer miner2.TearDown() + + // We start by connecting the new miner to our original miner, in order + // to have a consistent view of the chain from both miners. They should + // be on the same block height. + if err := rpctest.ConnectNode(miner, miner2); err != nil { + t.Fatalf("unable to connect miners: %v", err) + } + nodeSlice := []*rpctest.Harness{miner, miner2} + if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { + t.Fatalf("unable to sync miners: %v", err) + } + _, minerHeight1, err := miner.Node.GetBestBlock() + if err != nil { + t.Fatalf("unable to get miner1's current height: %v", err) + } + _, minerHeight2, err := miner2.Node.GetBestBlock() + if err != nil { + t.Fatalf("unable to get miner2's current height: %v", err) + } + if minerHeight1 != minerHeight2 { + t.Fatalf("expected both miners to be on the same height: "+ + "%v vs %v", minerHeight1, minerHeight2) + } + + // We disconnect the two nodes, such that we can start mining on them + // individually without the other one learning about the new blocks. 
+ err = miner.Node.AddNode(miner2.P2PAddress(), rpcclient.ANRemove) + if err != nil { + t.Fatalf("unable to disconnect miners: %v", err) + } + + // Craft the spending transaction for the outpoint created above and + // confirm it under the chain of the original miner. + spendTx := chainntnfs.CreateSpendTx(t, outpoint, pkScript) + spendTxHash, err := miner.Node.SendRawTransaction(spendTx, true) + if err != nil { + t.Fatalf("unable to broadcast spend tx: %v", err) + } + if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil { + t.Fatalf("spend tx not relayed to miner: %v", err) + } + const numBlocks = 1 + if _, err := miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + + // We should see a spend notification dispatched with the correct spend + // details. + select { + case spendDetails := <-spendIntent.Spend: + checkNotificationFields( + spendDetails, outpoint, spendTxHash, + currentHeight+numBlocks, t, + ) + case <-time.After(5 * time.Second): + t.Fatal("expected spend notification to be dispatched") + } + + // Now, with the other miner, we'll generate one more block than the + // other miner and connect them to cause a reorg. + if _, err := miner2.Node.Generate(numBlocks + 1); err != nil { + t.Fatalf("unable to generate blocks: %v", err) + } + if err := rpctest.ConnectNode(miner, miner2); err != nil { + t.Fatalf("unable to connect miners: %v", err) + } + nodeSlice = []*rpctest.Harness{miner2, miner} + if err := rpctest.JoinNodes(nodeSlice, rpctest.Blocks); err != nil { + t.Fatalf("unable to sync miners: %v", err) + } + _, minerHeight1, err = miner.Node.GetBestBlock() + if err != nil { + t.Fatalf("unable to get miner1's current height: %v", err) + } + _, minerHeight2, err = miner2.Node.GetBestBlock() + if err != nil { + t.Fatalf("unable to get miner2's current height: %v", err) + } + if minerHeight1 != minerHeight2 { + t.Fatalf("expected both miners to be on the same height: "+ + "%v vs %v", minerHeight1, minerHeight2) + } + + // We should receive a reorg notification. + select { + case _, ok := <-spendIntent.Reorg: + if !ok { + t.Fatal("unexpected reorg channel closed") + } + case <-time.After(5 * time.Second): + t.Fatal("expected to receive reorg notification") + } + + // Now that both miners are on the same chain, we'll confirm the + // spending transaction of the outpoint and receive a notification for + // it. + if _, err = miner2.Node.SendRawTransaction(spendTx, true); err != nil { + t.Fatalf("unable to broadcast spend tx: %v", err) + } + if err := chainntnfs.WaitForMempoolTx(miner, spendTxHash); err != nil { + t.Fatalf("tx not relayed to miner: %v", err) + } + _, currentHeight, err = miner.Node.GetBestBlock() + if err != nil { + t.Fatalf("unable to retrieve current height: %v", err) + } + if _, err := miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate single block: %v", err) + } + + select { + case spendDetails := <-spendIntent.Spend: + checkNotificationFields( + spendDetails, outpoint, spendTxHash, + currentHeight+numBlocks, t, + ) + case <-time.After(5 * time.Second): + t.Fatal("expected spend notification to be dispatched") + } +} + // testCatchUpClientOnMissedBlocks tests the case of multiple registered client // receiving historical block epoch notifications due to their best known block // being out of date. 
@@ -1549,6 +1703,10 @@ var ntfnTests = []testCase{ name: "reorg conf", test: testReorgConf, }, + { + name: "reorg spend", + test: testReorgSpend, + }, } var blockCatchupTests = []blockCatchupTestCase{ From 60a1d73e0838e2f0b9a93d9fd8e4580b8d4478e2 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Sun, 14 Oct 2018 17:52:09 -0700 Subject: [PATCH 22/23] chainntnfs/txnotifier: commit height hint after rescan is complete In this commit, we'll now commit the current height of the TxNotifier as the height hint for an outpoint/transaction after a rescan has been completed and has determined that the outpoint is unspent/transaction is unconfirmed. We do this to prevent another potentially long rescan if the daemon is restarted before a new block comes in (which is when the hints will be updated again). --- chainntnfs/txnotifier.go | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index 50a243b1..e2a682ea 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -447,6 +447,20 @@ func (n *TxNotifier) UpdateConfDetails(txid chainhash.Hash, if details == nil { Log.Debugf("Conf details for txid=%v not found during "+ "historical dispatch, waiting to dispatch at tip", txid) + + // We'll commit the current height as the confirm hint to + // prevent another potentially long rescan if we restart before + // a new block comes in. + err := n.confirmHintCache.CommitConfirmHint( + n.currentHeight, txid, + ) + if err != nil { + // The error is not fatal as this is an optimistic + // optimization, so we'll avoid returning an error. + Log.Debugf("Unable to update confirm hint to %d for "+ + "%v: %v", n.currentHeight, txid, err) + } + return nil } @@ -792,6 +806,17 @@ func (n *TxNotifier) updateSpendDetails(op wire.OutPoint, // If the historical rescan was not able to find a spending transaction // for this outpoint, then we can track the spend at tip. if details == nil { + // We'll commit the current height as the spend hint to prevent + // another potentially long rescan if we restart before a new + // block comes in. + err := n.spendHintCache.CommitSpendHint(n.currentHeight, op) + if err != nil { + // The error is not fatal as this is an optimistic + // optimization, so we'll avoid returning an error. + Log.Debugf("Unable to update spend hint to %d for %v: %v", + n.currentHeight, op, err) + } + return nil } From e6b1a27cd732ff8bb644bba71f4bb27ca3a8abef Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Sat, 20 Oct 2018 16:30:57 -0700 Subject: [PATCH 23/23] chainntnfs/neutrinonotify: make filter update synchronous In this commit, we modify the notifier to handle filter updates synchronously. We do this to prevent race conditions between new block notifications and filter updates. Otherwise, it's possible for a new block to come in that should match our filter, but doesn't due to the filter being updated after. We also modify their order so that the filter is updated first. We do this so we can immediately start watching for the event at tip while the rescan is ongoing. 
--- chainntnfs/neutrinonotify/neutrino.go | 199 +++++++++++++++----------- 1 file changed, 113 insertions(+), 86 deletions(-) diff --git a/chainntnfs/neutrinonotify/neutrino.go b/chainntnfs/neutrinonotify/neutrino.go index 494ca92b..f0d4da2e 100644 --- a/chainntnfs/neutrinonotify/neutrino.go +++ b/chainntnfs/neutrinonotify/neutrino.go @@ -217,6 +217,14 @@ type filteredBlock struct { connect bool } +// rescanFilterUpdate represents a request that will be sent to the +// notificaionRegistry in order to prevent race conditions between the filter +// update and new block notifications. +type rescanFilterUpdate struct { + updateOptions []neutrino.UpdateOption + errChan chan error +} + // onFilteredBlockConnected is a callback which is executed each a new block is // connected to the end of the main chain. func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32, @@ -283,9 +291,8 @@ out: case registerMsg := <-n.notificationRegistry: switch msg := registerMsg.(type) { case *chainntnfs.HistoricalConfDispatch: - // Look up whether the transaction is already - // included in the active chain. We'll do this - // in a goroutine to prevent blocking + // We'll start a historical rescan chain of the + // chain asynchronously to prevent blocking // potentially long rescans. n.wg.Add(1) go func() { @@ -299,18 +306,6 @@ out: chainntnfs.Log.Error(err) } - // We'll map the script into an address - // type so we can instruct neutrino to - // match if the transaction containing - // the script is found in a block. - params := n.p2pNode.ChainParams() - _, addrs, _, err := txscript.ExtractPkScriptAddrs( - msg.PkScript, ¶ms, - ) - if err != nil { - chainntnfs.Log.Error(err) - } - // If the historical dispatch finished // without error, we will invoke // UpdateConfDetails even if none were @@ -324,25 +319,6 @@ out: if err != nil { chainntnfs.Log.Error(err) } - - if confDetails != nil { - return - } - - // If we can't fully dispatch - // confirmation, then we'll update our - // filter so we can be notified of its - // future initial confirmation. - rescanUpdate := []neutrino.UpdateOption{ - neutrino.AddAddrs(addrs...), - neutrino.Rewind(msg.EndHeight), - neutrino.DisableDisconnectedNtfns(true), - } - err = n.chainView.Update(rescanUpdate...) - if err != nil { - chainntnfs.Log.Errorf("Unable to update rescan: %v", - err) - } }() case *blockEpochRegistration: @@ -367,6 +343,14 @@ out: } } msg.errorChan <- nil + + case *rescanFilterUpdate: + err := n.chainView.Update(msg.updateOptions...) + if err != nil { + chainntnfs.Log.Errorf("Unable to "+ + "update rescan filter: %v", err) + } + msg.errChan <- err } case item := <-n.chainUpdates.ChanOut(): @@ -658,9 +642,47 @@ func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, return ntfn.Event, nil } - // Ensure that neutrino is caught up to the height hint before we - // attempt to fetch the UTXO from the chain. If we're behind, then we - // may miss a notification dispatch. + // To determine whether this outpoint has been spent on-chain, we'll + // update our filter to watch for the transaction at tip and we'll also + // dispatch a historical rescan to determine if it has been spent in the + // past. + // + // We'll update our filter first to ensure we can immediately detect the + // spend at tip. To do so, we'll map the script into an address + // type so we can instruct neutrino to match if the transaction + // containing the script is found in a block. 
+ inputToWatch := neutrino.InputWithScript{ + OutPoint: *outpoint, + PkScript: pkScript, + } + errChan := make(chan error, 1) + select { + case n.notificationRegistry <- &rescanFilterUpdate{ + updateOptions: []neutrino.UpdateOption{ + neutrino.AddInputs(inputToWatch), + neutrino.Rewind(historicalDispatch.EndHeight), + neutrino.DisableDisconnectedNtfns(true), + }, + errChan: errChan, + }: + case <-n.quit: + return nil, ErrChainNotifierShuttingDown + } + + select { + case err = <-errChan: + case <-n.quit: + return nil, ErrChainNotifierShuttingDown + } + if err != nil { + return nil, fmt.Errorf("unable to update filter: %v", err) + } + + // With the filter updated, we'll dispatch our historical rescan to + // ensure we detect the spend if it happened in the past. We'll ensure + // that neutrino is caught up to the starting height before we attempt + // to fetch the UTXO from the chain. If we're behind, then we may miss a + // notification dispatch. for { n.heightMtx.RLock() currentHeight := n.bestHeight @@ -673,12 +695,6 @@ func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, time.Sleep(time.Millisecond * 200) } - // Before sending off the notification request, we'll attempt to see if - // this output is still spent or not at this point in the chain. - inputToWatch := neutrino.InputWithScript{ - OutPoint: *outpoint, - PkScript: pkScript, - } spendReport, err := n.p2pNode.GetUtxo( neutrino.WatchInputs(inputToWatch), neutrino.StartBlock(&waddrmgr.BlockStamp{ @@ -694,60 +710,28 @@ func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, // If a spend report was returned, and the transaction is present, then // this means that the output is already spent. + var spendDetails *chainntnfs.SpendDetail if spendReport != nil && spendReport.SpendingTx != nil { - // As a result, we'll launch a goroutine to immediately - // dispatch the notification with a normal response. spendingTxHash := spendReport.SpendingTx.TxHash() - spendDetails := &chainntnfs.SpendDetail{ + spendDetails = &chainntnfs.SpendDetail{ SpentOutPoint: outpoint, SpenderTxHash: &spendingTxHash, SpendingTx: spendReport.SpendingTx, SpenderInputIndex: spendReport.SpendingInputIndex, SpendingHeight: int32(spendReport.SpendingTxHeight), } - - err := n.txNotifier.UpdateSpendDetails(*outpoint, spendDetails) - if err != nil { - return nil, err - } - - return ntfn.Event, nil } - // If the output is still unspent, then we'll mark our historical rescan - // as complete and update our rescan's filter to watch for the spend of - // the outpoint in question. - if err := n.txNotifier.UpdateSpendDetails(*outpoint, nil); err != nil { - return nil, err - } - - rescanUpdate := []neutrino.UpdateOption{ - neutrino.AddInputs(inputToWatch), - neutrino.Rewind(historicalDispatch.EndHeight), - neutrino.DisableDisconnectedNtfns(true), - } - - if err := n.chainView.Update(rescanUpdate...); err != nil { - return nil, err - } - - select { - case n.notificationRegistry <- ntfn: - case <-n.quit: - return nil, ErrChainNotifierShuttingDown - } - - // Finally, we'll add a spent hint with the current height to the cache - // in order to better keep track of when this outpoint is spent. - err = n.spendHintCache.CommitSpendHint(currentHeight, *outpoint) + // Finally, no matter whether the rescan found a spend in the past or + // not, we'll mark our historical rescan as complete to ensure the + // outpoint's spend hint gets updated upon connected/disconnected + // blocks. 
+ err = n.txNotifier.UpdateSpendDetails(*outpoint, spendDetails) if err != nil { - // The error is not fatal, so we should not return an error to - // the caller. - chainntnfs.Log.Errorf("Unable to update spend hint to %d for "+ - "%v: %v", currentHeight, outpoint, err) + return nil, err } - return spendEvent, nil + return ntfn.Event, nil } // RegisterConfirmationsNtfn registers a notification with NeutrinoNotifier @@ -784,12 +768,55 @@ func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, return ntfn.Event, nil } + // To determine whether this transaction has confirmed on-chain, we'll + // update our filter to watch for the transaction at tip and we'll also + // dispatch a historical rescan to determine if it has confirmed in the + // past. + // + // We'll update our filter first to ensure we can immediately detect the + // confirmation at tip. To do so, we'll map the script into an address + // type so we can instruct neutrino to match if the transaction + // containing the script is found in a block. + params := n.p2pNode.ChainParams() + _, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, ¶ms) + if err != nil { + return nil, fmt.Errorf("unable to extract script: %v", err) + } + + // We'll send the filter update request to the notifier's main event + // handler and wait for its response. + errChan := make(chan error, 1) select { - case n.notificationRegistry <- dispatch: - return ntfn.Event, nil + case n.notificationRegistry <- &rescanFilterUpdate{ + updateOptions: []neutrino.UpdateOption{ + neutrino.AddAddrs(addrs...), + neutrino.Rewind(dispatch.EndHeight), + neutrino.DisableDisconnectedNtfns(true), + }, + errChan: errChan, + }: case <-n.quit: return nil, ErrChainNotifierShuttingDown } + + select { + case err = <-errChan: + case <-n.quit: + return nil, ErrChainNotifierShuttingDown + } + if err != nil { + return nil, fmt.Errorf("unable to update filter: %v", err) + } + + // Finally, with the filter updates, we can dispatch the historical + // rescan to ensure we can detect if the event happened in the past. + select { + case n.notificationRegistry <- dispatch: + case <-n.quit: + return nil, ErrChainNotifierShuttingDown + } + + return ntfn.Event, nil } // blockEpochRegistration represents a client's intent to receive a