package btcdnotify

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/btcjson"
	"github.com/btcsuite/btcd/chaincfg"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/queue"
)

const (
	// notifierType uniquely identifies this concrete implementation of the
	// ChainNotifier interface.
	notifierType = "btcd"
)

// chainUpdate encapsulates an update to the current main chain. This struct is
// used as an element within an unbounded queue in order to avoid blocking the
// main rpc dispatch routine.
type chainUpdate struct {
	blockHash   *chainhash.Hash
	blockHeight int32

	// connect is true if this update is a new block and false if it is a
	// disconnected block.
	connect bool
}

// txUpdate encapsulates a transaction related notification sent from btcd to
// the registered RPC client. This struct is used as an element within an
// unbounded queue in order to avoid blocking the main rpc dispatch routine.
type txUpdate struct {
	tx      *btcutil.Tx
	details *btcjson.BlockDetails
}

// TODO(roasbeef): generalize struct below:
//  * move chans to config, allow outside callers to handle send conditions

// BtcdNotifier implements the ChainNotifier interface using btcd's websockets
// notifications. Multiple concurrent clients are supported. All notifications
// are achieved via non-blocking sends on client channels.
type BtcdNotifier struct {
	epochClientCounter uint64 // To be used atomically.

	start   sync.Once
	active  int32 // To be used atomically.
	stopped int32 // To be used atomically.

	chainConn   *rpcclient.Client
	chainParams *chaincfg.Params

	notificationCancels  chan interface{}
	notificationRegistry chan interface{}

	txNotifier *chainntnfs.TxNotifier

	blockEpochClients map[uint64]*blockEpochRegistration

	bestBlock chainntnfs.BlockEpoch

	chainUpdates *queue.ConcurrentQueue
	txUpdates    *queue.ConcurrentQueue

	// spendHintCache is a cache used to query and update the latest height
	// hints for an outpoint. Each height hint represents the earliest
	// height at which the outpoint could have been spent within the chain.
	spendHintCache chainntnfs.SpendHintCache

	// confirmHintCache is a cache used to query the latest height hints for
	// a transaction. Each height hint represents the earliest height at
	// which the transaction could have confirmed within the chain.
	confirmHintCache chainntnfs.ConfirmHintCache

	wg   sync.WaitGroup
	quit chan struct{}
}

// Ensure BtcdNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*BtcdNotifier)(nil)

// New returns a new BtcdNotifier instance. This function assumes the btcd node
// detailed in the passed configuration is already running, and willing to
// accept new websockets clients.
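//
// A minimal usage sketch follows; the connection values and the hint-cache
// variables are illustrative placeholders, not values mandated by this
// package:
//
//	cfg := &rpcclient.ConnConfig{
//		Host:         "localhost:18334",
//		Endpoint:     "ws",
//		User:         "rpcuser",
//		Pass:         "rpcpass",
//		Certificates: rpcCert,
//	}
//	notifier, err := New(cfg, &chaincfg.SimNetParams, spendHintCache, confirmHintCache)
//	if err != nil {
//		// handle error
//	}
//	if err := notifier.Start(); err != nil {
//		// handle error
//	}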
func New(config *rpcclient.ConnConfig, chainParams *chaincfg.Params,
	spendHintCache chainntnfs.SpendHintCache,
	confirmHintCache chainntnfs.ConfirmHintCache) (*BtcdNotifier, error) {

	notifier := &BtcdNotifier{
		chainParams: chainParams,

		notificationCancels:  make(chan interface{}),
		notificationRegistry: make(chan interface{}),

		blockEpochClients: make(map[uint64]*blockEpochRegistration),

		chainUpdates: queue.NewConcurrentQueue(10),
		txUpdates:    queue.NewConcurrentQueue(10),

		spendHintCache:   spendHintCache,
		confirmHintCache: confirmHintCache,

		quit: make(chan struct{}),
	}

	ntfnCallbacks := &rpcclient.NotificationHandlers{
		OnBlockConnected:    notifier.onBlockConnected,
		OnBlockDisconnected: notifier.onBlockDisconnected,
		OnRedeemingTx:       notifier.onRedeemingTx,
	}

	// Disable connecting to btcd within the rpcclient.New method. We
	// defer establishing the connection to our .Start() method.
	config.DisableConnectOnNew = true
	config.DisableAutoReconnect = false
	chainConn, err := rpcclient.New(config, ntfnCallbacks)
	if err != nil {
		return nil, err
	}
	notifier.chainConn = chainConn

	return notifier, nil
}

// Start connects to the running btcd node over websockets, registers for block
// notifications, and finally launches all related helper goroutines.
func (b *BtcdNotifier) Start() error {
	var startErr error
	b.start.Do(func() {
		startErr = b.startNotifier()
	})
	return startErr
}

// Started returns true if this instance has been started, and false otherwise.
func (b *BtcdNotifier) Started() bool {
	return atomic.LoadInt32(&b.active) != 0
}

// Stop shuts down the BtcdNotifier.
func (b *BtcdNotifier) Stop() error {
	// Already shutting down?
	if atomic.AddInt32(&b.stopped, 1) != 1 {
		return nil
	}

	// Shut down the rpc client; this gracefully disconnects from btcd and
	// cleans up all related resources.
	b.chainConn.Shutdown()

	close(b.quit)
	b.wg.Wait()

	b.chainUpdates.Stop()
	b.txUpdates.Stop()

	// Notify all pending clients of our shutdown by closing the related
	// notification channels.
	for _, epochClient := range b.blockEpochClients {
		close(epochClient.cancelChan)
		epochClient.wg.Wait()

		close(epochClient.epochChan)
	}
	b.txNotifier.TearDown()

	return nil
}
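
// startNotifier performs the one-time startup work for the notifier: it
// starts the internal update queues, connects to the btcd backend, primes the
// TxNotifier with the current best block, registers for block notifications,
// and launches the notificationDispatcher goroutine.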
func (b *BtcdNotifier) startNotifier() error {
	// Start our concurrent queues before starting the chain connection, to
	// ensure onBlockConnected and onRedeemingTx callbacks won't be
	// blocked.
	b.chainUpdates.Start()
	b.txUpdates.Start()

	// Connect to btcd, and register for notifications on connected, and
	// disconnected blocks.
	if err := b.chainConn.Connect(20); err != nil {
		b.txUpdates.Stop()
		b.chainUpdates.Stop()
		return err
	}

	currentHash, currentHeight, err := b.chainConn.GetBestBlock()
	if err != nil {
		b.txUpdates.Stop()
		b.chainUpdates.Stop()
		return err
	}

	b.txNotifier = chainntnfs.NewTxNotifier(
		uint32(currentHeight), chainntnfs.ReorgSafetyLimit,
		b.confirmHintCache, b.spendHintCache,
	)

	b.bestBlock = chainntnfs.BlockEpoch{
		Height: currentHeight,
		Hash:   currentHash,
	}

	if err := b.chainConn.NotifyBlocks(); err != nil {
		b.txUpdates.Stop()
		b.chainUpdates.Stop()
		return err
	}

	b.wg.Add(1)
	go b.notificationDispatcher()

	// Set the active flag now that we've completed the full startup.
	atomic.StoreInt32(&b.active, 1)

	return nil
}

// onBlockConnected implements the OnBlockConnected callback for rpcclient.
// Ingesting a block updates the wallet's internal utxo state based on the
// outputs created and destroyed within each block.
func (b *BtcdNotifier) onBlockConnected(hash *chainhash.Hash, height int32, t time.Time) {
	// Append this new chain update to the end of the queue of new chain
	// updates.
	select {
	case b.chainUpdates.ChanIn() <- &chainUpdate{
		blockHash:   hash,
		blockHeight: height,
		connect:     true,
	}:
	case <-b.quit:
		return
	}
}

// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
//
// TODO(halseth): this is currently used for complete blocks. Change to use
// onFilteredBlockConnected and onFilteredBlockDisconnected, making it easier
// to unify with the Neutrino implementation.
type filteredBlock struct {
	hash   chainhash.Hash
	height uint32
	txns   []*btcutil.Tx

	// connect is true if this update is a new block and false if it is a
	// disconnected block.
	connect bool
}

// onBlockDisconnected implements the OnBlockDisconnected callback for
// rpcclient.
func (b *BtcdNotifier) onBlockDisconnected(hash *chainhash.Hash, height int32, t time.Time) {
	// Append this new chain update to the end of the queue of new chain
	// updates.
	select {
	case b.chainUpdates.ChanIn() <- &chainUpdate{
		blockHash:   hash,
		blockHeight: height,
		connect:     false,
	}:
	case <-b.quit:
		return
	}
}

// onRedeemingTx implements the OnRedeemingTx callback for rpcclient.
func (b *BtcdNotifier) onRedeemingTx(tx *btcutil.Tx, details *btcjson.BlockDetails) {
	// Append this new transaction update to the end of the queue of new
	// transaction updates.
	select {
	case b.txUpdates.ChanIn() <- &txUpdate{tx, details}:
	case <-b.quit:
		return
	}
}

// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches.
func (b *BtcdNotifier) notificationDispatcher() {
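	// The dispatcher runs a single labeled for/select loop: the inner
	// handlers use "continue out" to move on to the next message and
	// "break out" to exit the loop entirely once the quit channel closes.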
out:
	for {
		select {
		case cancelMsg := <-b.notificationCancels:
			switch msg := cancelMsg.(type) {
			case *epochCancel:
				chainntnfs.Log.Infof("Cancelling epoch "+
					"notification, epoch_id=%v", msg.epochID)

				// First, we'll lookup the original
				// registration in order to stop the active
				// queue goroutine.
				reg := b.blockEpochClients[msg.epochID]
				reg.epochQueue.Stop()

				// Next, close the cancel channel for this
				// specific client, and wait for the client to
				// exit.
				close(b.blockEpochClients[msg.epochID].cancelChan)
				b.blockEpochClients[msg.epochID].wg.Wait()

				// Once the client has exited, we can then
				// safely close the channel used to send epoch
				// notifications, in order to notify any
				// listeners that the intent has been
				// canceled.
				close(b.blockEpochClients[msg.epochID].epochChan)
				delete(b.blockEpochClients, msg.epochID)
			}

		case registerMsg := <-b.notificationRegistry:
			switch msg := registerMsg.(type) {
			case *chainntnfs.HistoricalConfDispatch:
				// Look up whether the transaction/output script
				// has already confirmed in the active chain.
				// We'll do this in a goroutine to prevent
				// blocking potentially long rescans.
				//
				// TODO(wilmer): add retry logic if rescan fails?
				b.wg.Add(1)
				go func() {
					defer b.wg.Done()

					confDetails, _, err := b.historicalConfDetails(
						msg.ConfRequest,
						msg.StartHeight, msg.EndHeight,
					)
					if err != nil {
						chainntnfs.Log.Error(err)
						return
					}

					// If the historical dispatch finished
					// without error, we will invoke
					// UpdateConfDetails even if none were
					// found. This allows the notifier to
					// begin safely updating the height hint
					// cache at tip, since any pending
					// rescans have now completed.
					err = b.txNotifier.UpdateConfDetails(
						msg.ConfRequest, confDetails,
					)
					if err != nil {
						chainntnfs.Log.Error(err)
					}
				}()

			case *blockEpochRegistration:
				chainntnfs.Log.Infof("New block epoch subscription")

				b.blockEpochClients[msg.epochID] = msg

				// If the client did not provide their best
				// known block, then we'll immediately dispatch
				// a notification for the current tip.
				if msg.bestBlock == nil {
					b.notifyBlockEpochClient(
						msg, b.bestBlock.Height,
						b.bestBlock.Hash,
					)

					msg.errorChan <- nil
					continue
				}

				// Otherwise, we'll attempt to deliver the
				// backlog of notifications from their best
				// known block.
				missedBlocks, err := chainntnfs.GetClientMissedBlocks(
					b.chainConn, msg.bestBlock,
					b.bestBlock.Height, true,
				)
				if err != nil {
					msg.errorChan <- err
					continue
				}

				for _, block := range missedBlocks {
					b.notifyBlockEpochClient(
						msg, block.Height, block.Hash,
					)
				}

				msg.errorChan <- nil
			}

		case item := <-b.chainUpdates.ChanOut():
			update := item.(*chainUpdate)
			if update.connect {
				blockHeader, err :=
					b.chainConn.GetBlockHeader(update.blockHash)
				if err != nil {
					chainntnfs.Log.Errorf("Unable to fetch "+
						"block header: %v", err)
					continue
				}

				if blockHeader.PrevBlock != *b.bestBlock.Hash {
					// Handle the case where the notifier
					// missed some blocks from its chain
					// backend.
					chainntnfs.Log.Infof("Missed blocks, " +
						"attempting to catch up")
					newBestBlock, missedBlocks, err :=
						chainntnfs.HandleMissedBlocks(
							b.chainConn,
							b.txNotifier,
							b.bestBlock,
							update.blockHeight,
							true,
						)
					if err != nil {
						// Set the bestBlock here in case
						// a catch up partially completed.
						b.bestBlock = newBestBlock
						chainntnfs.Log.Error(err)
						continue
					}

					for _, block := range missedBlocks {
						err := b.handleBlockConnected(block)
						if err != nil {
							chainntnfs.Log.Error(err)
							continue out
						}
					}
				}

				newBlock := chainntnfs.BlockEpoch{
					Height: update.blockHeight,
					Hash:   update.blockHash,
				}
				if err := b.handleBlockConnected(newBlock); err != nil {
					chainntnfs.Log.Error(err)
				}
				continue
			}

			if update.blockHeight != b.bestBlock.Height {
				chainntnfs.Log.Infof("Missed disconnected " +
					"blocks, attempting to catch up")
			}

			newBestBlock, err := chainntnfs.RewindChain(
				b.chainConn, b.txNotifier, b.bestBlock,
				update.blockHeight-1,
			)
			if err != nil {
				chainntnfs.Log.Errorf("Unable to rewind chain "+
					"from height %d to height %d: %v",
					b.bestBlock.Height, update.blockHeight-1, err)
			}

			// Set the bestBlock here in case a chain rewind
			// partially completed.
			b.bestBlock = newBestBlock

		case item := <-b.txUpdates.ChanOut():
			newSpend := item.(*txUpdate)

			// We only care about notifying on confirmed spends, so
			// if this is a mempool spend, we can ignore it and wait
			// for the spend to appear on-chain.
			if newSpend.details == nil {
				continue
			}

			err := b.txNotifier.ProcessRelevantSpendTx(
				newSpend.tx, uint32(newSpend.details.Height),
			)
			if err != nil {
				chainntnfs.Log.Errorf("Unable to process "+
					"transaction %v: %v",
					newSpend.tx.Hash(), err)
			}

		case <-b.quit:
			break out
		}
	}
	b.wg.Done()
}

// historicalConfDetails looks up whether a confirmation request (txid/output
// script) has already been included in a block in the active chain and, if so,
// returns details about said block.
func (b *BtcdNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest,
	startHeight, endHeight uint32) (*chainntnfs.TxConfirmation,
	chainntnfs.TxConfStatus, error) {

	// If a txid was not provided, then we should dispatch upon seeing the
	// script on-chain, so we'll short-circuit straight to scanning manually
	// as there doesn't exist a script index to query.
	if confRequest.TxID == chainntnfs.ZeroHash {
		return b.confDetailsManually(
			confRequest, startHeight, endHeight,
		)
	}

	// Otherwise, we'll dispatch upon seeing a transaction on-chain with the
	// given hash.
	//
	// We'll first attempt to retrieve the transaction using the node's
	// txindex.
	txNotFoundErr := "No information available about transaction"
	txConf, txStatus, err := chainntnfs.ConfDetailsFromTxIndex(
		b.chainConn, confRequest, txNotFoundErr,
	)

	// We'll then check the status of the transaction lookup returned to
	// determine whether we should proceed with any fallback methods.
	switch {

	// We failed querying the index for the transaction, fall back to
	// scanning manually.
	case err != nil:
		chainntnfs.Log.Debugf("Unable to determine confirmation of %v "+
			"through the backend's txindex (%v), scanning manually",
			confRequest.TxID, err)

		return b.confDetailsManually(
			confRequest, startHeight, endHeight,
		)

	// The transaction was found within the node's mempool.
	case txStatus == chainntnfs.TxFoundMempool:

	// The transaction was found within the node's txindex.
	case txStatus == chainntnfs.TxFoundIndex:

	// The transaction was not found within the node's mempool or txindex.
	case txStatus == chainntnfs.TxNotFoundIndex:

	// Unexpected txStatus returned.
	default:
		return nil, txStatus,
			fmt.Errorf("got unexpected txConfStatus: %v", txStatus)
	}

	return txConf, txStatus, nil
}

// confDetailsManually looks up whether a transaction/output script has already
// been included in a block in the active chain by scanning the chain's blocks
// within the given range. If the transaction/output script is found, its
// confirmation details are returned. Otherwise, nil is returned.
func (b *BtcdNotifier) confDetailsManually(confRequest chainntnfs.ConfRequest,
	startHeight, endHeight uint32) (*chainntnfs.TxConfirmation,
	chainntnfs.TxConfStatus, error) {

	// Begin scanning blocks at every height to determine which block the
	// transaction was included in.
	for height := endHeight; height >= startHeight && height > 0; height-- {
		// Ensure we haven't been requested to shut down before
		// processing the next height.
		select {
		case <-b.quit:
			return nil, chainntnfs.TxNotFoundManually,
				chainntnfs.ErrChainNotifierShuttingDown
		default:
		}

		blockHash, err := b.chainConn.GetBlockHash(int64(height))
		if err != nil {
			return nil, chainntnfs.TxNotFoundManually,
				fmt.Errorf("unable to get hash from block "+
					"with height %d", height)
		}

		// TODO: fetch the neutrino filters instead.
		block, err := b.chainConn.GetBlock(blockHash)
		if err != nil {
			return nil, chainntnfs.TxNotFoundManually,
				fmt.Errorf("unable to get block with hash "+
					"%v: %v", blockHash, err)
		}

		// For every transaction in the block, check which one matches
		// our request. If we find one that does, we can dispatch its
		// confirmation details.
		for txIndex, tx := range block.Transactions {
			if !confRequest.MatchesTx(tx) {
				continue
			}

			return &chainntnfs.TxConfirmation{
				Tx:          tx,
				BlockHash:   blockHash,
				BlockHeight: height,
				TxIndex:     uint32(txIndex),
			}, chainntnfs.TxFoundManually, nil
		}
	}

	// If we reach here, then we were not able to find the transaction
	// within a block, so we avoid returning an error.
	return nil, chainntnfs.TxNotFoundManually, nil
}

// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included in this block will be processed to either send
// notifications now or after numConfirmations confs.
// TODO(halseth): this is reusing the neutrino notifier implementation, unify
// them.
func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) error {
	// First, we'll fetch the raw block as we'll need to gather all the
	// transactions to determine whether any are relevant to our registered
	// clients.
	rawBlock, err := b.chainConn.GetBlock(epoch.Hash)
	if err != nil {
		return fmt.Errorf("unable to get block: %v", err)
	}
	newBlock := &filteredBlock{
		hash:    *epoch.Hash,
		height:  uint32(epoch.Height),
		txns:    btcutil.NewBlock(rawBlock).Transactions(),
		connect: true,
	}

	// We'll then extend the txNotifier's height with the information of
	// this new block, which will handle all of the notification logic for
	// us.
	err = b.txNotifier.ConnectTip(
		&newBlock.hash, newBlock.height, newBlock.txns,
	)
	if err != nil {
		return fmt.Errorf("unable to connect tip: %v", err)
	}

	chainntnfs.Log.Infof("New block: height=%v, sha=%v", epoch.Height,
		epoch.Hash)

	// Now that we've guaranteed the new block extends the txNotifier's
	// current tip, we'll proceed to dispatch notifications to all of our
	// registered clients who have had notifications fulfilled. Before
	// doing so, we'll make sure to update our in-memory state in order to
	// satisfy any client requests based upon the new block.
	b.bestBlock = epoch

	b.notifyBlockEpochs(epoch.Height, epoch.Hash)
	return b.txNotifier.NotifyHeight(uint32(epoch.Height))
}

// notifyBlockEpochs notifies all registered block epoch clients of the newly
// connected block to the main chain.
func (b *BtcdNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
	for _, client := range b.blockEpochClients {
		b.notifyBlockEpochClient(client, newHeight, newSha)
	}
}

// notifyBlockEpochClient sends a registered block epoch client a notification
// about a specific block.
func (b *BtcdNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
	height int32, sha *chainhash.Hash) {

	epoch := &chainntnfs.BlockEpoch{
		Height: height,
		Hash:   sha,
	}

	select {
	case epochClient.epochQueue.ChanIn() <- epoch:
	case <-epochClient.cancelChan:
	case <-b.quit:
	}
}

// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint/output script has been spent by a transaction on-chain. When
// intending to be notified of the spend of an output script, a nil outpoint
// must be used. The heightHint should represent the earliest height in the
// chain of the transaction that spent the outpoint/output script.
//
// Once a spend has been detected, the details of the spending event will be
// sent across the 'Spend' channel.
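//
// A minimal usage sketch; the outpoint, script, height hint, and quit channel
// below are illustrative placeholders:
//
//	spendEvent, err := notifier.RegisterSpendNtfn(&op, pkScript, heightHint)
//	if err != nil {
//		// handle error
//	}
//	select {
//	case details := <-spendEvent.Spend:
//		// details describes the spending transaction.
//	case <-quit:
//	}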
func (b *BtcdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
	pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {

	// Register the spend notification with the TxNotifier. A non-nil value
	// for `dispatch` will be returned if we are required to perform a
	// manual scan for the spend. Otherwise the notifier will begin
	// watching at tip for the outpoint/output script to be spent.
	ntfn, err := b.txNotifier.RegisterSpend(outpoint, pkScript, heightHint)
	if err != nil {
		return nil, err
	}

	// We'll then request the backend to notify us when it has detected the
	// outpoint/output script as spent.
	//
	// TODO(wilmer): use LoadFilter API instead.
	if outpoint == nil || *outpoint == chainntnfs.ZeroOutPoint {
		_, addrs, _, err := txscript.ExtractPkScriptAddrs(
			pkScript, b.chainParams,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to parse script: %v", err)
		}
		if err := b.chainConn.NotifyReceived(addrs); err != nil {
			return nil, err
		}
	} else {
		ops := []*wire.OutPoint{outpoint}
		if err := b.chainConn.NotifySpent(ops); err != nil {
			return nil, err
		}
	}

	// If the txNotifier didn't return any details to perform a historical
	// scan of the chain, then we can return early as there's nothing left
	// for us to do.
	if ntfn.HistoricalDispatch == nil {
		return ntfn.Event, nil
	}

	// Otherwise, we'll need to dispatch a historical rescan to determine if
	// the outpoint was already spent at a previous height.
	//
	// We'll short-circuit the path when dispatching the spend of a script,
	// rather than an outpoint, as there aren't any additional checks we can
	// make for scripts.
	if outpoint == nil || *outpoint == chainntnfs.ZeroOutPoint {
		startHash, err := b.chainConn.GetBlockHash(
			int64(ntfn.HistoricalDispatch.StartHeight),
		)
		if err != nil {
			return nil, err
		}

		// TODO(wilmer): add retry logic if rescan fails?
		_, addrs, _, err := txscript.ExtractPkScriptAddrs(
			pkScript, b.chainParams,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to parse address: %v", err)
		}

		asyncResult := b.chainConn.RescanAsync(startHash, addrs, nil)
		go func() {
			if rescanErr := asyncResult.Receive(); rescanErr != nil {
				chainntnfs.Log.Errorf("Rescan to determine "+
					"the spend details of %v failed: %v",
					ntfn.HistoricalDispatch.SpendRequest,
					rescanErr)
			}
		}()

		return ntfn.Event, nil
	}

	// When dispatching spends of outpoints, there are a number of checks we
	// can make to start our rescan from a better height or completely avoid
	// it.
	//
	// We'll start by checking the backend's UTXO set to determine whether
	// the outpoint has been spent. If it hasn't, we can return to the
	// caller as well.
	txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
	if err != nil {
		return nil, err
	}
	if txOut != nil {
		// We'll let the txNotifier know the outpoint is still unspent
		// in order to begin updating its spend hint.
		err := b.txNotifier.UpdateSpendDetails(
			ntfn.HistoricalDispatch.SpendRequest, nil,
		)
		if err != nil {
			return nil, err
		}

		return ntfn.Event, nil
	}

	// Since the outpoint was spent, as it no longer exists within the UTXO
	// set, we'll determine when it happened by scanning the chain. We'll
	// begin by fetching the block hash of our starting height.
	startHash, err := b.chainConn.GetBlockHash(
		int64(ntfn.HistoricalDispatch.StartHeight),
	)
	if err != nil {
		return nil, fmt.Errorf("unable to get block hash for height "+
			"%d: %v", ntfn.HistoricalDispatch.StartHeight, err)
	}

	// As a minimal optimization, we'll query the backend's transaction
	// index (if enabled) to determine if we have a better rescan starting
	// height. We can do this as the GetRawTransaction call will return the
	// hash of the block it was included in within the chain.
	tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
	if err != nil {
		// Avoid returning an error if the transaction was not found to
		// proceed with fallback methods.
		jsonErr, ok := err.(*btcjson.RPCError)
		if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
			return nil, fmt.Errorf("unable to query for txid %v: %v",
				outpoint.Hash, err)
		}
	}

	// If the transaction index was enabled, we'll use the block's hash to
	// retrieve its height and check whether it provides a better starting
	// point for our rescan.
	if tx != nil {
		// If the transaction containing the outpoint hasn't confirmed
		// on-chain, then there's no need to perform a rescan.
		if tx.BlockHash == "" {
			return ntfn.Event, nil
		}

		blockHash, err := chainhash.NewHashFromStr(tx.BlockHash)
		if err != nil {
			return nil, err
		}
		blockHeader, err := b.chainConn.GetBlockHeaderVerbose(blockHash)
		if err != nil {
			return nil, fmt.Errorf("unable to get header for "+
				"block %v: %v", blockHash, err)
		}

		if uint32(blockHeader.Height) > ntfn.HistoricalDispatch.StartHeight {
			startHash, err = b.chainConn.GetBlockHash(
				int64(blockHeader.Height),
			)
			if err != nil {
				return nil, fmt.Errorf("unable to get block "+
					"hash for height %d: %v",
					blockHeader.Height, err)
			}
		}
	}

	// Now that we've determined the best starting point for our rescan,
	// we can go ahead and dispatch it.
	//
	// In order to ensure that we don't block the caller on what may be a
	// long rescan, we'll launch a new goroutine to handle the async result
	// of the rescan. We purposefully refrain from adding this goroutine to
	// the WaitGroup as we cannot wait for a quit signal due to the
	// asyncResult channel not being exposed.
	//
	// TODO(wilmer): add retry logic if rescan fails?
	asyncResult := b.chainConn.RescanAsync(
		startHash, nil, []*wire.OutPoint{outpoint},
	)
	go func() {
		if rescanErr := asyncResult.Receive(); rescanErr != nil {
			chainntnfs.Log.Errorf("Rescan to determine the spend "+
				"details of %v failed: %v", outpoint, rescanErr)
		}
	}()

	return ntfn.Event, nil
}

// RegisterConfirmationsNtfn registers an intent to be notified once the target
// txid/output script has reached numConfs confirmations on-chain. When
// intending to be notified of the confirmation of an output script, a nil txid
// must be used. The heightHint should represent the earliest height at which
// the txid/output script could have been included in the chain.
//
// Progress on the number of confirmations left can be read from the 'Updates'
// channel. Once it has reached all of its confirmations, a notification will be
// sent across the 'Confirmed' channel.
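//
// A minimal usage sketch; the txid, script, confirmation count, height hint,
// and quit channel below are illustrative placeholders:
//
//	confEvent, err := notifier.RegisterConfirmationsNtfn(txid, pkScript, 6, heightHint)
//	if err != nil {
//		// handle error
//	}
//	select {
//	case conf := <-confEvent.Confirmed:
//		// conf carries the confirmation details.
//	case <-quit:
//	}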
func (b *BtcdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	pkScript []byte,
	numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {

	// Register the conf notification with the TxNotifier. A non-nil value
	// for `dispatch` will be returned if we are required to perform a
	// manual scan for the confirmation. Otherwise the notifier will begin
	// watching at tip for the transaction to confirm.
	ntfn, err := b.txNotifier.RegisterConf(
		txid, pkScript, numConfs, heightHint,
	)
	if err != nil {
		return nil, err
	}

	if ntfn.HistoricalDispatch == nil {
		return ntfn.Event, nil
	}

	select {
	case b.notificationRegistry <- ntfn.HistoricalDispatch:
		return ntfn.Event, nil
	case <-b.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}
}

// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	epochID uint64

	epochChan chan *chainntnfs.BlockEpoch

	epochQueue *queue.ConcurrentQueue

	bestBlock *chainntnfs.BlockEpoch

	errorChan chan error

	cancelChan chan struct{}

	wg sync.WaitGroup
}

// epochCancel is a message sent to the BtcdNotifier when a client wishes to
// cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	epochID uint64
}

// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications of each new block connected to the main
// chain. Clients have the option of passing in their best known block, which
// the notifier uses to check if they are behind on blocks and catch them up. If
// they do not provide one, then a notification will be dispatched immediately
// for the current tip of the chain upon a successful registration.
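//
// A minimal usage sketch (error handling elided; passing nil requests an
// immediate notification for the current tip):
//
//	epochs, err := notifier.RegisterBlockEpochNtfn(nil)
//	if err != nil {
//		// handle error
//	}
//	defer epochs.Cancel()
//	for epoch := range epochs.Epochs {
//		// react to epoch.Height and epoch.Hash
//	}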
func (b *BtcdNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	reg := &blockEpochRegistration{
		epochQueue: queue.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		epochID:    atomic.AddUint64(&b.epochClientCounter, 1),
		bestBlock:  bestBlock,
		errorChan:  make(chan error, 1),
	}

	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				select {
				case reg.epochChan <- blockNtfn:

				case <-reg.cancelChan:
					return

				case <-b.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-b.quit:
				return
			}
		}
	}()

	select {
	case <-b.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification.")
	case b.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit epoch cancellation to notification dispatcher.
				select {
				case b.notificationCancels <- cancel:
					// Cancellation is being handled, drain
					// the epoch channel until it is closed
					// before yielding to caller.
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-b.quit:
							return
						}
					}
				case <-b.quit:
				}
			},
		}, nil
	}
}