2016-01-07 00:03:17 +03:00
|
|
|
package btcdnotify
|
|
|
|
|
|
|
|
import (
|
2016-12-15 12:07:12 +03:00
|
|
|
"errors"
|
2017-11-13 23:42:50 +03:00
|
|
|
"fmt"
|
2018-07-19 00:01:28 +03:00
|
|
|
"strings"
|
2016-01-07 00:03:17 +03:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
2016-02-27 03:30:14 +03:00
|
|
|
"time"
|
2016-01-07 00:03:17 +03:00
|
|
|
|
2018-06-05 04:34:16 +03:00
|
|
|
"github.com/btcsuite/btcd/btcjson"
|
|
|
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
|
|
|
"github.com/btcsuite/btcd/rpcclient"
|
|
|
|
"github.com/btcsuite/btcd/wire"
|
|
|
|
"github.com/btcsuite/btcutil"
|
2018-07-18 11:31:49 +03:00
|
|
|
"github.com/lightningnetwork/lnd/chainntnfs"
|
2018-10-12 18:08:14 +03:00
|
|
|
"github.com/lightningnetwork/lnd/queue"
|
2016-01-07 00:03:17 +03:00
|
|
|
)
|
|
|
|
|
const (
	// notifierType uniquely identifies this concrete implementation of the
	// ChainNotifier interface.
	notifierType = "btcd"

	// reorgSafetyLimit is the assumed maximum depth of a chain
	// reorganization. After this many confirmations, transaction
	// confirmation info will be pruned.
	reorgSafetyLimit = 100
)
|
|
|
|
|
2016-12-14 02:32:44 +03:00
|
|
|
var (
|
2017-02-24 16:32:33 +03:00
|
|
|
// ErrChainNotifierShuttingDown is used when we are trying to
|
|
|
|
// measure a spend notification when notifier is already stopped.
|
2016-12-14 02:32:44 +03:00
|
|
|
ErrChainNotifierShuttingDown = errors.New("chainntnfs: system interrupt " +
|
|
|
|
"while attempting to register for spend notification.")
|
|
|
|
)
|
|
|
|
|
// chainUpdate encapsulates an update to the current main chain. This struct is
// used as an element within an unbounded queue in order to avoid blocking the
// main rpc dispatch rule.
type chainUpdate struct {
	// blockHash is the hash of the block being connected or disconnected.
	blockHash *chainhash.Hash

	// blockHeight is the height of the block being connected or
	// disconnected.
	blockHeight int32

	// connect is true if this update is a new block and false if it is a
	// disconnected block.
	connect bool
}
|
|
|
|
|
|
|
|
// txUpdate encapsulates a transaction related notification sent from btcd to
// the registered RPC client. This struct is used as an element within an
// unbounded queue in order to avoid blocking the main rpc dispatch rule.
type txUpdate struct {
	// tx is the transaction that triggered the notification.
	tx *btcutil.Tx

	// details describes the block that included the transaction; a nil
	// value indicates the transaction was only seen in the mempool (see
	// the handling in notificationDispatcher).
	details *btcjson.BlockDetails
}
|
|
|
|
|
// TODO(roasbeef): generalize struct below:
//  * move chans to config, allow outside callers to handle send conditions

// BtcdNotifier implements the ChainNotifier interface using btcd's websockets
// notifications. Multiple concurrent clients are supported. All notifications
// are achieved via non-blocking sends on client channels.
type BtcdNotifier struct {
	confClientCounter  uint64 // To be used atomically.
	spendClientCounter uint64 // To be used atomically.
	epochClientCounter uint64 // To be used atomically.

	started int32 // To be used atomically.
	stopped int32 // To be used atomically.

	// chainConn is the RPC client used to communicate with the backing
	// btcd node over websockets.
	chainConn *rpcclient.Client

	// notificationCancels and notificationRegistry funnel cancellation
	// and registration messages into the single dispatcher goroutine.
	notificationCancels  chan interface{}
	notificationRegistry chan interface{}

	// spendNotifications maps an outpoint to the set of clients (keyed by
	// their unique spend ID) awaiting a spend notification for it.
	spendNotifications map[wire.OutPoint]map[uint64]*spendNotification

	// txConfNotifier tracks pending transaction confirmation
	// registrations.
	txConfNotifier *chainntnfs.TxConfNotifier

	// blockEpochClients maps a unique epoch ID to each registered block
	// epoch client.
	blockEpochClients map[uint64]*blockEpochRegistration

	// bestBlock is the notifier's current view of the chain tip.
	bestBlock chainntnfs.BlockEpoch

	// chainUpdates and txUpdates are unbounded queues that decouple the
	// rpcclient callbacks from the dispatcher goroutine.
	chainUpdates *queue.ConcurrentQueue
	txUpdates    *queue.ConcurrentQueue

	// spendHintCache is a cache used to query and update the latest height
	// hints for an outpoint. Each height hint represents the earliest
	// height at which the outpoint could have been spent within the chain.
	spendHintCache chainntnfs.SpendHintCache

	// confirmHintCache is a cache used to query the latest height hints for
	// a transaction. Each height hint represents the earliest height at
	// which the transaction could have confirmed within the chain.
	confirmHintCache chainntnfs.ConfirmHintCache

	wg   sync.WaitGroup
	quit chan struct{}
}
|
|
|
|
|
// Ensure BtcdNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*BtcdNotifier)(nil)
|
|
|
|
// New returns a new BtcdNotifier instance. This function assumes the btcd node
// detailed in the passed configuration is already running, and willing to
// accept new websockets clients.
func New(config *rpcclient.ConnConfig, spendHintCache chainntnfs.SpendHintCache,
	confirmHintCache chainntnfs.ConfirmHintCache) (*BtcdNotifier, error) {

	notifier := &BtcdNotifier{
		notificationCancels:  make(chan interface{}),
		notificationRegistry: make(chan interface{}),

		blockEpochClients: make(map[uint64]*blockEpochRegistration),

		spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification),

		chainUpdates: queue.NewConcurrentQueue(10),
		txUpdates:    queue.NewConcurrentQueue(10),

		spendHintCache:   spendHintCache,
		confirmHintCache: confirmHintCache,

		quit: make(chan struct{}),
	}

	// Wire the rpcclient callbacks to the notifier's handlers, which
	// simply enqueue the events for the dispatcher goroutine.
	ntfnCallbacks := &rpcclient.NotificationHandlers{
		OnBlockConnected:    notifier.onBlockConnected,
		OnBlockDisconnected: notifier.onBlockDisconnected,
		OnRedeemingTx:       notifier.onRedeemingTx,
	}

	// Disable connecting to btcd within the rpcclient.New method. We
	// defer establishing the connection to our .Start() method.
	config.DisableConnectOnNew = true
	config.DisableAutoReconnect = false
	chainConn, err := rpcclient.New(config, ntfnCallbacks)
	if err != nil {
		return nil, err
	}
	notifier.chainConn = chainConn

	return notifier, nil
}
|
|
|
|
|
// Start connects to the running btcd node over websockets, registers for block
// notifications, and finally launches all related helper goroutines.
func (b *BtcdNotifier) Start() error {
	// Already started?
	if atomic.AddInt32(&b.started, 1) != 1 {
		return nil
	}

	// Connect to btcd, and register for notifications on connected, and
	// disconnected blocks.
	if err := b.chainConn.Connect(20); err != nil {
		return err
	}
	if err := b.chainConn.NotifyBlocks(); err != nil {
		return err
	}

	// Fetch the current chain tip so we can seed both the conf notifier
	// and our own view of the best block.
	currentHash, currentHeight, err := b.chainConn.GetBestBlock()
	if err != nil {
		return err
	}

	b.txConfNotifier = chainntnfs.NewTxConfNotifier(
		uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache,
	)

	b.bestBlock = chainntnfs.BlockEpoch{
		Height: currentHeight,
		Hash:   currentHash,
	}

	// Start the queues before the dispatcher so it can immediately drain
	// any enqueued updates.
	b.chainUpdates.Start()
	b.txUpdates.Start()

	b.wg.Add(1)
	go b.notificationDispatcher()

	return nil
}
|
|
|
|
|
// Stop shuts down the BtcdNotifier.
func (b *BtcdNotifier) Stop() error {
	// Already shutting down?
	if atomic.AddInt32(&b.stopped, 1) != 1 {
		return nil
	}

	// Shutdown the rpc client, this gracefully disconnects from btcd, and
	// cleans up all related resources.
	b.chainConn.Shutdown()

	// Signal the dispatcher goroutine to exit and wait for it before
	// tearing down the queues it consumes from.
	close(b.quit)
	b.wg.Wait()

	b.chainUpdates.Stop()
	b.txUpdates.Stop()

	// Notify all pending clients of our shutdown by closing the related
	// notification channels.
	for _, spendClients := range b.spendNotifications {
		for _, spendClient := range spendClients {
			close(spendClient.spendChan)
		}
	}
	for _, epochClient := range b.blockEpochClients {
		// Cancel the client's delivery queue and wait for its goroutine
		// to exit before closing the epoch channel, so nothing sends
		// on a closed channel.
		close(epochClient.cancelChan)
		epochClient.wg.Wait()

		close(epochClient.epochChan)
	}
	b.txConfNotifier.TearDown()

	return nil
}
|
|
|
|
|
2017-08-25 04:54:17 +03:00
|
|
|
// onBlockConnected implements on OnBlockConnected callback for rpcclient.
|
2016-09-23 05:12:12 +03:00
|
|
|
// Ingesting a block updates the wallet's internal utxo state based on the
|
|
|
|
// outputs created and destroyed within each block.
|
2017-01-06 00:56:27 +03:00
|
|
|
func (b *BtcdNotifier) onBlockConnected(hash *chainhash.Hash, height int32, t time.Time) {
|
2016-09-23 05:12:12 +03:00
|
|
|
// Append this new chain update to the end of the queue of new chain
|
|
|
|
// updates.
|
2017-11-10 02:30:38 +03:00
|
|
|
b.chainUpdates.ChanIn() <- &chainUpdate{
|
|
|
|
blockHash: hash,
|
|
|
|
blockHeight: height,
|
|
|
|
connect: true,
|
|
|
|
}
|
2016-02-27 03:30:14 +03:00
|
|
|
}
|
|
|
|
|
// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
// TODO(halseth): this is currently used for complete blocks. Change to use
// onFilteredBlockConnected and onFilteredBlockDisconnected, making it easier
// to unify with the Neutrino implementation.
type filteredBlock struct {
	// hash is the hash of the block.
	hash chainhash.Hash

	// height is the height of the block within the chain.
	height uint32

	// txns holds the relevant transactions within the block, if any.
	txns []*btcutil.Tx

	// connect is true if this update is a new block and false if it is a
	// disconnected block.
	connect bool
}
|
|
|
|
|
2017-08-25 04:54:17 +03:00
|
|
|
// onBlockDisconnected implements on OnBlockDisconnected callback for rpcclient.
|
2017-01-06 00:56:27 +03:00
|
|
|
func (b *BtcdNotifier) onBlockDisconnected(hash *chainhash.Hash, height int32, t time.Time) {
|
2017-11-10 02:30:38 +03:00
|
|
|
// Append this new chain update to the end of the queue of new chain
|
|
|
|
// updates.
|
|
|
|
b.chainUpdates.ChanIn() <- &chainUpdate{
|
|
|
|
blockHash: hash,
|
|
|
|
blockHeight: height,
|
|
|
|
connect: false,
|
|
|
|
}
|
2016-02-27 03:30:14 +03:00
|
|
|
}
|
|
|
|
|
2017-08-25 04:54:17 +03:00
|
|
|
// onRedeemingTx implements on OnRedeemingTx callback for rpcclient.
|
2016-09-23 05:12:12 +03:00
|
|
|
func (b *BtcdNotifier) onRedeemingTx(tx *btcutil.Tx, details *btcjson.BlockDetails) {
|
2016-11-28 06:17:27 +03:00
|
|
|
// Append this new transaction update to the end of the queue of new
|
|
|
|
// chain updates.
|
2017-11-10 01:59:14 +03:00
|
|
|
b.txUpdates.ChanIn() <- &txUpdate{tx, details}
|
2016-02-27 03:30:14 +03:00
|
|
|
}
|
|
|
|
|
2016-02-27 04:31:07 +03:00
|
|
|
// notificationDispatcher is the primary goroutine which handles client
|
|
|
|
// notification registrations, as well as notification dispatches.
|
2018-08-09 10:05:28 +03:00
|
|
|
func (b *BtcdNotifier) notificationDispatcher() {
|
2016-01-07 00:03:17 +03:00
|
|
|
out:
|
|
|
|
for {
|
|
|
|
select {
|
2017-02-21 03:31:16 +03:00
|
|
|
case cancelMsg := <-b.notificationCancels:
|
|
|
|
switch msg := cancelMsg.(type) {
|
|
|
|
case *spendCancel:
|
|
|
|
chainntnfs.Log.Infof("Cancelling spend "+
|
|
|
|
"notification for out_point=%v, "+
|
|
|
|
"spend_id=%v", msg.op, msg.spendID)
|
|
|
|
|
|
|
|
// Before we attempt to close the spendChan,
|
|
|
|
// ensure that the notification hasn't already
|
|
|
|
// yet been dispatched.
|
|
|
|
if outPointClients, ok := b.spendNotifications[msg.op]; ok {
|
|
|
|
close(outPointClients[msg.spendID].spendChan)
|
|
|
|
delete(b.spendNotifications[msg.op], msg.spendID)
|
|
|
|
}
|
2017-07-30 05:19:28 +03:00
|
|
|
|
2017-02-21 03:31:16 +03:00
|
|
|
case *epochCancel:
|
|
|
|
chainntnfs.Log.Infof("Cancelling epoch "+
|
|
|
|
"notification, epoch_id=%v", msg.epochID)
|
|
|
|
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method archived
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
// First, we'll lookup the original
|
|
|
|
// registration in order to stop the active
|
|
|
|
// queue goroutine.
|
|
|
|
reg := b.blockEpochClients[msg.epochID]
|
|
|
|
reg.epochQueue.Stop()
|
|
|
|
|
|
|
|
// Next, close the cancel channel for this
|
2017-06-08 03:04:27 +03:00
|
|
|
// specific client, and wait for the client to
|
|
|
|
// exit.
|
2017-05-06 01:53:09 +03:00
|
|
|
close(b.blockEpochClients[msg.epochID].cancelChan)
|
2017-06-08 03:04:27 +03:00
|
|
|
b.blockEpochClients[msg.epochID].wg.Wait()
|
|
|
|
|
|
|
|
// Once the client has exited, we can then
|
|
|
|
// safely close the channel used to send epoch
|
|
|
|
// notifications, in order to notify any
|
|
|
|
// listeners that the intent has been
|
|
|
|
// cancelled.
|
2017-05-06 01:53:09 +03:00
|
|
|
close(b.blockEpochClients[msg.epochID].epochChan)
|
2017-02-21 03:31:16 +03:00
|
|
|
delete(b.blockEpochClients, msg.epochID)
|
|
|
|
}
|
2016-01-07 00:03:17 +03:00
|
|
|
case registerMsg := <-b.notificationRegistry:
|
|
|
|
switch msg := registerMsg.(type) {
|
|
|
|
case *spendNotification:
|
2016-09-08 21:27:07 +03:00
|
|
|
chainntnfs.Log.Infof("New spend subscription: "+
|
|
|
|
"utxo=%v", msg.targetOutpoint)
|
2016-11-28 06:17:27 +03:00
|
|
|
op := *msg.targetOutpoint
|
2017-02-21 03:31:16 +03:00
|
|
|
|
|
|
|
if _, ok := b.spendNotifications[op]; !ok {
|
|
|
|
b.spendNotifications[op] = make(map[uint64]*spendNotification)
|
|
|
|
}
|
|
|
|
b.spendNotifications[op][msg.spendID] = msg
|
2018-07-27 07:32:55 +03:00
|
|
|
|
2018-08-27 07:36:45 +03:00
|
|
|
case *chainntnfs.HistoricalConfDispatch:
|
2018-07-27 07:32:55 +03:00
|
|
|
// Look up whether the transaction is already
|
|
|
|
// included in the active chain. We'll do this
|
|
|
|
// in a goroutine to prevent blocking
|
|
|
|
// potentially long rescans.
|
|
|
|
b.wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer b.wg.Done()
|
|
|
|
|
2018-07-19 00:01:28 +03:00
|
|
|
confDetails, _, err := b.historicalConfDetails(
|
2018-08-27 07:36:45 +03:00
|
|
|
msg.TxID, msg.StartHeight, msg.EndHeight,
|
2018-07-27 07:32:55 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
chainntnfs.Log.Error(err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-08-25 05:35:17 +03:00
|
|
|
// If the historical dispatch finished
|
|
|
|
// without error, we will invoke
|
|
|
|
// UpdateConfDetails even if none were
|
|
|
|
// found. This allows the notifier to
|
|
|
|
// begin safely updating the height hint
|
|
|
|
// cache at tip, since any pending
|
|
|
|
// rescans have now completed.
|
|
|
|
err = b.txConfNotifier.UpdateConfDetails(
|
2018-08-27 07:36:45 +03:00
|
|
|
*msg.TxID, confDetails,
|
2018-08-25 05:35:17 +03:00
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
chainntnfs.Log.Error(err)
|
2018-07-27 07:32:55 +03:00
|
|
|
}
|
|
|
|
}()
|
2018-02-26 21:37:58 +03:00
|
|
|
|
2016-09-08 21:27:07 +03:00
|
|
|
case *blockEpochRegistration:
|
|
|
|
chainntnfs.Log.Infof("New block epoch subscription")
|
2017-05-06 01:53:09 +03:00
|
|
|
b.blockEpochClients[msg.epochID] = msg
|
2018-08-09 10:05:29 +03:00
|
|
|
if msg.bestBlock != nil {
|
|
|
|
missedBlocks, err :=
|
|
|
|
chainntnfs.GetClientMissedBlocks(
|
|
|
|
b.chainConn, msg.bestBlock,
|
|
|
|
b.bestBlock.Height, true,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
msg.errorChan <- err
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
for _, block := range missedBlocks {
|
|
|
|
b.notifyBlockEpochClient(msg,
|
|
|
|
block.Height, block.Hash)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
msg.errorChan <- nil
|
2016-01-07 00:03:17 +03:00
|
|
|
}
|
2017-02-21 03:31:16 +03:00
|
|
|
|
2017-11-10 01:59:14 +03:00
|
|
|
case item := <-b.chainUpdates.ChanOut():
|
|
|
|
update := item.(*chainUpdate)
|
2017-11-10 02:30:38 +03:00
|
|
|
if update.connect {
|
2018-08-09 10:05:30 +03:00
|
|
|
blockHeader, err :=
|
|
|
|
b.chainConn.GetBlockHeader(update.blockHash)
|
2017-11-10 02:30:38 +03:00
|
|
|
if err != nil {
|
2018-08-09 10:05:30 +03:00
|
|
|
chainntnfs.Log.Errorf("Unable to fetch "+
|
|
|
|
"block header: %v", err)
|
2017-11-10 02:30:38 +03:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2018-08-09 10:05:30 +03:00
|
|
|
if blockHeader.PrevBlock != *b.bestBlock.Hash {
|
|
|
|
// Handle the case where the notifier
|
|
|
|
// missed some blocks from its chain
|
|
|
|
// backend
|
|
|
|
chainntnfs.Log.Infof("Missed blocks, " +
|
|
|
|
"attempting to catch up")
|
|
|
|
newBestBlock, missedBlocks, err :=
|
|
|
|
chainntnfs.HandleMissedBlocks(
|
|
|
|
b.chainConn,
|
|
|
|
b.txConfNotifier,
|
|
|
|
b.bestBlock,
|
|
|
|
update.blockHeight,
|
|
|
|
true,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
// Set the bestBlock here in case
|
|
|
|
// a catch up partially completed.
|
|
|
|
b.bestBlock = newBestBlock
|
|
|
|
chainntnfs.Log.Error(err)
|
|
|
|
continue
|
|
|
|
}
|
2017-11-10 02:30:38 +03:00
|
|
|
|
2018-08-09 10:05:30 +03:00
|
|
|
for _, block := range missedBlocks {
|
|
|
|
err := b.handleBlockConnected(block)
|
|
|
|
if err != nil {
|
|
|
|
chainntnfs.Log.Error(err)
|
|
|
|
continue out
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2018-03-20 18:17:23 +03:00
|
|
|
|
2018-08-09 10:05:30 +03:00
|
|
|
newBlock := chainntnfs.BlockEpoch{
|
|
|
|
Height: update.blockHeight,
|
|
|
|
Hash: update.blockHash,
|
2018-03-20 18:17:23 +03:00
|
|
|
}
|
2018-08-09 10:05:30 +03:00
|
|
|
if err := b.handleBlockConnected(newBlock); err != nil {
|
2017-11-13 23:42:50 +03:00
|
|
|
chainntnfs.Log.Error(err)
|
|
|
|
}
|
2017-12-10 21:34:49 +03:00
|
|
|
continue
|
|
|
|
}
|
2016-02-27 03:30:14 +03:00
|
|
|
|
2018-08-09 10:05:28 +03:00
|
|
|
if update.blockHeight != b.bestBlock.Height {
|
2018-08-09 10:05:29 +03:00
|
|
|
chainntnfs.Log.Infof("Missed disconnected" +
|
|
|
|
"blocks, attempting to catch up")
|
2017-12-10 21:34:49 +03:00
|
|
|
}
|
2017-11-10 02:30:38 +03:00
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
newBestBlock, err := chainntnfs.RewindChain(
|
|
|
|
b.chainConn, b.txConfNotifier, b.bestBlock,
|
|
|
|
update.blockHeight-1,
|
|
|
|
)
|
2017-12-10 21:34:49 +03:00
|
|
|
if err != nil {
|
2018-08-09 10:05:29 +03:00
|
|
|
chainntnfs.Log.Errorf("Unable to rewind chain "+
|
|
|
|
"from height %d to height %d: %v",
|
|
|
|
b.bestBlock.Height, update.blockHeight-1, err)
|
2017-11-10 02:30:38 +03:00
|
|
|
}
|
2017-02-21 03:31:16 +03:00
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
// Set the bestBlock here in case a chain rewind
|
|
|
|
// partially completed.
|
|
|
|
b.bestBlock = newBestBlock
|
|
|
|
|
2018-04-16 21:01:14 +03:00
|
|
|
// NOTE: we currently only use txUpdates for mempool spends and
|
|
|
|
// rescan spends. It might get removed entirely in the future.
|
2017-11-10 01:59:14 +03:00
|
|
|
case item := <-b.txUpdates.ChanOut():
|
|
|
|
newSpend := item.(*txUpdate)
|
2018-07-17 10:13:06 +03:00
|
|
|
|
|
|
|
// We only care about notifying on confirmed spends, so
|
|
|
|
// in case this is a mempool spend, we can continue,
|
|
|
|
// and wait for the spend to appear in chain.
|
|
|
|
if newSpend.details == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2016-09-23 05:12:12 +03:00
|
|
|
spendingTx := newSpend.tx
|
|
|
|
|
2016-02-27 03:30:14 +03:00
|
|
|
// First, check if this transaction spends an output
|
|
|
|
// that has an existing spend notification for it.
|
2016-09-23 05:12:12 +03:00
|
|
|
for i, txIn := range spendingTx.MsgTx().TxIn {
|
2016-02-27 03:30:14 +03:00
|
|
|
prevOut := txIn.PreviousOutPoint
|
|
|
|
|
|
|
|
// If this transaction indeed does spend an
|
2016-11-28 06:17:27 +03:00
|
|
|
// output which we have a registered
|
|
|
|
// notification for, then create a spend
|
|
|
|
// summary, finally sending off the details to
|
|
|
|
// the notification subscriber.
|
|
|
|
if clients, ok := b.spendNotifications[prevOut]; ok {
|
2017-01-06 00:56:27 +03:00
|
|
|
spenderSha := newSpend.tx.Hash()
|
2017-05-11 02:52:27 +03:00
|
|
|
spendDetails := &chainntnfs.SpendDetail{
|
|
|
|
SpentOutPoint: &prevOut,
|
|
|
|
SpenderTxHash: spenderSha,
|
|
|
|
SpendingTx: spendingTx.MsgTx(),
|
|
|
|
SpenderInputIndex: uint32(i),
|
|
|
|
}
|
2018-07-17 10:13:06 +03:00
|
|
|
spendDetails.SpendingHeight = newSpend.details.Height
|
2016-11-28 06:17:27 +03:00
|
|
|
|
2018-07-17 10:13:06 +03:00
|
|
|
for _, ntfn := range clients {
|
|
|
|
chainntnfs.Log.Infof("Dispatching "+
|
|
|
|
"confirmed spend "+
|
|
|
|
"notification for "+
|
2018-04-16 21:01:14 +03:00
|
|
|
"outpoint=%v at height %v",
|
2018-07-17 10:13:06 +03:00
|
|
|
ntfn.targetOutpoint,
|
2018-04-16 21:01:14 +03:00
|
|
|
spendDetails.SpendingHeight)
|
2016-11-28 06:17:27 +03:00
|
|
|
ntfn.spendChan <- spendDetails
|
2016-01-07 00:03:17 +03:00
|
|
|
|
2018-07-17 10:13:06 +03:00
|
|
|
// Close spendChan to ensure
|
|
|
|
// that any calls to Cancel
|
|
|
|
// will not block. This is safe
|
|
|
|
// to do since the channel is
|
|
|
|
// buffered, and the message
|
|
|
|
// can still be read by the
|
|
|
|
// receiver.
|
2017-08-03 10:18:33 +03:00
|
|
|
close(ntfn.spendChan)
|
|
|
|
}
|
2016-02-27 03:30:14 +03:00
|
|
|
delete(b.spendNotifications, prevOut)
|
2016-01-07 00:03:17 +03:00
|
|
|
}
|
|
|
|
}
|
2017-02-21 03:31:16 +03:00
|
|
|
|
2016-01-07 00:03:17 +03:00
|
|
|
case <-b.quit:
|
|
|
|
break out
|
|
|
|
}
|
|
|
|
}
|
2016-02-27 03:30:14 +03:00
|
|
|
b.wg.Done()
|
2016-01-07 00:03:17 +03:00
|
|
|
}
|
|
|
|
|
2017-11-13 23:42:50 +03:00
|
|
|
// historicalConfDetails looks up whether a transaction is already included in a
|
|
|
|
// block in the active chain and, if so, returns details about the confirmation.
|
|
|
|
func (b *BtcdNotifier) historicalConfDetails(txid *chainhash.Hash,
|
2018-08-27 07:36:45 +03:00
|
|
|
startHeight, endHeight uint32) (*chainntnfs.TxConfirmation,
|
2018-08-24 15:13:28 +03:00
|
|
|
chainntnfs.TxConfStatus, error) {
|
2018-02-26 21:37:58 +03:00
|
|
|
|
2018-07-19 00:01:28 +03:00
|
|
|
// We'll first attempt to retrieve the transaction using the node's
|
|
|
|
// txindex.
|
|
|
|
txConf, txStatus, err := b.confDetailsFromTxIndex(txid)
|
|
|
|
|
|
|
|
// We'll then check the status of the transaction lookup returned to
|
|
|
|
// determine whether we should proceed with any fallback methods.
|
|
|
|
switch {
|
2018-08-24 15:44:41 +03:00
|
|
|
|
|
|
|
// We failed querying the index for the transaction, fall back to
|
|
|
|
// scanning manually.
|
|
|
|
case err != nil:
|
|
|
|
chainntnfs.Log.Debugf("Failed getting conf details from "+
|
|
|
|
"index (%v), scanning manually", err)
|
2018-08-27 07:36:45 +03:00
|
|
|
return b.confDetailsManually(txid, startHeight, endHeight)
|
2018-08-24 15:44:41 +03:00
|
|
|
|
2018-07-19 00:01:28 +03:00
|
|
|
// The transaction was found within the node's mempool.
|
2018-08-24 15:13:28 +03:00
|
|
|
case txStatus == chainntnfs.TxFoundMempool:
|
2018-07-19 00:01:28 +03:00
|
|
|
|
|
|
|
// The transaction was found within the node's txindex.
|
2018-08-24 15:13:28 +03:00
|
|
|
case txStatus == chainntnfs.TxFoundIndex:
|
2018-07-19 00:01:28 +03:00
|
|
|
|
|
|
|
// The transaction was not found within the node's mempool or txindex.
|
2018-08-24 15:44:41 +03:00
|
|
|
case txStatus == chainntnfs.TxNotFoundIndex:
|
2018-02-26 21:37:58 +03:00
|
|
|
|
2018-08-24 15:44:41 +03:00
|
|
|
// Unexpected txStatus returned.
|
2018-07-19 00:01:28 +03:00
|
|
|
default:
|
2018-08-24 15:44:41 +03:00
|
|
|
return nil, txStatus,
|
|
|
|
fmt.Errorf("Got unexpected txConfStatus: %v", txStatus)
|
2018-02-26 21:37:58 +03:00
|
|
|
}
|
|
|
|
|
2018-07-19 00:01:28 +03:00
|
|
|
return txConf, txStatus, nil
|
2018-02-26 21:37:58 +03:00
|
|
|
}
|
|
|
|
|
// confDetailsFromTxIndex looks up whether a transaction is already included in
// a block in the active chain by using the backend node's transaction index.
// If the transaction is found its TxConfStatus is returned. If it was found in
// the mempool this will be TxFoundMempool, if it is found in a block this will
// be TxFoundIndex. Otherwise TxNotFoundIndex is returned. If the tx is found
// in a block its confirmation details are also returned.
func (b *BtcdNotifier) confDetailsFromTxIndex(txid *chainhash.Hash,
) (*chainntnfs.TxConfirmation, chainntnfs.TxConfStatus, error) {

	// If the transaction has some or all of its confirmations required,
	// then we may be able to dispatch it immediately.
	tx, err := b.chainConn.GetRawTransactionVerbose(txid)
	if err != nil {
		// If the transaction lookup was successful, but it wasn't found
		// within the index itself, then we can exit early. We'll also
		// need to look at the error message returned as the error code
		// is used for multiple errors.
		txNotFoundErr := "No information available about transaction"
		jsonErr, ok := err.(*btcjson.RPCError)
		if ok && jsonErr.Code == btcjson.ErrRPCNoTxInfo &&
			strings.Contains(jsonErr.Message, txNotFoundErr) {

			return nil, chainntnfs.TxNotFoundIndex, nil
		}

		return nil, chainntnfs.TxNotFoundIndex,
			fmt.Errorf("unable to query for txid %v: %v", txid, err)
	}

	// Make sure we actually retrieved a transaction that is included in a
	// block. If not, the transaction must be unconfirmed (in the mempool),
	// and we'll return TxFoundMempool together with a nil TxConfirmation.
	if tx.BlockHash == "" {
		return nil, chainntnfs.TxFoundMempool, nil
	}

	// As we need to fully populate the returned TxConfirmation struct,
	// grab the block in which the transaction was confirmed so we can
	// locate its exact index within the block.
	blockHash, err := chainhash.NewHashFromStr(tx.BlockHash)
	if err != nil {
		return nil, chainntnfs.TxNotFoundIndex,
			fmt.Errorf("unable to get block hash %v for "+
				"historical dispatch: %v", tx.BlockHash, err)
	}

	block, err := b.chainConn.GetBlockVerbose(blockHash)
	if err != nil {
		return nil, chainntnfs.TxNotFoundIndex,
			fmt.Errorf("unable to get block with hash %v for "+
				"historical dispatch: %v", blockHash, err)
	}

	// If the block was obtained, locate the transaction's index within the
	// block so we can give the subscriber full confirmation details.
	targetTxidStr := txid.String()
	for txIndex, txHash := range block.Tx {
		if txHash == targetTxidStr {
			details := &chainntnfs.TxConfirmation{
				BlockHash:   blockHash,
				BlockHeight: uint32(block.Height),
				TxIndex:     uint32(txIndex),
			}
			return details, chainntnfs.TxFoundIndex, nil
		}
	}

	// We return an error because we should have found the transaction
	// within the block, but didn't.
	return nil, chainntnfs.TxNotFoundIndex,
		fmt.Errorf("unable to locate tx %v in block %v", txid,
			blockHash)
}
|
|
|
|
|
|
|
|
// confDetailsManually looks up whether a transaction is already included in a
|
|
|
|
// block in the active chain by scanning the chain's blocks, starting from the
|
|
|
|
// earliest height the transaction could have been included in, to the current
|
|
|
|
// height in the chain. If the transaction is found, its confirmation details
|
|
|
|
// are returned. Otherwise, nil is returned.
|
2018-08-27 07:36:45 +03:00
|
|
|
func (b *BtcdNotifier) confDetailsManually(txid *chainhash.Hash, startHeight,
|
|
|
|
endHeight uint32) (*chainntnfs.TxConfirmation,
|
2018-08-24 15:13:28 +03:00
|
|
|
chainntnfs.TxConfStatus, error) {
|
2018-02-26 21:37:58 +03:00
|
|
|
|
|
|
|
targetTxidStr := txid.String()
|
|
|
|
|
|
|
|
// Begin scanning blocks at every height to determine where the
|
|
|
|
// transaction was included in.
|
2018-08-27 07:36:45 +03:00
|
|
|
for height := startHeight; height <= endHeight; height++ {
|
2018-07-27 07:32:55 +03:00
|
|
|
// Ensure we haven't been requested to shut down before
|
|
|
|
// processing the next height.
|
|
|
|
select {
|
|
|
|
case <-b.quit:
|
2018-08-24 15:13:28 +03:00
|
|
|
return nil, chainntnfs.TxNotFoundManually,
|
|
|
|
ErrChainNotifierShuttingDown
|
2018-07-27 07:32:55 +03:00
|
|
|
default:
|
|
|
|
}
|
|
|
|
|
2018-02-26 21:37:58 +03:00
|
|
|
blockHash, err := b.chainConn.GetBlockHash(int64(height))
|
|
|
|
if err != nil {
|
2018-08-24 15:13:28 +03:00
|
|
|
return nil, chainntnfs.TxNotFoundManually,
|
|
|
|
fmt.Errorf("unable to get hash from block "+
|
|
|
|
"with height %d", height)
|
2018-02-26 21:37:58 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: fetch the neutrino filters instead.
|
|
|
|
block, err := b.chainConn.GetBlockVerbose(blockHash)
|
|
|
|
if err != nil {
|
2018-08-24 15:13:28 +03:00
|
|
|
return nil, chainntnfs.TxNotFoundManually,
|
|
|
|
fmt.Errorf("unable to get block with hash "+
|
|
|
|
"%v: %v", blockHash, err)
|
2018-02-26 21:37:58 +03:00
|
|
|
}
|
2016-12-25 03:42:13 +03:00
|
|
|
|
2018-02-26 21:37:58 +03:00
|
|
|
for txIndex, txHash := range block.Tx {
|
|
|
|
// If we're able to find the transaction in this block,
|
|
|
|
// return its confirmation details.
|
|
|
|
if txHash == targetTxidStr {
|
2018-07-19 00:01:28 +03:00
|
|
|
details := &chainntnfs.TxConfirmation{
|
2018-02-26 21:37:58 +03:00
|
|
|
BlockHash: blockHash,
|
|
|
|
BlockHeight: height,
|
|
|
|
TxIndex: uint32(txIndex),
|
2018-07-19 00:01:28 +03:00
|
|
|
}
|
2018-08-24 15:13:28 +03:00
|
|
|
return details, chainntnfs.TxFoundManually, nil
|
2018-02-26 21:37:58 +03:00
|
|
|
}
|
|
|
|
}
|
2016-12-09 03:15:58 +03:00
|
|
|
}
|
2018-02-26 21:37:58 +03:00
|
|
|
|
|
|
|
// If we reach here, then we were not able to find the transaction
|
|
|
|
// within a block, so we avoid returning an error.
|
2018-08-24 15:13:28 +03:00
|
|
|
return nil, chainntnfs.TxNotFoundManually, nil
|
2016-12-09 03:15:58 +03:00
|
|
|
}
|
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included this block will processed to either send notifications
// now or after numConfirmations confs.
// TODO(halseth): this is reusing the neutrino notifier implementation, unify
// them.
func (b *BtcdNotifier) handleBlockConnected(epoch chainntnfs.BlockEpoch) error {
	// First process the block for our internal state. A new block has
	// been connected to the main chain. Send out any N confirmation
	// notifications which may have been triggered by this new block.
	rawBlock, err := b.chainConn.GetBlock(epoch.Hash)
	if err != nil {
		return fmt.Errorf("unable to get block: %v", err)
	}

	newBlock := &filteredBlock{
		hash:    *epoch.Hash,
		height:  uint32(epoch.Height),
		txns:    btcutil.NewBlock(rawBlock).Transactions(),
		connect: true,
	}
	err = b.txConfNotifier.ConnectTip(
		&newBlock.hash, newBlock.height, newBlock.txns,
	)
	if err != nil {
		return fmt.Errorf("unable to connect tip: %v", err)
	}

	chainntnfs.Log.Infof("New block: height=%v, sha=%v", epoch.Height,
		epoch.Hash)

	// Define a helper struct for coalescing the spend notifications we will
	// dispatch after trying to commit the spend hints.
	type spendNtfnBatch struct {
		details *chainntnfs.SpendDetail
		clients map[uint64]*spendNotification
	}

	// Scan over the list of relevant transactions and possibly dispatch
	// notifications for spends.
	spendBatches := make(map[wire.OutPoint]spendNtfnBatch)
	for _, tx := range newBlock.txns {
		mtx := tx.MsgTx()
		txSha := mtx.TxHash()

		for i, txIn := range mtx.TxIn {
			// prevOut is redeclared each iteration, so taking its
			// address below does not alias across iterations.
			prevOut := txIn.PreviousOutPoint

			// If this transaction indeed does spend an output which
			// we have a registered notification for, then create a
			// spend summary, finally sending off the details to the
			// notification subscriber.
			clients, ok := b.spendNotifications[prevOut]
			if !ok {
				continue
			}
			// Remove the outpoint now so the hint-commit loop
			// below only sees still-unspent outpoints.
			delete(b.spendNotifications, prevOut)

			spendDetails := &chainntnfs.SpendDetail{
				SpentOutPoint:     &prevOut,
				SpenderTxHash:     &txSha,
				SpendingTx:        mtx,
				SpenderInputIndex: uint32(i),
				SpendingHeight:    int32(newBlock.height),
			}

			spendBatches[prevOut] = spendNtfnBatch{
				details: spendDetails,
				clients: clients,
			}
		}
	}

	// Finally, we'll update the spend height hint for all of our watched
	// outpoints that have not been spent yet. This is safe to do as we do
	// not watch already spent outpoints for spend notifications.
	ops := make([]wire.OutPoint, 0, len(b.spendNotifications))
	for op := range b.spendNotifications {
		ops = append(ops, op)
	}

	if len(ops) > 0 {
		err := b.spendHintCache.CommitSpendHint(
			uint32(epoch.Height), ops...,
		)
		if err != nil {
			// The error is not fatal since we are connecting a
			// block, and advancing the spend hint is an optimistic
			// optimization.
			chainntnfs.Log.Errorf("Unable to update spend hint to "+
				"%d for %v: %v", epoch.Height, ops, err)
		}
	}

	// We want to set the best block before dispatching notifications
	// so if any subscribers make queries based on their received
	// block epoch, our state is fully updated in time.
	b.bestBlock = epoch

	// Next we'll notify any subscribed clients of the block.
	b.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)

	// Finally, send off the spend details to the notification subscribers.
	for _, batch := range spendBatches {
		for _, ntfn := range batch.clients {
			chainntnfs.Log.Infof("Dispatching spend "+
				"notification for outpoint=%v",
				ntfn.targetOutpoint)

			ntfn.spendChan <- batch.details

			// Close spendChan to ensure that any calls to
			// Cancel will not block. This is safe to do
			// since the channel is buffered, and the
			// message can still be read by the receiver.
			close(ntfn.spendChan)
		}
	}

	return nil
}
|
|
|
|
|
2016-09-08 21:27:07 +03:00
|
|
|
// notifyBlockEpochs notifies all registered block epoch clients of the newly
|
|
|
|
// connected block to the main chain.
|
2017-01-06 00:56:27 +03:00
|
|
|
func (b *BtcdNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
|
2018-08-09 10:05:29 +03:00
|
|
|
for _, client := range b.blockEpochClients {
|
|
|
|
b.notifyBlockEpochClient(client, newHeight, newSha)
|
2016-09-08 21:27:07 +03:00
|
|
|
}
|
2018-08-09 10:05:29 +03:00
|
|
|
}
|
2016-09-08 21:27:07 +03:00
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
// notifyBlockEpochClient sends a registered block epoch client a notification
|
|
|
|
// about a specific block.
|
|
|
|
func (b *BtcdNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
|
|
|
|
height int32, sha *chainhash.Hash) {
|
2017-05-06 01:53:09 +03:00
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
epoch := &chainntnfs.BlockEpoch{
|
|
|
|
Height: height,
|
|
|
|
Hash: sha,
|
|
|
|
}
|
2017-06-08 03:04:27 +03:00
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
select {
|
|
|
|
case epochClient.epochQueue.ChanIn() <- epoch:
|
|
|
|
case <-epochClient.cancelChan:
|
|
|
|
case <-b.quit:
|
2016-09-08 21:27:07 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-02-27 04:31:07 +03:00
|
|
|
// spendNotification couples a target outpoint along with the channel used for
// notifications once a spend of the outpoint has been detected.
type spendNotification struct {
	// targetOutpoint is the outpoint being watched for a spend.
	targetOutpoint *wire.OutPoint

	// spendChan is the buffered channel over which the details of the
	// spending transaction are delivered.
	spendChan chan *chainntnfs.SpendDetail

	// spendID uniquely identifies this registration, allowing it to be
	// cancelled individually via spendCancel.
	spendID uint64

	// heightHint is the earliest height in the chain at which the
	// outpoint could have been spent.
	heightHint uint32
}
|
|
|
|
|
|
|
|
// spendCancel is a message sent to the BtcdNotifier when a client wishes to
// cancel an outstanding spend notification that has yet to be dispatched.
type spendCancel struct {
	// op is the target outpoint of the notification to be cancelled.
	op wire.OutPoint

	// spendID is the ID of the notification to cancel.
	spendID uint64
}
|
|
|
|
|
2017-02-23 22:56:47 +03:00
|
|
|
// RegisterSpendNtfn registers an intent to be notified once the target
|
2016-02-27 04:31:07 +03:00
|
|
|
// outpoint has been spent by a transaction on-chain. Once a spend of the target
|
|
|
|
// outpoint has been detected, the details of the spending event will be sent
|
2018-02-26 21:37:58 +03:00
|
|
|
// across the 'Spend' channel. The heightHint should represent the earliest
|
|
|
|
// height in the chain where the transaction could have been spent in.
|
2017-05-11 03:00:18 +03:00
|
|
|
func (b *BtcdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
|
2018-07-18 05:02:25 +03:00
|
|
|
pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {
|
2016-11-30 11:00:20 +03:00
|
|
|
|
2018-08-15 03:55:29 +03:00
|
|
|
// Before proceeding to register the notification, we'll query our
|
|
|
|
// height hint cache to determine whether a better one exists.
|
|
|
|
if hint, err := b.spendHintCache.QuerySpendHint(*outpoint); err == nil {
|
|
|
|
if hint > heightHint {
|
|
|
|
chainntnfs.Log.Debugf("Using height hint %d retrieved "+
|
|
|
|
"from cache for %v", hint, outpoint)
|
|
|
|
heightHint = hint
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Construct a notification request for the outpoint and send it to the
|
|
|
|
// main event loop.
|
2016-01-07 00:03:17 +03:00
|
|
|
ntfn := &spendNotification{
|
2016-02-17 01:46:18 +03:00
|
|
|
targetOutpoint: outpoint,
|
|
|
|
spendChan: make(chan *chainntnfs.SpendDetail, 1),
|
2017-02-21 03:31:16 +03:00
|
|
|
spendID: atomic.AddUint64(&b.spendClientCounter, 1),
|
2018-02-26 21:37:58 +03:00
|
|
|
heightHint: heightHint,
|
2016-01-07 00:03:17 +03:00
|
|
|
}
|
|
|
|
|
2016-12-15 12:07:12 +03:00
|
|
|
select {
|
|
|
|
case <-b.quit:
|
2016-12-14 02:32:44 +03:00
|
|
|
return nil, ErrChainNotifierShuttingDown
|
2016-12-15 12:07:12 +03:00
|
|
|
case b.notificationRegistry <- ntfn:
|
|
|
|
}
|
2016-01-07 00:03:17 +03:00
|
|
|
|
2018-07-18 05:02:25 +03:00
|
|
|
// TODO(roasbeef): update btcd rescan logic to also use both
|
2018-03-27 14:50:24 +03:00
|
|
|
if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-02-26 21:37:58 +03:00
|
|
|
// The following conditional checks to ensure that when a spend
|
|
|
|
// notification is registered, the output hasn't already been spent. If
|
|
|
|
// the output is no longer in the UTXO set, the chain will be rescanned
|
|
|
|
// from the point where the output was added. The rescan will dispatch
|
|
|
|
// the notification.
|
|
|
|
txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
|
2016-11-30 11:00:20 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-08-15 03:55:29 +03:00
|
|
|
// If the output is unspent, then we'll write it to the cache with the
|
|
|
|
// given height hint. This allows us to increase the height hint as the
|
|
|
|
// chain extends and the output remains unspent.
|
|
|
|
if txOut != nil {
|
|
|
|
err := b.spendHintCache.CommitSpendHint(heightHint, *outpoint)
|
|
|
|
if err != nil {
|
|
|
|
// The error is not fatal, so we should not return an
|
|
|
|
// error to the caller.
|
|
|
|
chainntnfs.Log.Error("Unable to update spend hint to "+
|
|
|
|
"%d for %v: %v", heightHint, *outpoint, err)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Otherwise, we'll determine when the output was spent.
|
|
|
|
//
|
2018-02-26 21:37:58 +03:00
|
|
|
// First, we'll attempt to retrieve the transaction's block hash
|
|
|
|
// using the backend's transaction index.
|
|
|
|
tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
|
2016-11-30 11:00:20 +03:00
|
|
|
if err != nil {
|
2018-02-26 21:37:58 +03:00
|
|
|
// Avoid returning an error if the transaction was not
|
|
|
|
// found to proceed with fallback methods.
|
2017-09-12 18:11:21 +03:00
|
|
|
jsonErr, ok := err.(*btcjson.RPCError)
|
2017-11-13 23:42:50 +03:00
|
|
|
if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
|
2018-02-26 21:37:58 +03:00
|
|
|
return nil, fmt.Errorf("unable to query for "+
|
|
|
|
"txid %v: %v", outpoint.Hash, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
var blockHash *chainhash.Hash
|
|
|
|
if tx != nil && tx.BlockHash != "" {
|
|
|
|
// If we're able to retrieve a valid block hash from the
|
|
|
|
// transaction, then we'll use it as our rescan starting
|
|
|
|
// point.
|
|
|
|
blockHash, err = chainhash.NewHashFromStr(tx.BlockHash)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Otherwise, we'll attempt to retrieve the hash for the
|
|
|
|
// block at the heightHint.
|
|
|
|
blockHash, err = b.chainConn.GetBlockHash(
|
|
|
|
int64(heightHint),
|
|
|
|
)
|
|
|
|
if err != nil {
|
2017-09-12 18:11:21 +03:00
|
|
|
return nil, err
|
|
|
|
}
|
2016-11-30 11:00:20 +03:00
|
|
|
}
|
|
|
|
|
2018-01-29 01:46:54 +03:00
|
|
|
// We'll only request a rescan if the transaction has actually
|
|
|
|
// been included within a block. Otherwise, we'll encounter an
|
2018-02-26 21:37:58 +03:00
|
|
|
// error when scanning for blocks. This can happen in the case
|
2018-01-29 01:46:54 +03:00
|
|
|
// of a race condition, wherein the output itself is unspent,
|
|
|
|
// and only arrives in the mempool after the getxout call.
|
2018-02-26 21:37:58 +03:00
|
|
|
if blockHash != nil {
|
2017-09-12 18:11:21 +03:00
|
|
|
ops := []*wire.OutPoint{outpoint}
|
2018-07-18 11:31:49 +03:00
|
|
|
|
|
|
|
// In order to ensure that we don't block the caller on
|
|
|
|
// what may be a long rescan, we'll launch a new
|
|
|
|
// goroutine to handle the async result of the rescan.
|
|
|
|
asyncResult := b.chainConn.RescanAsync(
|
|
|
|
blockHash, nil, ops,
|
|
|
|
)
|
|
|
|
go func() {
|
|
|
|
rescanErr := asyncResult.Receive()
|
|
|
|
if rescanErr != nil {
|
|
|
|
chainntnfs.Log.Errorf("Rescan for spend "+
|
|
|
|
"notification txout(%x) "+
|
|
|
|
"failed: %v", outpoint, rescanErr)
|
|
|
|
}
|
|
|
|
}()
|
2016-11-30 11:00:20 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-10 02:28:32 +03:00
|
|
|
return &chainntnfs.SpendEvent{
|
|
|
|
Spend: ntfn.spendChan,
|
2017-02-21 03:31:16 +03:00
|
|
|
Cancel: func() {
|
2017-07-30 05:19:28 +03:00
|
|
|
cancel := &spendCancel{
|
2017-02-21 03:31:16 +03:00
|
|
|
op: *outpoint,
|
|
|
|
spendID: ntfn.spendID,
|
2017-07-30 05:19:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Submit spend cancellation to notification dispatcher.
|
|
|
|
select {
|
|
|
|
case b.notificationCancels <- cancel:
|
2017-07-30 06:28:48 +03:00
|
|
|
// Cancellation is being handled, drain the spend chan until it is
|
|
|
|
// closed before yielding to the caller.
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case _, ok := <-ntfn.spendChan:
|
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
case <-b.quit:
|
2017-08-02 03:14:01 +03:00
|
|
|
return
|
2017-07-30 06:28:48 +03:00
|
|
|
}
|
2017-07-30 05:19:28 +03:00
|
|
|
}
|
2017-02-21 03:31:16 +03:00
|
|
|
case <-b.quit:
|
|
|
|
}
|
|
|
|
},
|
2017-02-10 02:28:32 +03:00
|
|
|
}, nil
|
2016-02-17 01:46:18 +03:00
|
|
|
}
|
|
|
|
|
2017-02-23 22:56:47 +03:00
|
|
|
// RegisterConfirmationsNtfn registers a notification with BtcdNotifier
|
2016-02-27 04:31:07 +03:00
|
|
|
// which will be triggered once the txid reaches numConfs number of
|
|
|
|
// confirmations.
|
2018-05-31 08:03:23 +03:00
|
|
|
func (b *BtcdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, _ []byte,
|
2018-02-26 21:37:58 +03:00
|
|
|
numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {
|
2016-01-07 00:03:17 +03:00
|
|
|
|
2018-08-15 03:54:21 +03:00
|
|
|
// Construct a notification request for the transaction and send it to
|
|
|
|
// the main event loop.
|
2018-08-25 06:09:53 +03:00
|
|
|
ntfn := &chainntnfs.ConfNtfn{
|
|
|
|
ConfID: atomic.AddUint64(&b.confClientCounter, 1),
|
|
|
|
TxID: txid,
|
|
|
|
NumConfirmations: numConfs,
|
|
|
|
Event: chainntnfs.NewConfirmationEvent(numConfs),
|
|
|
|
HeightHint: heightHint,
|
2016-01-07 00:03:17 +03:00
|
|
|
}
|
|
|
|
|
2018-08-27 07:36:45 +03:00
|
|
|
chainntnfs.Log.Infof("New confirmation subscription: "+
|
|
|
|
"txid=%v, numconfs=%v", txid, numConfs)
|
|
|
|
|
|
|
|
// Register the conf notification with txconfnotifier. A non-nil value
|
|
|
|
// for `dispatch` will be returned if we are required to perform a
|
|
|
|
// manual scan for the confirmation. Otherwise the notifier will begin
|
|
|
|
// watching at tip for the transaction to confirm.
|
|
|
|
dispatch, err := b.txConfNotifier.Register(ntfn)
|
|
|
|
if err != nil {
|
2018-07-27 07:32:55 +03:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
2018-08-27 07:36:45 +03:00
|
|
|
if dispatch == nil {
|
|
|
|
return ntfn.Event, nil
|
|
|
|
}
|
|
|
|
|
2016-12-15 12:07:12 +03:00
|
|
|
select {
|
2018-08-27 07:36:45 +03:00
|
|
|
case b.notificationRegistry <- dispatch:
|
2017-11-13 23:42:50 +03:00
|
|
|
return ntfn.Event, nil
|
2018-07-27 07:32:55 +03:00
|
|
|
case <-b.quit:
|
|
|
|
return nil, ErrChainNotifierShuttingDown
|
2016-12-15 12:07:12 +03:00
|
|
|
}
|
2016-01-07 00:03:17 +03:00
|
|
|
}
|
2016-06-21 07:31:05 +03:00
|
|
|
|
2016-09-08 21:27:07 +03:00
|
|
|
// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	// epochID uniquely identifies this registration, allowing it to be
	// cancelled individually via epochCancel.
	epochID uint64

	// epochChan is the channel over which block epoch notifications are
	// ultimately delivered to the client.
	epochChan chan *chainntnfs.BlockEpoch

	// epochQueue is a concurrent in-order queue that buffers
	// notifications between the notifier's dispatch loop and epochChan,
	// so delivery never blocks the dispatcher.
	epochQueue *queue.ConcurrentQueue

	// bestBlock is the client's best known block at registration time;
	// the notifier uses it to check whether the client is behind and
	// needs to be caught up.
	bestBlock *chainntnfs.BlockEpoch

	// errorChan is a buffered channel for delivering an error related to
	// this registration. NOTE(review): its consumer is not visible in
	// this file section — confirm usage at the call sites.
	errorChan chan error

	// cancelChan is closed to signal that the client has cancelled its
	// registration.
	cancelChan chan struct{}

	// wg tracks the goroutine that proxies notifications from epochQueue
	// to epochChan.
	wg sync.WaitGroup
}
|
|
|
|
|
|
|
|
// epochCancel is a message sent to the BtcdNotifier when a client wishes to
// cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	// epochID is the ID of the block epoch registration to cancel.
	epochID uint64
}
|
|
|
|
|
2016-06-21 07:31:05 +03:00
|
|
|
// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
|
2017-02-21 03:31:16 +03:00
|
|
|
// caller to receive notifications, of each new block connected to the main
|
2018-08-09 10:05:27 +03:00
|
|
|
// chain. Clients have the option of passing in their best known block, which
|
|
|
|
// the notifier uses to check if they are behind on blocks and catch them up.
|
|
|
|
func (b *BtcdNotifier) RegisterBlockEpochNtfn(
|
|
|
|
bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {
|
|
|
|
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method archived
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
reg := &blockEpochRegistration{
|
2018-10-12 18:08:14 +03:00
|
|
|
epochQueue: queue.NewConcurrentQueue(20),
|
2017-05-06 01:53:09 +03:00
|
|
|
epochChan: make(chan *chainntnfs.BlockEpoch, 20),
|
|
|
|
cancelChan: make(chan struct{}),
|
|
|
|
epochID: atomic.AddUint64(&b.epochClientCounter, 1),
|
2018-08-09 10:05:27 +03:00
|
|
|
bestBlock: bestBlock,
|
|
|
|
errorChan: make(chan error, 1),
|
2016-09-08 21:27:07 +03:00
|
|
|
}
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method archived
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
reg.epochQueue.Start()
|
|
|
|
|
|
|
|
// Before we send the request to the main goroutine, we'll launch a new
|
|
|
|
// goroutine to proxy items added to our queue to the client itself.
|
|
|
|
// This ensures that all notifications are received *in order*.
|
|
|
|
reg.wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
defer reg.wg.Done()
|
|
|
|
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case ntfn := <-reg.epochQueue.ChanOut():
|
|
|
|
blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
|
|
|
|
select {
|
|
|
|
case reg.epochChan <- blockNtfn:
|
|
|
|
|
|
|
|
case <-reg.cancelChan:
|
|
|
|
return
|
|
|
|
|
|
|
|
case <-b.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
case <-reg.cancelChan:
|
|
|
|
return
|
|
|
|
|
|
|
|
case <-b.quit:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
2016-09-08 21:27:07 +03:00
|
|
|
|
2016-12-15 12:07:12 +03:00
|
|
|
select {
|
|
|
|
case <-b.quit:
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method archived
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
// As we're exiting before the registration could be sent,
|
|
|
|
// we'll stop the queue now ourselves.
|
|
|
|
reg.epochQueue.Stop()
|
|
|
|
|
2016-12-15 12:07:12 +03:00
|
|
|
return nil, errors.New("chainntnfs: system interrupt while " +
|
|
|
|
"attempting to register for block epoch notification.")
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method archived
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
case b.notificationRegistry <- reg:
|
2016-12-15 12:07:12 +03:00
|
|
|
return &chainntnfs.BlockEpochEvent{
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method archived
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
Epochs: reg.epochChan,
|
2017-02-21 03:31:16 +03:00
|
|
|
Cancel: func() {
|
2017-04-27 07:08:16 +03:00
|
|
|
cancel := &epochCancel{
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method archived
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
epochID: reg.epochID,
|
2017-04-27 07:08:16 +03:00
|
|
|
}
|
|
|
|
|
2017-07-30 05:19:28 +03:00
|
|
|
// Submit epoch cancellation to notification dispatcher.
|
2017-04-27 07:08:16 +03:00
|
|
|
select {
|
|
|
|
case b.notificationCancels <- cancel:
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method achieved
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
// Cancellation is being handled, drain
|
|
|
|
// the epoch channel until it is closed
|
|
|
|
// before yielding to caller.
|
2017-07-30 06:28:48 +03:00
|
|
|
for {
|
|
|
|
select {
|
chainntnfs: ensure all block epoch notifications are sent *in order*
In this commit, we fix a lingering bug related to the way that we
deliver block epoch notifications to end users. Before this commit, we
would launch a new goroutine for *each block*. This was done in order
to ensure that the notification dispatch wouldn’t block the main
goroutine that was dispatching the notifications. This method achieved
the goal, but had a nasty side effect that the goroutines could be
re-ordered during scheduling, meaning that in the case of fast
successive blocks, then notifications would be delivered out of order.
Receiving out of order notifications is either disallowed, or can cause
sub-systems that rely on these notifications to get into weird states.
In order to fix this issue, we’ll no longer launch a new goroutine to
deliver each notification to an awaiting client. Instead, each client
will now gain a concurrent in-order queue for notification delivery.
Due to the internal design of chainntnfs.ConcurrentQueue, the caller
should never block, yet the receivers will receive notifications in
order. This change solves the re-ordering issue and also minimizes the
number of goroutines that we’ll create in order to deliver block epoch
notifications.
2018-02-10 03:13:21 +03:00
|
|
|
case _, ok := <-reg.epochChan:
|
2017-07-30 06:28:48 +03:00
|
|
|
if !ok {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
case <-b.quit:
|
2017-08-02 03:14:01 +03:00
|
|
|
return
|
2017-07-30 06:28:48 +03:00
|
|
|
}
|
2017-04-27 07:08:16 +03:00
|
|
|
}
|
2017-02-21 03:31:16 +03:00
|
|
|
case <-b.quit:
|
|
|
|
}
|
|
|
|
},
|
2016-12-15 12:07:12 +03:00
|
|
|
}, nil
|
|
|
|
}
|
2016-06-21 07:31:05 +03:00
|
|
|
}
|