package btcdnotify

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/roasbeef/btcd/btcjson"
	"github.com/roasbeef/btcd/chaincfg/chainhash"
	"github.com/roasbeef/btcd/rpcclient"
	"github.com/roasbeef/btcd/wire"
	"github.com/roasbeef/btcutil"
)

const (
	// notifierType uniquely identifies this concrete implementation of the
	// ChainNotifier interface.
	notifierType = "btcd"

	// reorgSafetyLimit is the assumed maximum depth of a chain
	// reorganization. After this many confirmations, transaction
	// confirmation info will be pruned.
	reorgSafetyLimit = 100
)

var (
	// ErrChainNotifierShuttingDown is used when we attempt to register for
	// a spend notification while the notifier is already stopped.
	ErrChainNotifierShuttingDown = errors.New("chainntnfs: system interrupt " +
		"while attempting to register for spend notification.")
)

// chainUpdate encapsulates an update to the current main chain. This struct is
// used as an element within an unbounded queue in order to avoid blocking the
// main rpc dispatch goroutine.
type chainUpdate struct {
	blockHash   *chainhash.Hash
	blockHeight int32

	// connect is true if this update is for a newly connected block and
	// false if it is for a disconnected block.
	connect bool
}

// txUpdate encapsulates a transaction related notification sent from btcd to
// the registered RPC client. This struct is used as an element within an
// unbounded queue in order to avoid blocking the main rpc dispatch goroutine.
type txUpdate struct {
	tx      *btcutil.Tx
	details *btcjson.BlockDetails
}

// TODO(roasbeef): generalize struct below:
//  * move chans to config, allow outside callers to handle send conditions

// BtcdNotifier implements the ChainNotifier interface using btcd's websockets
// notifications. Multiple concurrent clients are supported. All notifications
// are achieved via non-blocking sends on client channels.
type BtcdNotifier struct {
	spendClientCounter uint64 // To be used atomically.
	epochClientCounter uint64 // To be used atomically.

	started int32 // To be used atomically.
	stopped int32 // To be used atomically.

	chainConn *rpcclient.Client

	notificationCancels  chan interface{}
	notificationRegistry chan interface{}

	spendNotifications map[wire.OutPoint]map[uint64]*spendNotification

	txConfNotifier *chainntnfs.TxConfNotifier

	blockEpochClients map[uint64]*blockEpochRegistration

	chainUpdates *chainntnfs.ConcurrentQueue
	txUpdates    *chainntnfs.ConcurrentQueue

	wg   sync.WaitGroup
	quit chan struct{}
}

// Ensure BtcdNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*BtcdNotifier)(nil)

// New returns a new BtcdNotifier instance. This function assumes the btcd node
// detailed in the passed configuration is already running, and willing to
// accept new websockets clients.
func New(config *rpcclient.ConnConfig) (*BtcdNotifier, error) {
	notifier := &BtcdNotifier{
		notificationCancels:  make(chan interface{}),
		notificationRegistry: make(chan interface{}),

		blockEpochClients: make(map[uint64]*blockEpochRegistration),

		spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification),

		chainUpdates: chainntnfs.NewConcurrentQueue(10),
		txUpdates:    chainntnfs.NewConcurrentQueue(10),

		quit: make(chan struct{}),
	}

	ntfnCallbacks := &rpcclient.NotificationHandlers{
		OnBlockConnected:    notifier.onBlockConnected,
		OnBlockDisconnected: notifier.onBlockDisconnected,
		OnRedeemingTx:       notifier.onRedeemingTx,
	}

	// Disable connecting to btcd within the rpcclient.New method. We
	// defer establishing the connection to our .Start() method.
	config.DisableConnectOnNew = true
	config.DisableAutoReconnect = false
	chainConn, err := rpcclient.New(config, ntfnCallbacks)
	if err != nil {
		return nil, err
	}
	notifier.chainConn = chainConn

	return notifier, nil
}
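
// A minimal usage sketch (an illustration added here, not part of the
// original package): constructing the notifier against a local btcd node and
// driving its lifecycle. The host, credentials, and certificate value below
// are assumptions supplied by the caller, not values mandated by this
// package.
//
//	connCfg := &rpcclient.ConnConfig{
//		Host:         "localhost:18334", // assumed btcd RPC listener
//		Endpoint:     "ws",              // websockets, required for callbacks
//		User:         "rpcuser",         // assumed credentials
//		Pass:         "rpcpass",
//		Certificates: rpcCert, // btcd's TLS certificate, read by the caller
//	}
//	notifier, err := btcdnotify.New(connCfg)
//	if err != nil {
//		// Handle the error.
//	}
//	if err := notifier.Start(); err != nil {
//		// Handle the error.
//	}
//	defer notifier.Stop()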

// Start connects to the running btcd node over websockets, registers for block
// notifications, and finally launches all related helper goroutines.
func (b *BtcdNotifier) Start() error {
	// Already started?
	if atomic.AddInt32(&b.started, 1) != 1 {
		return nil
	}

	// Connect to btcd, and register for notifications on connected, and
	// disconnected blocks.
	if err := b.chainConn.Connect(20); err != nil {
		return err
	}
	if err := b.chainConn.NotifyBlocks(); err != nil {
		return err
	}

	_, currentHeight, err := b.chainConn.GetBestBlock()
	if err != nil {
		return err
	}

	b.txConfNotifier = chainntnfs.NewTxConfNotifier(
		uint32(currentHeight), reorgSafetyLimit)

	b.chainUpdates.Start()
	b.txUpdates.Start()

	b.wg.Add(1)
	go b.notificationDispatcher(currentHeight)

	return nil
}

// Stop shuts down the BtcdNotifier.
func (b *BtcdNotifier) Stop() error {
	// Already shutting down?
	if atomic.AddInt32(&b.stopped, 1) != 1 {
		return nil
	}

	// Shutdown the rpc client; this gracefully disconnects from btcd, and
	// cleans up all related resources.
	b.chainConn.Shutdown()

	close(b.quit)
	b.wg.Wait()

	b.chainUpdates.Stop()
	b.txUpdates.Stop()

	// Notify all pending clients of our shutdown by closing the related
	// notification channels.
	for _, spendClients := range b.spendNotifications {
		for _, spendClient := range spendClients {
			close(spendClient.spendChan)
		}
	}
	for _, epochClient := range b.blockEpochClients {
		close(epochClient.cancelChan)
		epochClient.wg.Wait()

		close(epochClient.epochChan)
	}
	b.txConfNotifier.TearDown()

	return nil
}

// onBlockConnected implements the OnBlockConnected callback for rpcclient.
// Ingesting a block updates the wallet's internal utxo state based on the
// outputs created and destroyed within each block.
func (b *BtcdNotifier) onBlockConnected(hash *chainhash.Hash, height int32, t time.Time) {
	// Append this new chain update to the end of the queue of new chain
	// updates.
	b.chainUpdates.ChanIn() <- &chainUpdate{
		blockHash:   hash,
		blockHeight: height,
		connect:     true,
	}
}

// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
//
// TODO(halseth): this is currently used for complete blocks. Change to use
// onFilteredBlockConnected and onFilteredBlockDisconnected, making it easier
// to unify with the Neutrino implementation.
type filteredBlock struct {
	hash   chainhash.Hash
	height uint32
	txns   []*btcutil.Tx

	// connect is true if this update is for a newly connected block and
	// false if it is for a disconnected block.
	connect bool
}

// onBlockDisconnected implements the OnBlockDisconnected callback for
// rpcclient.
func (b *BtcdNotifier) onBlockDisconnected(hash *chainhash.Hash, height int32, t time.Time) {
	// Append this new chain update to the end of the queue of new chain
	// updates.
	b.chainUpdates.ChanIn() <- &chainUpdate{
		blockHash:   hash,
		blockHeight: height,
		connect:     false,
	}
}

// onRedeemingTx implements the OnRedeemingTx callback for rpcclient.
func (b *BtcdNotifier) onRedeemingTx(tx *btcutil.Tx, details *btcjson.BlockDetails) {
	// Append this new transaction update to the end of the queue of new
	// chain updates.
	b.txUpdates.ChanIn() <- &txUpdate{tx, details}
}

// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches.
func (b *BtcdNotifier) notificationDispatcher(currentHeight int32) {
out:
	for {
		select {
		case cancelMsg := <-b.notificationCancels:
			switch msg := cancelMsg.(type) {
			case *spendCancel:
				chainntnfs.Log.Infof("Cancelling spend "+
					"notification for out_point=%v, "+
					"spend_id=%v", msg.op, msg.spendID)

				// Before we attempt to close the spendChan,
				// ensure that the notification hasn't already
				// been dispatched.
				if outPointClients, ok := b.spendNotifications[msg.op]; ok {
					close(outPointClients[msg.spendID].spendChan)
					delete(b.spendNotifications[msg.op], msg.spendID)
				}

			case *epochCancel:
				chainntnfs.Log.Infof("Cancelling epoch "+
					"notification, epoch_id=%v", msg.epochID)

				// First, we'll look up the original
				// registration in order to stop the active
				// queue goroutine.
				reg := b.blockEpochClients[msg.epochID]
				reg.epochQueue.Stop()

				// Next, close the cancel channel for this
				// specific client, and wait for the client to
				// exit.
				close(b.blockEpochClients[msg.epochID].cancelChan)
				b.blockEpochClients[msg.epochID].wg.Wait()

				// Once the client has exited, we can then
				// safely close the channel used to send epoch
				// notifications, in order to notify any
				// listeners that the intent has been
				// cancelled.
				close(b.blockEpochClients[msg.epochID].epochChan)
				delete(b.blockEpochClients, msg.epochID)
			}
		case registerMsg := <-b.notificationRegistry:
			switch msg := registerMsg.(type) {
			case *spendNotification:
				chainntnfs.Log.Infof("New spend subscription: "+
					"utxo=%v", msg.targetOutpoint)
				op := *msg.targetOutpoint

				if _, ok := b.spendNotifications[op]; !ok {
					b.spendNotifications[op] = make(map[uint64]*spendNotification)
				}
				b.spendNotifications[op][msg.spendID] = msg
			case *confirmationsNotification:
				chainntnfs.Log.Infof("New confirmations "+
					"subscription: txid=%v, numconfs=%v",
					msg.TxID, msg.NumConfirmations)

				// Look up whether the transaction is already
				// included in the active chain.
				txConf, err := b.historicalConfDetails(msg.TxID)
				if err != nil {
					chainntnfs.Log.Error(err)
				}
				err = b.txConfNotifier.Register(&msg.ConfNtfn, txConf)
				if err != nil {
					chainntnfs.Log.Error(err)
				}
			case *blockEpochRegistration:
				chainntnfs.Log.Infof("New block epoch subscription")
				b.blockEpochClients[msg.epochID] = msg
			}

		case item := <-b.chainUpdates.ChanOut():
			update := item.(*chainUpdate)
			if update.connect {
				if update.blockHeight != currentHeight+1 {
					chainntnfs.Log.Warnf("Received blocks out of order: "+
						"current height=%d, new height=%d",
						currentHeight, update.blockHeight)
					continue
				}

				currentHeight = update.blockHeight

				rawBlock, err := b.chainConn.GetBlock(update.blockHash)
				if err != nil {
					chainntnfs.Log.Errorf("Unable to get block: %v", err)
					continue
				}

				chainntnfs.Log.Infof("New block: height=%v, sha=%v",
					update.blockHeight, update.blockHash)

				txns := btcutil.NewBlock(rawBlock).Transactions()

				block := &filteredBlock{
					hash:    *update.blockHash,
					height:  uint32(update.blockHeight),
					txns:    txns,
					connect: true,
				}
				if err := b.handleBlockConnected(block); err != nil {
					chainntnfs.Log.Error(err)
				}
				continue
			}

			if update.blockHeight != currentHeight {
				chainntnfs.Log.Warnf("Received blocks out of order: "+
					"current height=%d, disconnected height=%d",
					currentHeight, update.blockHeight)
				continue
			}

			currentHeight = update.blockHeight - 1

			chainntnfs.Log.Infof("Block disconnected from main chain: "+
				"height=%v, sha=%v", update.blockHeight, update.blockHash)

			err := b.txConfNotifier.DisconnectTip(uint32(update.blockHeight))
			if err != nil {
				chainntnfs.Log.Error(err)
			}

		// NOTE: we currently only use txUpdates for mempool spends. It
		// might get removed entirely in the future.
		case item := <-b.txUpdates.ChanOut():
			newSpend := item.(*txUpdate)
			spendingTx := newSpend.tx

			// First, check if this transaction spends an output
			// that has an existing spend notification for it.
			for i, txIn := range spendingTx.MsgTx().TxIn {
				prevOut := txIn.PreviousOutPoint

				// If this transaction indeed does spend an
				// output which we have a registered
				// notification for, then create a spend
				// summary, finally sending off the details to
				// the notification subscriber.
				if clients, ok := b.spendNotifications[prevOut]; ok {
					spenderSha := newSpend.tx.Hash()
					spendDetails := &chainntnfs.SpendDetail{
						SpentOutPoint:     &prevOut,
						SpenderTxHash:     spenderSha,
						SpendingTx:        spendingTx.MsgTx(),
						SpenderInputIndex: uint32(i),
					}
					// TODO(roasbeef): after change to
					// loadfilter, only notify on block
					// inclusion?
					if newSpend.details != nil {
						spendDetails.SpendingHeight = newSpend.details.Height
					} else {
						spendDetails.SpendingHeight = currentHeight + 1
					}

					// Keep spendNotifications that are
					// waiting for a confirmation around.
					// They will be notified when we find
					// the spend within a block.
					rem := make(map[uint64]*spendNotification)
					for c, ntfn := range clients {
						// If this client didn't want
						// to be notified on mempool
						// spends, store it for later.
						if !ntfn.mempool {
							rem[c] = ntfn
							continue
						}

						chainntnfs.Log.Infof("Dispatching "+
							"spend notification for "+
							"outpoint=%v", ntfn.targetOutpoint)
						ntfn.spendChan <- spendDetails

						// Close spendChan to ensure that any calls to Cancel will not
						// block. This is safe to do since the channel is buffered, and the
						// message can still be read by the receiver.
						close(ntfn.spendChan)
					}
					delete(b.spendNotifications, prevOut)

					// If we had any clients left, add them
					// back to the map.
					if len(rem) > 0 {
						b.spendNotifications[prevOut] = rem
					}
				}
			}

		case <-b.quit:
			break out
		}
	}
	b.wg.Done()
}

// historicalConfDetails looks up whether a transaction is already included in a
// block in the active chain and, if so, returns details about the confirmation.
func (b *BtcdNotifier) historicalConfDetails(txid *chainhash.Hash,
) (*chainntnfs.TxConfirmation, error) {
	// If the transaction already has some or all of the confirmations,
	// then we may be able to dispatch it immediately.
	tx, err := b.chainConn.GetRawTransactionVerbose(txid)
	if err != nil || tx == nil || tx.BlockHash == "" {
		if err == nil {
			return nil, nil
		}
		// Do not return an error if the transaction was not found.
		if jsonErr, ok := err.(*btcjson.RPCError); ok {
			if jsonErr.Code == btcjson.ErrRPCNoTxInfo {
				return nil, nil
			}
		}
		return nil, fmt.Errorf("unable to query for txid(%v): %v", txid, err)
	}

	// As we need to fully populate the returned TxConfirmation struct,
	// grab the block in which the transaction was confirmed so we can
	// locate its exact index within the block.
	blockHash, err := chainhash.NewHashFromStr(tx.BlockHash)
	if err != nil {
		return nil, fmt.Errorf("unable to get block hash %v for historical "+
			"dispatch: %v", tx.BlockHash, err)
	}
	block, err := b.chainConn.GetBlockVerbose(blockHash)
	if err != nil {
		return nil, fmt.Errorf("unable to get block with hash %v: %v",
			blockHash, err)
	}

	// With the block obtained, locate the transaction's index within the
	// block so we can give the subscriber full confirmation details.
	txIndex := -1
	targetTxidStr := txid.String()
	for i, txHash := range block.Tx {
		if txHash == targetTxidStr {
			txIndex = i
			break
		}
	}

	if txIndex == -1 {
		return nil, fmt.Errorf("unable to locate tx %v in block %v",
			txid, blockHash)
	}

	txConf := chainntnfs.TxConfirmation{
		BlockHash:   blockHash,
		BlockHeight: uint32(block.Height),
		TxIndex:     uint32(txIndex),
	}
	return &txConf, nil
}

// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included in this block will be processed to either send
// notifications now or after numConfirmations confs.
// TODO(halseth): this is reusing the neutrino notifier implementation, unify
// them.
func (b *BtcdNotifier) handleBlockConnected(newBlock *filteredBlock) error {
	// First we'll notify any subscribed clients of the block.
	b.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)

	// Next, we'll scan over the list of relevant transactions and possibly
	// dispatch notifications for confirmations and spends.
	for _, tx := range newBlock.txns {
		mtx := tx.MsgTx()
		txSha := mtx.TxHash()

		for i, txIn := range mtx.TxIn {
			prevOut := txIn.PreviousOutPoint

			// If this transaction indeed does spend an output which we have a
			// registered notification for, then create a spend summary, finally
			// sending off the details to the notification subscriber.
			clients, ok := b.spendNotifications[prevOut]
			if !ok {
				continue
			}

			// TODO(roasbeef): many integration tests expect spend to be
			// notified within the mempool.
			spendDetails := &chainntnfs.SpendDetail{
				SpentOutPoint:     &prevOut,
				SpenderTxHash:     &txSha,
				SpendingTx:        mtx,
				SpenderInputIndex: uint32(i),
				SpendingHeight:    int32(newBlock.height),
			}

			for _, ntfn := range clients {
				chainntnfs.Log.Infof("Dispatching spend notification for "+
					"outpoint=%v", ntfn.targetOutpoint)
				ntfn.spendChan <- spendDetails

				// Close spendChan to ensure that any calls to Cancel will not
				// block. This is safe to do since the channel is buffered, and
				// the message can still be read by the receiver.
				close(ntfn.spendChan)
			}

			delete(b.spendNotifications, prevOut)
		}
	}

	// A new block has been connected to the main chain. Send out any N
	// confirmation notifications which may have been triggered by this
	// new block.
	b.txConfNotifier.ConnectTip(&newBlock.hash, newBlock.height, newBlock.txns)

	return nil
}

// notifyBlockEpochs notifies all registered block epoch clients of the newly
// connected block to the main chain.
func (b *BtcdNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
	epoch := &chainntnfs.BlockEpoch{
		Height: newHeight,
		Hash:   newSha,
	}

	for _, epochClient := range b.blockEpochClients {
		select {
		case epochClient.epochQueue.ChanIn() <- epoch:

		case <-epochClient.cancelChan:

		case <-b.quit:
		}
	}
}

// spendNotification couples a target outpoint along with the channel used for
// notifications once a spend of the outpoint has been detected.
type spendNotification struct {
	targetOutpoint *wire.OutPoint

	spendChan chan *chainntnfs.SpendDetail

	spendID uint64

	mempool bool
}

// spendCancel is a message sent to the BtcdNotifier when a client wishes to
// cancel an outstanding spend notification that has yet to be dispatched.
type spendCancel struct {
	// op is the target outpoint of the notification to be cancelled.
	op wire.OutPoint

	// spendID is the ID of the notification to cancel.
	spendID uint64
}

// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint has been spent by a transaction on-chain. Once a spend of the target
// outpoint has been detected, the details of the spending event will be sent
// across the 'Spend' channel.
func (b *BtcdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
	_ uint32, mempool bool) (*chainntnfs.SpendEvent, error) {
	ntfn := &spendNotification{
		targetOutpoint: outpoint,
		spendChan:      make(chan *chainntnfs.SpendDetail, 1),
		spendID:        atomic.AddUint64(&b.spendClientCounter, 1),
		mempool:        mempool,
	}

	select {
	case <-b.quit:
		return nil, ErrChainNotifierShuttingDown
	case b.notificationRegistry <- ntfn:
	}

	if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil {
		return nil, err
	}

	// The following conditional checks to ensure that when a spend notification
	// is registered, the output hasn't already been spent. If the output
	// is no longer in the UTXO set, the chain will be rescanned from the point
	// where the output was added. The rescan will dispatch the notification.
	txout, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
	if err != nil {
		return nil, err
	}

	if txout == nil {
		transaction, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
		if err != nil {
			jsonErr, ok := err.(*btcjson.RPCError)
			if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
				return nil, err
			}
		}

		// We'll only request a rescan if the transaction has actually
		// been included within a block. Otherwise, we'll encounter an
		// error when scanning for blocks. This can happen in the case
		// of a race condition, wherein the output itself is unspent,
		// and only arrives in the mempool after the gettxout call.
		if transaction != nil && transaction.BlockHash != "" {
			blockhash, err := chainhash.NewHashFromStr(transaction.BlockHash)
			if err != nil {
				return nil, err
			}

			ops := []*wire.OutPoint{outpoint}
			if err := b.chainConn.Rescan(blockhash, nil, ops); err != nil {
				chainntnfs.Log.Errorf("Rescan for spend "+
					"notification txout failed: %v", err)
				return nil, err
			}
		}
	}

	return &chainntnfs.SpendEvent{
		Spend: ntfn.spendChan,
		Cancel: func() {
			cancel := &spendCancel{
				op:      *outpoint,
				spendID: ntfn.spendID,
			}

			// Submit spend cancellation to notification dispatcher.
			select {
			case b.notificationCancels <- cancel:
				// Cancellation is being handled, drain the spend chan until it is
				// closed before yielding to the caller.
				for {
					select {
					case _, ok := <-ntfn.spendChan:
						if !ok {
							return
						}
					case <-b.quit:
						return
					}
				}
			case <-b.quit:
			}
		},
	}, nil
}
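
// A hedged usage sketch for the registration above (illustrative, not from
// the original source). The outpoint and quit channel are assumed to be
// supplied by the caller; the height hint parameter is ignored by this
// implementation (note the underscore in the signature).
//
//	spendEvent, err := notifier.RegisterSpendNtfn(&outpoint, 0, true)
//	if err != nil {
//		// Handle the error.
//	}
//	select {
//	case details, ok := <-spendEvent.Spend:
//		if ok {
//			// details.SpenderTxHash spent the outpoint at
//			// details.SpendingHeight.
//		}
//	case <-quit:
//		spendEvent.Cancel()
//	}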

// confirmationsNotification represents a client's intent to receive a
// notification once the target txid reaches numConfirmations confirmations.
type confirmationsNotification struct {
	chainntnfs.ConfNtfn
}

// RegisterConfirmationsNtfn registers a notification with BtcdNotifier
// which will be triggered once the txid reaches numConfs number of
// confirmations.
func (b *BtcdNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	numConfs, _ uint32) (*chainntnfs.ConfirmationEvent, error) {
	ntfn := &confirmationsNotification{
		chainntnfs.ConfNtfn{
			TxID:             txid,
			NumConfirmations: numConfs,
			Event:            chainntnfs.NewConfirmationEvent(numConfs),
		},
	}

	select {
	case <-b.quit:
		return nil, ErrChainNotifierShuttingDown
	case b.notificationRegistry <- ntfn:
		return ntfn.Event, nil
	}
}
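
// An illustrative sketch of waiting for six confirmations (caller-side code
// assumed, not part of the original file). The txid and quit channel come
// from the caller, and the final argument (a height hint) is unused by this
// implementation. The Confirmed channel on the returned event is assumed
// from the chainntnfs.ConfirmationEvent definition.
//
//	confEvent, err := notifier.RegisterConfirmationsNtfn(txid, 6, 0)
//	if err != nil {
//		// Handle the error.
//	}
//	select {
//	case conf := <-confEvent.Confirmed:
//		// conf.BlockHeight is the height at which txid confirmed.
//	case <-quit:
//	}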

// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	epochID uint64

	epochChan chan *chainntnfs.BlockEpoch

	epochQueue *chainntnfs.ConcurrentQueue

	cancelChan chan struct{}

	wg sync.WaitGroup
}

// epochCancel is a message sent to the BtcdNotifier when a client wishes to
// cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	epochID uint64
}

// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications of each new block connected to the main
// chain.
func (b *BtcdNotifier) RegisterBlockEpochNtfn() (*chainntnfs.BlockEpochEvent, error) {
	reg := &blockEpochRegistration{
		epochQueue: chainntnfs.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		epochID:    atomic.AddUint64(&b.epochClientCounter, 1),
	}
	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				select {
				case reg.epochChan <- blockNtfn:

				case <-reg.cancelChan:
					return

				case <-b.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-b.quit:
				return
			}
		}
	}()

	select {
	case <-b.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification.")
	case b.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit epoch cancellation to notification dispatcher.
				select {
				case b.notificationCancels <- cancel:
					// Cancellation is being handled, drain
					// the epoch channel until it is closed
					// before yielding to caller.
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-b.quit:
							return
						}
					}
				case <-b.quit:
				}
			},
		}, nil
	}
}
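
// An illustrative consumption sketch for block epochs (assumed caller-side
// code, not part of the original file). Each receive on Epochs yields the
// next connected block, delivered in order thanks to the per-client queue
// above; the channel is closed once the registration is cancelled or the
// notifier shuts down.
//
//	epochEvent, err := notifier.RegisterBlockEpochNtfn()
//	if err != nil {
//		// Handle the error.
//	}
//	for epoch := range epochEvent.Epochs {
//		// epoch.Height and epoch.Hash describe the new chain tip.
//	}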
|