package neutrinonotify

import (
	"errors"
	"fmt"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/lightninglabs/neutrino"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/roasbeef/btcd/chaincfg/chainhash"
	"github.com/roasbeef/btcd/rpcclient"
	"github.com/roasbeef/btcd/wire"
	"github.com/roasbeef/btcutil"
	"github.com/roasbeef/btcutil/gcs/builder"
	"github.com/roasbeef/btcwallet/waddrmgr"
)

const (
	// notifierType uniquely identifies this concrete implementation of the
	// ChainNotifier interface.
	notifierType = "neutrino"

	// reorgSafetyLimit is the chain depth beyond which it is assumed a block
	// will not be reorganized out of the chain. This is used to determine when
	// to prune old confirmation requests so that reorgs are handled correctly.
	// The coinbase maturity period is a reasonable value to use.
	reorgSafetyLimit = 100
)

var (
	// ErrChainNotifierShuttingDown is used when we attempt to register for
	// a spend notification while the notifier is already stopped.
	ErrChainNotifierShuttingDown = errors.New("chainntnfs: system interrupt " +
		"while attempting to register for spend notification.")
)

// NeutrinoNotifier is a version of ChainNotifier that's backed by the neutrino
// Bitcoin light client. Unlike other implementations, this implementation
// speaks directly to the p2p network. As a result, this implementation of the
// ChainNotifier interface is much more lightweight than other implementations
// which rely on receiving notifications over an RPC interface backed by a
// running full node.
//
// TODO(roasbeef): heavily consolidate with NeutrinoNotifier code
//  * maybe combine into single package?
type NeutrinoNotifier struct {
	started int32 // To be used atomically.
	stopped int32 // To be used atomically.

	spendClientCounter uint64 // To be used atomically.
	epochClientCounter uint64 // To be used atomically.

	heightMtx  sync.RWMutex
	bestHeight uint32

	p2pNode   *neutrino.ChainService
	chainView neutrino.Rescan

	notificationCancels  chan interface{}
	notificationRegistry chan interface{}

	spendNotifications map[wire.OutPoint]map[uint64]*spendNotification

	txConfNotifier *chainntnfs.TxConfNotifier

	blockEpochClients map[uint64]*blockEpochRegistration

	rescanErr <-chan error

	chainUpdates *chainntnfs.ConcurrentQueue

	wg   sync.WaitGroup
	quit chan struct{}
}

// Ensure NeutrinoNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*NeutrinoNotifier)(nil)

// New creates a new instance of the NeutrinoNotifier concrete implementation
// of the ChainNotifier interface.
//
// NOTE: The passed neutrino node should already be running and active before
// being passed into this function.
func New(node *neutrino.ChainService) (*NeutrinoNotifier, error) {
	notifier := &NeutrinoNotifier{
		notificationCancels:  make(chan interface{}),
		notificationRegistry: make(chan interface{}),

		blockEpochClients: make(map[uint64]*blockEpochRegistration),

		spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification),

		p2pNode: node,

		rescanErr: make(chan error),

		chainUpdates: chainntnfs.NewConcurrentQueue(10),

		quit: make(chan struct{}),
	}

	return notifier, nil
}
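
// The sketch below illustrates the intended lifecycle of the notifier from an
// external caller's point of view. It is a minimal sketch only, and assumes
// the caller already has a running *neutrino.ChainService (named chainService
// here) and handles errors appropriately:
//
//	notifier, err := neutrinonotify.New(chainService)
//	if err != nil {
//		// handle error
//	}
//	if err := notifier.Start(); err != nil {
//		// handle error
//	}
//	defer notifier.Stop()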

// Start contacts the running neutrino light client and kicks off an initial
// empty rescan.
func (n *NeutrinoNotifier) Start() error {
	// Already started?
	if atomic.AddInt32(&n.started, 1) != 1 {
		return nil
	}

	// First, we'll obtain the latest block height of the p2p node. We'll
	// start the auto-rescan from this point. Once a caller actually wishes
	// to register a chain view, the rescan state will be rewound
	// accordingly.
	bestHeader, bestHeight, err := n.p2pNode.BlockHeaders.ChainTip()
	if err != nil {
		return err
	}
	startingPoint := &waddrmgr.BlockStamp{
		Height: int32(bestHeight),
		Hash:   bestHeader.BlockHash(),
	}
	n.bestHeight = bestHeight

	// Next, we'll create our set of rescan options. Currently it's
	// required that a user MUST set an addr/outpoint/txid when creating a
	// rescan. To get around this, we'll watch a "zero" txid that won't
	// actually be matched.
	var zeroHash chainhash.Hash
	rescanOptions := []neutrino.RescanOption{
		neutrino.StartBlock(startingPoint),
		neutrino.QuitChan(n.quit),
		neutrino.NotificationHandlers(
			rpcclient.NotificationHandlers{
				OnFilteredBlockConnected:    n.onFilteredBlockConnected,
				OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected,
			},
		),
		neutrino.WatchTxIDs(zeroHash),
	}

	n.txConfNotifier = chainntnfs.NewTxConfNotifier(
		bestHeight, reorgSafetyLimit)

	// Finally, we'll create our rescan struct, start it, and launch all
	// the goroutines we need to operate this ChainNotifier instance.
	n.chainView = n.p2pNode.NewRescan(rescanOptions...)
	n.rescanErr = n.chainView.Start()

	n.chainUpdates.Start()

	n.wg.Add(1)
	go n.notificationDispatcher()

	return nil
}

// Stop shuts down the NeutrinoNotifier.
func (n *NeutrinoNotifier) Stop() error {
	// Already shutting down?
	if atomic.AddInt32(&n.stopped, 1) != 1 {
		return nil
	}

	close(n.quit)
	n.wg.Wait()

	n.chainUpdates.Stop()

	// Notify all pending clients of our shutdown by closing the related
	// notification channels.
	for _, spendClients := range n.spendNotifications {
		for _, spendClient := range spendClients {
			close(spendClient.spendChan)
		}
	}
	for _, epochClient := range n.blockEpochClients {
		close(epochClient.cancelChan)
		epochClient.wg.Wait()

		close(epochClient.epochChan)
	}
	n.txConfNotifier.TearDown()

	return nil
}

// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
type filteredBlock struct {
	hash   chainhash.Hash
	height uint32
	txns   []*btcutil.Tx

	// connect is true if this update is for a newly connected block and
	// false if it is for a disconnected block.
	connect bool
}

// onFilteredBlockConnected is a callback which is executed each time a new
// block is connected to the end of the main chain.
func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32,
	header *wire.BlockHeader, txns []*btcutil.Tx) {

	// Append this new chain update to the end of the queue of new chain
	// updates.
	n.chainUpdates.ChanIn() <- &filteredBlock{
		hash:    header.BlockHash(),
		height:  uint32(height),
		txns:    txns,
		connect: true,
	}
}

// onFilteredBlockDisconnected is a callback which is executed each time a new
// block has been disconnected from the end of the main chain due to a re-org.
func (n *NeutrinoNotifier) onFilteredBlockDisconnected(height int32,
	header *wire.BlockHeader) {

	// Append this new chain update to the end of the queue of new chain
	// disconnects.
	n.chainUpdates.ChanIn() <- &filteredBlock{
		hash:    header.BlockHash(),
		height:  uint32(height),
		connect: false,
	}
}

// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches.
func (n *NeutrinoNotifier) notificationDispatcher() {
	defer n.wg.Done()

	for {
		select {
		case cancelMsg := <-n.notificationCancels:
			switch msg := cancelMsg.(type) {
			case *spendCancel:
				chainntnfs.Log.Infof("Cancelling spend "+
					"notification for out_point=%v, "+
					"spend_id=%v", msg.op, msg.spendID)

				// Before we attempt to close the spendChan,
				// ensure that the notification hasn't already
				// been dispatched.
				if outPointClients, ok := n.spendNotifications[msg.op]; ok {
					close(outPointClients[msg.spendID].spendChan)
					delete(n.spendNotifications[msg.op], msg.spendID)
				}

			case *epochCancel:
				chainntnfs.Log.Infof("Cancelling epoch "+
					"notification, epoch_id=%v", msg.epochID)

				// First, we'll look up the original
				// registration in order to stop the active
				// queue goroutine.
				reg := n.blockEpochClients[msg.epochID]
				reg.epochQueue.Stop()

				// Next, close the cancel channel for this
				// specific client, and wait for the client to
				// exit.
				close(n.blockEpochClients[msg.epochID].cancelChan)
				n.blockEpochClients[msg.epochID].wg.Wait()

				// Once the client has exited, we can then
				// safely close the channel used to send epoch
				// notifications, in order to notify any
				// listeners that the intent has been
				// cancelled.
				close(n.blockEpochClients[msg.epochID].epochChan)
				delete(n.blockEpochClients, msg.epochID)
			}

		case registerMsg := <-n.notificationRegistry:
			switch msg := registerMsg.(type) {
			case *spendNotification:
				chainntnfs.Log.Infof("New spend subscription: "+
					"utxo=%v, height_hint=%v",
					msg.targetOutpoint, msg.heightHint)
				op := *msg.targetOutpoint

				if _, ok := n.spendNotifications[op]; !ok {
					n.spendNotifications[op] = make(map[uint64]*spendNotification)
				}
				n.spendNotifications[op][msg.spendID] = msg

			case *confirmationsNotification:
				chainntnfs.Log.Infof("New confirmations subscription: "+
					"txid=%v, numconfs=%v, height_hint=%v",
					msg.TxID, msg.NumConfirmations, msg.heightHint)

				// If the notification can be partially or
				// fully dispatched, then we can skip the first
				// phase for ntfns.
				n.heightMtx.RLock()
				currentHeight := n.bestHeight
				n.heightMtx.RUnlock()

				// Look up whether the transaction is already
				// included in the active chain.
				txConf, err := n.historicalConfDetails(msg.TxID, currentHeight,
					msg.heightHint)
				if err != nil {
					chainntnfs.Log.Error(err)
				}

				if txConf == nil {
					// If we can't fully dispatch the
					// confirmation, then we'll update our
					// filter so we can be notified of its
					// future initial confirmation.
					rescanUpdate := []neutrino.UpdateOption{
						neutrino.AddTxIDs(*msg.TxID),
						neutrino.Rewind(currentHeight),
					}
					if err := n.chainView.Update(rescanUpdate...); err != nil {
						chainntnfs.Log.Errorf("unable to update rescan: %v", err)
					}
				}

				err = n.txConfNotifier.Register(&msg.ConfNtfn, txConf)
				if err != nil {
					chainntnfs.Log.Error(err)
				}

			case *blockEpochRegistration:
				chainntnfs.Log.Infof("New block epoch subscription")
				n.blockEpochClients[msg.epochID] = msg
			}

		case item := <-n.chainUpdates.ChanOut():
			update := item.(*filteredBlock)
			if update.connect {
				n.heightMtx.Lock()
				if update.height != n.bestHeight+1 {
					chainntnfs.Log.Warnf("Received blocks out of order: "+
						"current height=%d, new height=%d",
						n.bestHeight, update.height)
					n.heightMtx.Unlock()
					continue
				}

				n.bestHeight = update.height
				n.heightMtx.Unlock()

				chainntnfs.Log.Infof("New block: height=%v, sha=%v",
					update.height, update.hash)

				err := n.handleBlockConnected(update)
				if err != nil {
					chainntnfs.Log.Error(err)
				}
				continue
			}

			n.heightMtx.Lock()
			if update.height != n.bestHeight {
				chainntnfs.Log.Warnf("Received blocks out of order: "+
					"current height=%d, disconnected height=%d",
					n.bestHeight, update.height)
				n.heightMtx.Unlock()
				continue
			}

			n.bestHeight = update.height - 1
			n.heightMtx.Unlock()

			chainntnfs.Log.Infof("Block disconnected from main chain: "+
				"height=%v, sha=%v", update.height, update.hash)

			err := n.txConfNotifier.DisconnectTip(update.height)
			if err != nil {
				chainntnfs.Log.Error(err)
			}

		case err := <-n.rescanErr:
			chainntnfs.Log.Errorf("Error during rescan: %v", err)

		case <-n.quit:
			return
		}
	}
}

// historicalConfDetails looks up whether a transaction is already included in a
// block in the active chain and, if so, returns details about the confirmation.
func (n *NeutrinoNotifier) historicalConfDetails(targetHash *chainhash.Hash,
	currentHeight, heightHint uint32) (*chainntnfs.TxConfirmation, error) {

	// Starting from the height hint, we'll walk forwards in the chain to
	// see if this transaction has already been confirmed.
	for scanHeight := heightHint; scanHeight <= currentHeight; scanHeight++ {
		// First, we'll fetch the block header for this height so we
		// can compute the current block hash.
		header, err := n.p2pNode.BlockHeaders.FetchHeaderByHeight(scanHeight)
		if err != nil {
			return nil, fmt.Errorf("unable to get header for height=%v: %v",
				scanHeight, err)
		}
		blockHash := header.BlockHash()

		// With the hash computed, we can now fetch the basic filter
		// for this height.
		regFilter, err := n.p2pNode.GetCFilter(blockHash,
			wire.GCSFilterRegular)
		if err != nil {
			return nil, fmt.Errorf("unable to retrieve regular filter for "+
				"height=%v: %v", scanHeight, err)
		}

		// If the block has no transactions other than the coinbase
		// transaction, then the filter may be nil, so we'll continue
		// forward in that case.
		if regFilter == nil {
			continue
		}

		// In the case that the filter exists, we'll attempt to see if
		// any element in it matches our target txid.
		key := builder.DeriveKey(&blockHash)
		match, err := regFilter.Match(key, targetHash[:])
		if err != nil {
			return nil, fmt.Errorf("unable to query filter: %v", err)
		}

		// If there's no match, then we can continue forward to the
		// next block.
		if !match {
			continue
		}

		// In the case that we do have a match, we'll fetch the block
		// from the network so we can find the positional data required
		// to send the proper response.
		block, err := n.p2pNode.GetBlockFromNetwork(blockHash)
		if err != nil {
			return nil, fmt.Errorf("unable to get block from network: %v", err)
		}
		for j, tx := range block.Transactions() {
			txHash := tx.Hash()
			if txHash.IsEqual(targetHash) {
				confDetails := chainntnfs.TxConfirmation{
					BlockHash:   &blockHash,
					BlockHeight: scanHeight,
					TxIndex:     uint32(j),
				}
				return &confDetails, nil
			}
		}
	}

	return nil, nil
}

// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included in this block will be processed to either send
// notifications now or after numConfirmations confs.
func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
	// First we'll notify any subscribed clients of the block.
	n.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)

	// Next, we'll scan over the list of relevant transactions and possibly
	// dispatch notifications for confirmations and spends.
	for _, tx := range newBlock.txns {
		mtx := tx.MsgTx()
		txSha := mtx.TxHash()

		for i, txIn := range mtx.TxIn {
			prevOut := txIn.PreviousOutPoint

			// If this transaction indeed does spend an output which we have a
			// registered notification for, then create a spend summary, finally
			// sending off the details to the notification subscriber.
			clients, ok := n.spendNotifications[prevOut]
			if !ok {
				continue
			}

			// TODO(roasbeef): many integration tests expect spend to be
			// notified within the mempool.
			spendDetails := &chainntnfs.SpendDetail{
				SpentOutPoint:     &prevOut,
				SpenderTxHash:     &txSha,
				SpendingTx:        mtx,
				SpenderInputIndex: uint32(i),
				SpendingHeight:    int32(newBlock.height),
			}

			for _, ntfn := range clients {
				chainntnfs.Log.Infof("Dispatching spend notification for "+
					"outpoint=%v", ntfn.targetOutpoint)
				ntfn.spendChan <- spendDetails

				// Close spendChan to ensure that any calls to Cancel will not
				// block. This is safe to do since the channel is buffered, and
				// the message can still be read by the receiver.
				close(ntfn.spendChan)
			}

			delete(n.spendNotifications, prevOut)
		}
	}

	// A new block has been connected to the main chain.
	// Send out any N confirmation notifications which may
	// have been triggered by this new block.
	n.txConfNotifier.ConnectTip(&newBlock.hash, newBlock.height, newBlock.txns)

	return nil
}

// notifyBlockEpochs notifies all registered block epoch clients of the block
// that was just connected to the main chain.
func (n *NeutrinoNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
	epoch := &chainntnfs.BlockEpoch{
		Height: newHeight,
		Hash:   newSha,
	}

	for _, epochClient := range n.blockEpochClients {
		select {

		case epochClient.epochQueue.ChanIn() <- epoch:

		case <-epochClient.cancelChan:

		case <-n.quit:
		}
	}
}

// spendNotification couples a target outpoint along with the channel used for
// notifications once a spend of the outpoint has been detected.
type spendNotification struct {
	targetOutpoint *wire.OutPoint

	spendChan chan *chainntnfs.SpendDetail

	spendID uint64

	heightHint uint32
}

// spendCancel is a message sent to the NeutrinoNotifier when a client wishes
// to cancel an outstanding spend notification that has yet to be dispatched.
type spendCancel struct {
	// op is the target outpoint of the notification to be cancelled.
	op wire.OutPoint

	// spendID is the ID of the notification to cancel.
	spendID uint64
}

// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint has been spent by a transaction on-chain. Once a spend of the
// target outpoint has been detected, the details of the spending event will be
// sent across the 'Spend' channel.
func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
	heightHint uint32, _ bool) (*chainntnfs.SpendEvent, error) {

	n.heightMtx.RLock()
	currentHeight := n.bestHeight
	n.heightMtx.RUnlock()

	chainntnfs.Log.Infof("New spend notification for outpoint=%v, "+
		"height_hint=%v", outpoint, heightHint)

	ntfn := &spendNotification{
		targetOutpoint: outpoint,
		spendChan:      make(chan *chainntnfs.SpendDetail, 1),
		spendID:        atomic.AddUint64(&n.spendClientCounter, 1),
		heightHint:     heightHint,
	}
	spendEvent := &chainntnfs.SpendEvent{
		Spend: ntfn.spendChan,
		Cancel: func() {
			cancel := &spendCancel{
				op:      *outpoint,
				spendID: ntfn.spendID,
			}

			// Submit spend cancellation to notification dispatcher.
			select {
			case n.notificationCancels <- cancel:
				// Cancellation is being handled, drain the spend chan until it is
				// closed before yielding to the caller.
				for {
					select {
					case _, ok := <-ntfn.spendChan:
						if !ok {
							return
						}
					case <-n.quit:
						return
					}
				}
			case <-n.quit:
			}
		},
	}

	// Ensure that neutrino is caught up to the height hint before we
	// attempt to fetch the utxo from the chain. If we're behind, then we
	// may miss a notification dispatch.
	for {
		n.heightMtx.RLock()
		currentHeight := n.bestHeight
		n.heightMtx.RUnlock()

		if currentHeight < heightHint {
			time.Sleep(time.Millisecond * 200)
			continue
		}

		break
	}

	// Before sending off the notification request, we'll attempt to
	// determine whether this output has already been spent at this point
	// in the chain.
	spendReport, err := n.p2pNode.GetUtxo(
		neutrino.WatchOutPoints(*outpoint),
		neutrino.StartBlock(&waddrmgr.BlockStamp{
			Height: int32(heightHint),
		}),
	)
	if err != nil && !strings.Contains(err.Error(), "not found") {
		return nil, err
	}

	// If a spend report was returned, and the transaction is present, then
	// this means that the output is already spent.
	if spendReport != nil && spendReport.SpendingTx != nil {
		// As a result, we'll launch a goroutine to immediately
		// dispatch the notification with a normal response.
		go func() {
			txSha := spendReport.SpendingTx.TxHash()
			select {
			case ntfn.spendChan <- &chainntnfs.SpendDetail{
				SpentOutPoint:     outpoint,
				SpenderTxHash:     &txSha,
				SpendingTx:        spendReport.SpendingTx,
				SpenderInputIndex: spendReport.SpendingInputIndex,
				SpendingHeight:    int32(spendReport.SpendingTxHeight),
			}:
			case <-n.quit:
				return
			}
		}()

		return spendEvent, nil
	}

	// If the output is still unspent, then we'll update our rescan's
	// filter, and send the request to the dispatcher goroutine.
	rescanUpdate := []neutrino.UpdateOption{
		neutrino.AddOutPoints(*outpoint),
		neutrino.Rewind(currentHeight),
	}

	if err := n.chainView.Update(rescanUpdate...); err != nil {
		return nil, err
	}

	select {
	case n.notificationRegistry <- ntfn:
	case <-n.quit:
		return nil, ErrChainNotifierShuttingDown
	}

	return spendEvent, nil
}
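
// A minimal sketch of how a caller might consume a spend notification; the
// notifier, outpoint, and heightHint values are assumed to be supplied by the
// caller:
//
//	spendEvent, err := notifier.RegisterSpendNtfn(outpoint, heightHint, false)
//	if err != nil {
//		// handle error
//	}
//	if spendDetail, ok := <-spendEvent.Spend; ok {
//		// spendDetail describes the spending transaction.
//		_ = spendDetail
//	}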

// confirmationsNotification represents a client's intent to receive a
// notification once the target txid reaches numConfirmations confirmations.
type confirmationsNotification struct {
	chainntnfs.ConfNtfn
	heightHint uint32
}

// RegisterConfirmationsNtfn registers a notification with NeutrinoNotifier
// which will be triggered once the txid reaches numConfs number of
// confirmations.
func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {

	ntfn := &confirmationsNotification{
		ConfNtfn: chainntnfs.ConfNtfn{
			TxID:             txid,
			NumConfirmations: numConfs,
			Event:            chainntnfs.NewConfirmationEvent(numConfs),
		},
		heightHint: heightHint,
	}

	select {
	case <-n.quit:
		return nil, ErrChainNotifierShuttingDown
	case n.notificationRegistry <- ntfn:
		return ntfn.Event, nil
	}
}
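
// A minimal sketch of registering for a confirmation notification; the
// notifier, txid, and heightHint values are assumed to be supplied by the
// caller, and the requested depth of 6 confirmations is only an example:
//
//	confEvent, err := notifier.RegisterConfirmationsNtfn(txid, 6, heightHint)
//	if err != nil {
//		// handle error
//	}
//	// The channels on the returned chainntnfs.ConfirmationEvent deliver the
//	// confirmation details once the transaction reaches the requested depth.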

// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	epochID uint64

	epochChan chan *chainntnfs.BlockEpoch

	epochQueue *chainntnfs.ConcurrentQueue

	cancelChan chan struct{}

	wg sync.WaitGroup
}

// epochCancel is a message sent to the NeutrinoNotifier when a client wishes
// to cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	epochID uint64
}

// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the caller
// to receive notifications of each new block connected to the main chain.
func (n *NeutrinoNotifier) RegisterBlockEpochNtfn() (*chainntnfs.BlockEpochEvent, error) {
	reg := &blockEpochRegistration{
		epochQueue: chainntnfs.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		epochID:    atomic.AddUint64(&n.epochClientCounter, 1),
	}
	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				select {
				case reg.epochChan <- blockNtfn:

				case <-reg.cancelChan:
					return

				case <-n.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-n.quit:
				return
			}
		}
	}()

	select {
	case <-n.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification.")
	case n.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit epoch cancellation to notification dispatcher.
				select {
				case n.notificationCancels <- cancel:
					// Cancellation is being handled, drain the epoch channel until it is
					// closed before yielding to caller.
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-n.quit:
							return
						}
					}
				case <-n.quit:
				}
			},
		}, nil
	}
}
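
// A minimal sketch of subscribing to block epoch notifications; the notifier
// value is assumed to be supplied by the caller, and reading from the Epochs
// channel in a separate goroutine is the caller's responsibility:
//
//	epochEvent, err := notifier.RegisterBlockEpochNtfn()
//	if err != nil {
//		// handle error
//	}
//	go func() {
//		for epoch := range epochEvent.Epochs {
//			// epoch.Height and epoch.Hash describe the new tip.
//			_ = epoch
//		}
//	}()
//	// Call epochEvent.Cancel() when the subscription is no longer needed.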