2017-05-24 04:13:45 +03:00
|
|
|
package neutrinonotify
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
2017-11-14 02:49:58 +03:00
|
|
|
"fmt"
|
2018-02-03 04:59:11 +03:00
|
|
|
"strings"
|
2017-05-24 04:13:45 +03:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
2017-07-05 01:54:35 +03:00
|
|
|
"time"
|
2017-05-24 04:13:45 +03:00
|
|
|
|
2018-08-09 10:05:28 +03:00
|
|
|
"github.com/btcsuite/btcd/btcjson"
|
2018-06-05 04:34:16 +03:00
|
|
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
|
|
|
"github.com/btcsuite/btcd/rpcclient"
|
2018-08-01 07:28:27 +03:00
|
|
|
"github.com/btcsuite/btcd/txscript"
|
2018-06-05 04:34:16 +03:00
|
|
|
"github.com/btcsuite/btcd/wire"
|
|
|
|
"github.com/btcsuite/btcutil"
|
|
|
|
"github.com/btcsuite/btcutil/gcs/builder"
|
|
|
|
"github.com/btcsuite/btcwallet/waddrmgr"
|
2018-07-12 03:28:46 +03:00
|
|
|
"github.com/lightninglabs/neutrino"
|
|
|
|
"github.com/lightningnetwork/lnd/chainntnfs"
|
2018-10-12 18:08:14 +03:00
|
|
|
"github.com/lightningnetwork/lnd/queue"
|
2017-05-24 04:13:45 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// notifierType uniquely identifies this concrete implementation of the
	// ChainNotifier interface.
	notifierType = "neutrino"
)
|
|
|
|
|
|
|
|
var (
	// ErrChainNotifierShuttingDown is used when we are trying to
	// measure a spend notification when notifier is already stopped.
	//
	// NOTE: This is a sentinel error and should be compared by identity
	// (or errors.Is), never by string.
	ErrChainNotifierShuttingDown = errors.New("chainntnfs: system interrupt " +
		"while attempting to register for spend notification")
)
|
|
|
|
|
|
|
|
// NeutrinoNotifier is a version of ChainNotifier that's backed by the neutrino
// Bitcoin light client. Unlike other implementations, this implementation
// speaks directly to the p2p network. As a result, this implementation of the
// ChainNotifier interface is much more light weight that other implementation
// which rely of receiving notification over an RPC interface backed by a
// running full node.
//
// TODO(roasbeef): heavily consolidate with NeutrinoNotifier code
//  * maybe combine into single package?
type NeutrinoNotifier struct {
	// confClientCounter hands out unique IDs for confirmation clients.
	confClientCounter uint64 // To be used atomically.

	// spendClientCounter hands out unique IDs for spend clients.
	spendClientCounter uint64 // To be used atomically.

	// epochClientCounter hands out unique IDs for block epoch clients.
	epochClientCounter uint64 // To be used atomically.

	started int32 // To be used atomically.
	stopped int32 // To be used atomically.

	// heightMtx guards bestHeight.
	heightMtx  sync.RWMutex
	bestHeight uint32

	// p2pNode is the backing neutrino light client instance.
	p2pNode *neutrino.ChainService

	// chainView is the rescan used to watch the chain for relevant
	// transactions and blocks.
	chainView *neutrino.Rescan

	// chainConn adapts the neutrino node to the interface expected by the
	// shared chainntnfs helpers (missed-block/rewind logic).
	chainConn *NeutrinoChainConn

	// notificationCancels receives cancellation requests from clients.
	notificationCancels chan interface{}

	// notificationRegistry receives new registration requests from
	// clients.
	notificationRegistry chan interface{}

	// txNotifier handles confirmation and spend notification bookkeeping.
	txNotifier *chainntnfs.TxNotifier

	// blockEpochClients tracks all active block epoch subscriptions, keyed
	// by their unique epoch ID.
	blockEpochClients map[uint64]*blockEpochRegistration

	// rescanErr delivers errors surfaced by the running rescan.
	rescanErr <-chan error

	// chainUpdates queues connected/disconnected block updates so they are
	// processed in order by the dispatcher.
	chainUpdates *queue.ConcurrentQueue

	// spendHintCache is a cache used to query and update the latest height
	// hints for an outpoint. Each height hint represents the earliest
	// height at which the outpoint could have been spent within the chain.
	spendHintCache chainntnfs.SpendHintCache

	// confirmHintCache is a cache used to query the latest height hints for
	// a transaction. Each height hint represents the earliest height at
	// which the transaction could have confirmed within the chain.
	confirmHintCache chainntnfs.ConfirmHintCache

	wg   sync.WaitGroup
	quit chan struct{}
}
|
|
|
|
|
|
|
|
// Ensure NeutrinoNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*NeutrinoNotifier)(nil)
|
|
|
|
|
|
|
|
// New creates a new instance of the NeutrinoNotifier concrete implementation
|
|
|
|
// of the ChainNotifier interface.
|
|
|
|
//
|
|
|
|
// NOTE: The passed neutrino node should already be running and active before
|
|
|
|
// being passed into this function.
|
2018-08-15 03:53:34 +03:00
|
|
|
func New(node *neutrino.ChainService, spendHintCache chainntnfs.SpendHintCache,
|
|
|
|
confirmHintCache chainntnfs.ConfirmHintCache) (*NeutrinoNotifier, error) {
|
|
|
|
|
2017-05-24 04:13:45 +03:00
|
|
|
notifier := &NeutrinoNotifier{
|
|
|
|
notificationCancels: make(chan interface{}),
|
|
|
|
notificationRegistry: make(chan interface{}),
|
|
|
|
|
|
|
|
blockEpochClients: make(map[uint64]*blockEpochRegistration),
|
|
|
|
|
|
|
|
p2pNode: node,
|
|
|
|
|
|
|
|
rescanErr: make(chan error),
|
|
|
|
|
2018-10-12 18:08:14 +03:00
|
|
|
chainUpdates: queue.NewConcurrentQueue(10),
|
2017-05-24 04:13:45 +03:00
|
|
|
|
2018-08-15 03:53:34 +03:00
|
|
|
spendHintCache: spendHintCache,
|
|
|
|
confirmHintCache: confirmHintCache,
|
|
|
|
|
2017-05-24 04:13:45 +03:00
|
|
|
quit: make(chan struct{}),
|
|
|
|
}
|
|
|
|
|
|
|
|
return notifier, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start contacts the running neutrino light client and kicks off an initial
// empty rescan.
func (n *NeutrinoNotifier) Start() error {
	// Already started? The atomic increment makes Start idempotent.
	if atomic.AddInt32(&n.started, 1) != 1 {
		return nil
	}

	// First, we'll obtain the latest block height of the p2p node. We'll
	// start the auto-rescan from this point. Once a caller actually wishes
	// to register a chain view, the rescan state will be rewound
	// accordingly.
	startingPoint, err := n.p2pNode.BestBlock()
	if err != nil {
		return err
	}

	// No lock needed here: the dispatcher goroutine that contends for
	// bestHeight has not been launched yet.
	n.bestHeight = uint32(startingPoint.Height)

	// Next, we'll create our set of rescan options. Currently it's
	// required that a user MUST set an addr/outpoint/txid when creating a
	// rescan. To get around this, we'll add a "zero" outpoint, that won't
	// actually be matched.
	var zeroInput neutrino.InputWithScript
	rescanOptions := []neutrino.RescanOption{
		neutrino.StartBlock(startingPoint),
		neutrino.QuitChan(n.quit),
		neutrino.NotificationHandlers(
			rpcclient.NotificationHandlers{
				OnFilteredBlockConnected:    n.onFilteredBlockConnected,
				OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected,
			},
		),
		neutrino.WatchInputs(zeroInput),
	}

	// The txNotifier tracks all registered confirmation/spend intents
	// starting from the current best height.
	n.txNotifier = chainntnfs.NewTxNotifier(
		n.bestHeight, chainntnfs.ReorgSafetyLimit, n.confirmHintCache,
		n.spendHintCache,
	)

	n.chainConn = &NeutrinoChainConn{n.p2pNode}

	// Finally, we'll create our rescan struct, start it, and launch all
	// the goroutines we need to operate this ChainNotifier instance.
	n.chainView = n.p2pNode.NewRescan(rescanOptions...)
	n.rescanErr = n.chainView.Start()

	n.chainUpdates.Start()

	n.wg.Add(1)
	go n.notificationDispatcher()

	return nil
}
|
|
|
|
|
2017-12-18 05:40:05 +03:00
|
|
|
// Stop shuts down the NeutrinoNotifier.
func (n *NeutrinoNotifier) Stop() error {
	// Already shutting down? The atomic increment makes Stop idempotent.
	if atomic.AddInt32(&n.stopped, 1) != 1 {
		return nil
	}

	// Signal the dispatcher goroutine to exit and wait for it so no new
	// notifications are produced while we tear down clients.
	close(n.quit)
	n.wg.Wait()

	n.chainUpdates.Stop()

	// Notify all pending clients of our shutdown by closing the related
	// notification channels.
	for _, epochClient := range n.blockEpochClients {
		close(epochClient.cancelChan)
		epochClient.wg.Wait()

		close(epochClient.epochChan)
	}
	n.txNotifier.TearDown()

	return nil
}
|
|
|
|
|
|
|
|
// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
type filteredBlock struct {
	hash   chainhash.Hash
	height uint32
	txns   []*btcutil.Tx

	// connect is true if this update is a new block and false if it is a
	// disconnected block.
	connect bool
}
|
|
|
|
|
2018-10-21 02:30:57 +03:00
|
|
|
// rescanFilterUpdate represents a request that will be sent to the
// notificationRegistry in order to prevent race conditions between the filter
// update and new block notifications.
type rescanFilterUpdate struct {
	// updateOptions are the options to apply to the running rescan.
	updateOptions []neutrino.UpdateOption

	// errChan receives the result of applying the filter update.
	errChan chan error
}
|
|
|
|
|
2017-05-24 04:13:45 +03:00
|
|
|
// onFilteredBlockConnected is a callback which is executed each a new block is
|
|
|
|
// connected to the end of the main chain.
|
|
|
|
func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32,
|
|
|
|
header *wire.BlockHeader, txns []*btcutil.Tx) {
|
|
|
|
|
2017-06-08 03:05:16 +03:00
|
|
|
// Append this new chain update to the end of the queue of new chain
|
|
|
|
// updates.
|
2017-11-10 22:01:36 +03:00
|
|
|
n.chainUpdates.ChanIn() <- &filteredBlock{
|
|
|
|
hash: header.BlockHash(),
|
|
|
|
height: uint32(height),
|
|
|
|
txns: txns,
|
|
|
|
connect: true,
|
2017-09-29 22:10:38 +03:00
|
|
|
}
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// onFilteredBlockDisconnected is a callback which is executed each time a new
|
|
|
|
// block has been disconnected from the end of the mainchain due to a re-org.
|
|
|
|
func (n *NeutrinoNotifier) onFilteredBlockDisconnected(height int32,
|
|
|
|
header *wire.BlockHeader) {
|
|
|
|
|
2017-06-08 03:05:16 +03:00
|
|
|
// Append this new chain update to the end of the queue of new chain
|
|
|
|
// disconnects.
|
2017-11-10 22:01:36 +03:00
|
|
|
n.chainUpdates.ChanIn() <- &filteredBlock{
|
|
|
|
hash: header.BlockHash(),
|
|
|
|
height: uint32(height),
|
|
|
|
connect: false,
|
2017-09-29 22:10:38 +03:00
|
|
|
}
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches.
func (n *NeutrinoNotifier) notificationDispatcher() {
	defer n.wg.Done()
out:
	for {
		select {
		case cancelMsg := <-n.notificationCancels:
			switch msg := cancelMsg.(type) {
			case *epochCancel:
				chainntnfs.Log.Infof("Cancelling epoch "+
					"notification, epoch_id=%v", msg.epochID)

				// First, we'll lookup the original
				// registration in order to stop the active
				// queue goroutine.
				reg := n.blockEpochClients[msg.epochID]
				reg.epochQueue.Stop()

				// Next, close the cancel channel for this
				// specific client, and wait for the client to
				// exit.
				close(n.blockEpochClients[msg.epochID].cancelChan)
				n.blockEpochClients[msg.epochID].wg.Wait()

				// Once the client has exited, we can then
				// safely close the channel used to send epoch
				// notifications, in order to notify any
				// listeners that the intent has been
				// cancelled.
				close(n.blockEpochClients[msg.epochID].epochChan)
				delete(n.blockEpochClients, msg.epochID)
			}

		case registerMsg := <-n.notificationRegistry:
			switch msg := registerMsg.(type) {
			case *chainntnfs.HistoricalConfDispatch:
				// We'll start a historical rescan of the chain
				// asynchronously to prevent blocking
				// potentially long rescans.
				n.wg.Add(1)
				go func() {
					defer n.wg.Done()

					confDetails, err := n.historicalConfDetails(
						msg.TxID, msg.PkScript,
						msg.StartHeight, msg.EndHeight,
					)
					if err != nil {
						chainntnfs.Log.Error(err)
					}

					// If the historical dispatch finished
					// without error, we will invoke
					// UpdateConfDetails even if none were
					// found. This allows the notifier to
					// begin safely updating the height hint
					// cache at tip, since any pending
					// rescans have now completed.
					err = n.txNotifier.UpdateConfDetails(
						*msg.TxID, confDetails,
					)
					if err != nil {
						chainntnfs.Log.Error(err)
					}
				}()

			case *blockEpochRegistration:
				chainntnfs.Log.Infof("New block epoch subscription")
				n.blockEpochClients[msg.epochID] = msg
				// If the client supplied its best known block,
				// backfill any blocks it missed before
				// signalling the registration complete.
				if msg.bestBlock != nil {
					n.heightMtx.Lock()
					bestHeight := int32(n.bestHeight)
					n.heightMtx.Unlock()
					missedBlocks, err :=
						chainntnfs.GetClientMissedBlocks(
							n.chainConn, msg.bestBlock,
							bestHeight, false,
						)
					if err != nil {
						msg.errorChan <- err
						continue
					}
					for _, block := range missedBlocks {
						n.notifyBlockEpochClient(msg,
							block.Height, block.Hash)
					}
				}
				msg.errorChan <- nil

			case *rescanFilterUpdate:
				// Filter updates are serialized through this
				// goroutine to avoid racing with new block
				// notifications.
				err := n.chainView.Update(msg.updateOptions...)
				if err != nil {
					chainntnfs.Log.Errorf("Unable to "+
						"update rescan filter: %v", err)
				}
				msg.errChan <- err
			}

		case item := <-n.chainUpdates.ChanOut():
			update := item.(*filteredBlock)
			if update.connect {
				n.heightMtx.Lock()
				// Since neutrino has no way of knowing what
				// height to rewind to in the case of a reorged
				// best known height, there is no point in
				// checking that the previous hash matches the
				// the hash from our best known height the way
				// the other notifiers do when they receive
				// a new connected block. Therefore, we just
				// compare the heights.
				if update.height != n.bestHeight+1 {
					// Handle the case where the notifier
					// missed some blocks from its chain
					// backend
					chainntnfs.Log.Infof("Missed blocks, " +
						"attempting to catch up")
					bestBlock := chainntnfs.BlockEpoch{
						Height: int32(n.bestHeight),
						Hash:   nil,
					}
					_, missedBlocks, err :=
						chainntnfs.HandleMissedBlocks(
							n.chainConn,
							n.txNotifier,
							bestBlock,
							int32(update.height),
							false,
						)
					if err != nil {
						chainntnfs.Log.Error(err)
						n.heightMtx.Unlock()
						continue
					}

					// Replay each missed block in order;
					// bail out of the whole update on any
					// failure since later blocks depend on
					// earlier ones being applied.
					for _, block := range missedBlocks {
						filteredBlock, err :=
							n.getFilteredBlock(block)
						if err != nil {
							chainntnfs.Log.Error(err)
							n.heightMtx.Unlock()
							continue out
						}
						err = n.handleBlockConnected(filteredBlock)
						if err != nil {
							chainntnfs.Log.Error(err)
							n.heightMtx.Unlock()
							continue out
						}
					}

				}

				err := n.handleBlockConnected(update)
				if err != nil {
					chainntnfs.Log.Error(err)
				}
				n.heightMtx.Unlock()
				continue
			}

			// A disconnected (reorged-out) block: rewind our state
			// back to just before the disconnected height.
			n.heightMtx.Lock()
			if update.height != uint32(n.bestHeight) {
				chainntnfs.Log.Infof("Missed disconnected " +
					"blocks, attempting to catch up")
			}

			hash, err := n.p2pNode.GetBlockHash(int64(n.bestHeight))
			if err != nil {
				chainntnfs.Log.Errorf("Unable to fetch block hash "+
					"for height %d: %v", n.bestHeight, err)
				n.heightMtx.Unlock()
				continue
			}

			notifierBestBlock := chainntnfs.BlockEpoch{
				Height: int32(n.bestHeight),
				Hash:   hash,
			}
			newBestBlock, err := chainntnfs.RewindChain(
				n.chainConn, n.txNotifier, notifierBestBlock,
				int32(update.height-1),
			)
			if err != nil {
				chainntnfs.Log.Errorf("Unable to rewind chain "+
					"from height %d to height %d: %v",
					n.bestHeight, update.height-1, err)
			}

			// Set the bestHeight here in case a chain rewind
			// partially completed.
			n.bestHeight = uint32(newBestBlock.Height)
			n.heightMtx.Unlock()

		case err := <-n.rescanErr:
			chainntnfs.Log.Errorf("Error during rescan: %v", err)

		case <-n.quit:
			return

		}
	}
}
|
|
|
|
|
2018-05-31 08:07:17 +03:00
|
|
|
// historicalConfDetails looks up whether a transaction is already included in
// a block in the active chain and, if so, returns details about the
// confirmation. A nil result with a nil error means the transaction was not
// found within the scanned range.
func (n *NeutrinoNotifier) historicalConfDetails(targetHash *chainhash.Hash,
	pkScript []byte,
	startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, error) {

	// Starting from the height hint, we'll walk forwards in the chain to
	// see if this transaction has already been confirmed.
	for scanHeight := endHeight; scanHeight >= startHeight && scanHeight > 0; scanHeight-- {
		// Ensure we haven't been requested to shut down before
		// processing the next height.
		select {
		case <-n.quit:
			return nil, ErrChainNotifierShuttingDown
		default:
		}

		// First, we'll fetch the block header for this height so we
		// can compute the current block hash.
		blockHash, err := n.p2pNode.GetBlockHash(int64(scanHeight))
		if err != nil {
			return nil, fmt.Errorf("unable to get header for height=%v: %v",
				scanHeight, err)
		}

		// With the hash computed, we can now fetch the basic filter
		// for this height.
		regFilter, err := n.p2pNode.GetCFilter(
			*blockHash, wire.GCSFilterRegular,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to retrieve regular filter for "+
				"height=%v: %v", scanHeight, err)
		}

		// If the block has no transactions other than the Coinbase
		// transaction, then the filter may be nil, so we'll continue
		// forward in that case.
		if regFilter == nil {
			continue
		}

		// In the case that the filter exists, we'll attempt to see if
		// any element in it matches our target public key script.
		key := builder.DeriveKey(blockHash)
		match, err := regFilter.Match(key, pkScript)
		if err != nil {
			return nil, fmt.Errorf("unable to query filter: %v", err)
		}

		// If there's no match, then we can continue forward to the
		// next block.
		if !match {
			continue
		}

		// In the case that we do have a match, we'll fetch the block
		// from the network so we can find the positional data required
		// to send the proper response.
		block, err := n.p2pNode.GetBlock(*blockHash)
		if err != nil {
			return nil, fmt.Errorf("unable to get block from network: %v", err)
		}
		for j, tx := range block.Transactions() {
			txHash := tx.Hash()
			if txHash.IsEqual(targetHash) {
				confDetails := chainntnfs.TxConfirmation{
					BlockHash:   blockHash,
					BlockHeight: scanHeight,
					TxIndex:     uint32(j),
				}
				return &confDetails, nil
			}
		}
	}

	return nil, nil
}
|
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included in this block will be processed to either send
// notifications now or after numConfirmations confs.
//
// NOTE: This mutates n.bestHeight without locking; the dispatcher holds
// heightMtx around its calls to this method.
func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
	// We'll extend the txNotifier's height with the information of this new
	// block, which will handle all of the notification logic for us.
	err := n.txNotifier.ConnectTip(
		&newBlock.hash, newBlock.height, newBlock.txns,
	)
	if err != nil {
		return fmt.Errorf("unable to connect tip: %v", err)
	}

	chainntnfs.Log.Infof("New block: height=%v, sha=%v", newBlock.height,
		newBlock.hash)

	// Now that we've guaranteed the new block extends the txNotifier's
	// current tip, we'll proceed to dispatch notifications to all of our
	// registered clients whom have had notifications fulfilled. Before
	// doing so, we'll make sure update our in memory state in order to
	// satisfy any client requests based upon the new block.
	n.bestHeight = newBlock.height

	n.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)
	return n.txNotifier.NotifyHeight(newBlock.height)
}
|
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
// getFilteredBlock is a utility to retrieve the full filtered block from a block epoch.
|
|
|
|
func (n *NeutrinoNotifier) getFilteredBlock(epoch chainntnfs.BlockEpoch) (*filteredBlock, error) {
|
2018-08-24 06:19:37 +03:00
|
|
|
rawBlock, err := n.p2pNode.GetBlock(*epoch.Hash)
|
2018-08-09 10:05:29 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to get block: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
txns := rawBlock.Transactions()
|
|
|
|
|
|
|
|
block := &filteredBlock{
|
|
|
|
hash: *epoch.Hash,
|
|
|
|
height: uint32(epoch.Height),
|
|
|
|
txns: txns,
|
|
|
|
connect: true,
|
|
|
|
}
|
|
|
|
return block, nil
|
|
|
|
}
|
|
|
|
|
2017-05-24 04:13:45 +03:00
|
|
|
// notifyBlockEpochs notifies all registered block epoch clients of the newly
|
|
|
|
// connected block to the main chain.
|
|
|
|
func (n *NeutrinoNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
|
2018-08-09 10:05:29 +03:00
|
|
|
for _, client := range n.blockEpochClients {
|
|
|
|
n.notifyBlockEpochClient(client, newHeight, newSha)
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
2018-08-09 10:05:29 +03:00
|
|
|
}
|
2017-05-24 04:13:45 +03:00
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
// notifyBlockEpochClient sends a registered block epoch client a notification
|
|
|
|
// about a specific block.
|
|
|
|
func (n *NeutrinoNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
|
|
|
|
height int32, sha *chainhash.Hash) {
|
2017-05-24 04:13:45 +03:00
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
epoch := &chainntnfs.BlockEpoch{
|
|
|
|
Height: height,
|
|
|
|
Hash: sha,
|
|
|
|
}
|
2017-05-24 04:13:45 +03:00
|
|
|
|
2018-08-09 10:05:29 +03:00
|
|
|
select {
|
|
|
|
case epochClient.epochQueue.ChanIn() <- epoch:
|
|
|
|
case <-epochClient.cancelChan:
|
|
|
|
case <-n.quit:
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// RegisterSpendNtfn registers an intent to be notified once the target
|
|
|
|
// outpoint has been spent by a transaction on-chain. Once a spend of the
|
|
|
|
// target outpoint has been detected, the details of the spending event will be
|
|
|
|
// sent across the 'Spend' channel.
|
|
|
|
func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
|
2018-07-18 05:03:26 +03:00
|
|
|
pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {
|
2017-05-24 04:13:45 +03:00
|
|
|
|
2018-10-05 12:07:55 +03:00
|
|
|
// First, we'll construct a spend notification request and hand it off
|
|
|
|
// to the txNotifier.
|
|
|
|
spendID := atomic.AddUint64(&n.spendClientCounter, 1)
|
|
|
|
cancel := func() {
|
|
|
|
n.txNotifier.CancelSpend(*outpoint, spendID)
|
2018-08-15 03:55:29 +03:00
|
|
|
}
|
2018-10-05 12:07:55 +03:00
|
|
|
ntfn := &chainntnfs.SpendNtfn{
|
|
|
|
SpendID: spendID,
|
|
|
|
OutPoint: *outpoint,
|
|
|
|
Event: chainntnfs.NewSpendEvent(cancel),
|
|
|
|
HeightHint: heightHint,
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
2018-08-15 03:55:29 +03:00
|
|
|
|
2018-10-05 12:07:55 +03:00
|
|
|
historicalDispatch, err := n.txNotifier.RegisterSpend(ntfn)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-07-30 05:19:28 +03:00
|
|
|
|
2018-10-05 12:07:55 +03:00
|
|
|
// If the txNotifier didn't return any details to perform a historical
|
|
|
|
// scan of the chain, then we can return early as there's nothing left
|
|
|
|
// for us to do.
|
|
|
|
if historicalDispatch == nil {
|
|
|
|
return ntfn.Event, nil
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
|
|
|
|
2018-10-21 02:30:57 +03:00
|
|
|
// To determine whether this outpoint has been spent on-chain, we'll
|
|
|
|
// update our filter to watch for the transaction at tip and we'll also
|
|
|
|
// dispatch a historical rescan to determine if it has been spent in the
|
|
|
|
// past.
|
|
|
|
//
|
|
|
|
// We'll update our filter first to ensure we can immediately detect the
|
|
|
|
// spend at tip. To do so, we'll map the script into an address
|
|
|
|
// type so we can instruct neutrino to match if the transaction
|
|
|
|
// containing the script is found in a block.
|
|
|
|
inputToWatch := neutrino.InputWithScript{
|
|
|
|
OutPoint: *outpoint,
|
|
|
|
PkScript: pkScript,
|
|
|
|
}
|
|
|
|
errChan := make(chan error, 1)
|
|
|
|
select {
|
|
|
|
case n.notificationRegistry <- &rescanFilterUpdate{
|
|
|
|
updateOptions: []neutrino.UpdateOption{
|
|
|
|
neutrino.AddInputs(inputToWatch),
|
|
|
|
neutrino.Rewind(historicalDispatch.EndHeight),
|
|
|
|
neutrino.DisableDisconnectedNtfns(true),
|
|
|
|
},
|
|
|
|
errChan: errChan,
|
|
|
|
}:
|
|
|
|
case <-n.quit:
|
|
|
|
return nil, ErrChainNotifierShuttingDown
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case err = <-errChan:
|
|
|
|
case <-n.quit:
|
|
|
|
return nil, ErrChainNotifierShuttingDown
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to update filter: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// With the filter updated, we'll dispatch our historical rescan to
|
|
|
|
// ensure we detect the spend if it happened in the past. We'll ensure
|
|
|
|
// that neutrino is caught up to the starting height before we attempt
|
|
|
|
// to fetch the UTXO from the chain. If we're behind, then we may miss a
|
|
|
|
// notification dispatch.
|
2017-07-05 01:54:35 +03:00
|
|
|
for {
|
|
|
|
n.heightMtx.RLock()
|
2018-10-05 12:07:55 +03:00
|
|
|
currentHeight := n.bestHeight
|
2017-07-05 01:54:35 +03:00
|
|
|
n.heightMtx.RUnlock()
|
|
|
|
|
2018-10-05 12:07:55 +03:00
|
|
|
if currentHeight >= historicalDispatch.StartHeight {
|
|
|
|
break
|
2017-07-05 01:54:35 +03:00
|
|
|
}
|
|
|
|
|
2018-10-05 12:07:55 +03:00
|
|
|
time.Sleep(time.Millisecond * 200)
|
2017-07-05 01:54:35 +03:00
|
|
|
}
|
|
|
|
|
2017-05-24 04:13:45 +03:00
|
|
|
spendReport, err := n.p2pNode.GetUtxo(
|
2018-07-18 05:03:26 +03:00
|
|
|
neutrino.WatchInputs(inputToWatch),
|
2017-05-24 04:13:45 +03:00
|
|
|
neutrino.StartBlock(&waddrmgr.BlockStamp{
|
2018-10-05 12:07:55 +03:00
|
|
|
Height: int32(historicalDispatch.StartHeight),
|
|
|
|
}),
|
|
|
|
neutrino.EndBlock(&waddrmgr.BlockStamp{
|
|
|
|
Height: int32(historicalDispatch.EndHeight),
|
2017-05-24 04:13:45 +03:00
|
|
|
}),
|
|
|
|
)
|
2018-02-03 04:59:11 +03:00
|
|
|
if err != nil && !strings.Contains(err.Error(), "not found") {
|
2017-05-24 04:13:45 +03:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// If a spend report was returned, and the transaction is present, then
|
|
|
|
// this means that the output is already spent.
|
2018-10-21 02:30:57 +03:00
|
|
|
var spendDetails *chainntnfs.SpendDetail
|
2017-05-24 04:13:45 +03:00
|
|
|
if spendReport != nil && spendReport.SpendingTx != nil {
|
2018-10-05 12:07:55 +03:00
|
|
|
spendingTxHash := spendReport.SpendingTx.TxHash()
|
2018-10-21 02:30:57 +03:00
|
|
|
spendDetails = &chainntnfs.SpendDetail{
|
2018-10-05 12:07:55 +03:00
|
|
|
SpentOutPoint: outpoint,
|
|
|
|
SpenderTxHash: &spendingTxHash,
|
|
|
|
SpendingTx: spendReport.SpendingTx,
|
|
|
|
SpenderInputIndex: spendReport.SpendingInputIndex,
|
|
|
|
SpendingHeight: int32(spendReport.SpendingTxHeight),
|
|
|
|
}
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
|
|
|
|
2018-10-21 02:30:57 +03:00
|
|
|
// Finally, no matter whether the rescan found a spend in the past or
|
|
|
|
// not, we'll mark our historical rescan as complete to ensure the
|
|
|
|
// outpoint's spend hint gets updated upon connected/disconnected
|
|
|
|
// blocks.
|
|
|
|
err = n.txNotifier.UpdateSpendDetails(*outpoint, spendDetails)
|
2018-08-15 03:55:29 +03:00
|
|
|
if err != nil {
|
2018-10-21 02:30:57 +03:00
|
|
|
return nil, err
|
2018-08-15 03:55:29 +03:00
|
|
|
}
|
|
|
|
|
2018-10-21 02:30:57 +03:00
|
|
|
return ntfn.Event, nil
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// RegisterConfirmationsNtfn registers a notification with NeutrinoNotifier
|
|
|
|
// which will be triggered once the txid reaches numConfs number of
|
|
|
|
// confirmations.
|
|
|
|
func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
|
2018-05-31 08:07:17 +03:00
|
|
|
pkScript []byte,
|
2017-05-24 04:13:45 +03:00
|
|
|
numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {
|
|
|
|
|
2018-08-15 03:54:21 +03:00
|
|
|
// Construct a notification request for the transaction and send it to
|
|
|
|
// the main event loop.
|
2018-08-27 07:37:10 +03:00
|
|
|
ntfn := &chainntnfs.ConfNtfn{
|
|
|
|
ConfID: atomic.AddUint64(&n.confClientCounter, 1),
|
|
|
|
TxID: txid,
|
|
|
|
PkScript: pkScript,
|
|
|
|
NumConfirmations: numConfs,
|
|
|
|
Event: chainntnfs.NewConfirmationEvent(numConfs),
|
|
|
|
HeightHint: heightHint,
|
|
|
|
}
|
|
|
|
|
|
|
|
chainntnfs.Log.Infof("New confirmation subscription: "+
|
|
|
|
"txid=%v, numconfs=%v", txid, numConfs)
|
|
|
|
|
2018-10-05 12:07:55 +03:00
|
|
|
// Register the conf notification with the TxNotifier. A non-nil value
|
2018-08-27 07:37:10 +03:00
|
|
|
// for `dispatch` will be returned if we are required to perform a
|
|
|
|
// manual scan for the confirmation. Otherwise the notifier will begin
|
|
|
|
// watching at tip for the transaction to confirm.
|
2018-10-05 12:07:55 +03:00
|
|
|
dispatch, err := n.txNotifier.RegisterConf(ntfn)
|
2018-08-27 07:37:10 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
|
|
|
|
2018-08-27 07:37:10 +03:00
|
|
|
if dispatch == nil {
|
|
|
|
return ntfn.Event, nil
|
2018-07-27 07:32:55 +03:00
|
|
|
}
|
|
|
|
|
2018-10-21 02:30:57 +03:00
|
|
|
// To determine whether this transaction has confirmed on-chain, we'll
|
|
|
|
// update our filter to watch for the transaction at tip and we'll also
|
|
|
|
// dispatch a historical rescan to determine if it has confirmed in the
|
|
|
|
// past.
|
|
|
|
//
|
|
|
|
// We'll update our filter first to ensure we can immediately detect the
|
|
|
|
// confirmation at tip. To do so, we'll map the script into an address
|
|
|
|
// type so we can instruct neutrino to match if the transaction
|
|
|
|
// containing the script is found in a block.
|
|
|
|
params := n.p2pNode.ChainParams()
|
|
|
|
_, addrs, _, err := txscript.ExtractPkScriptAddrs(pkScript, ¶ms)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to extract script: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll send the filter update request to the notifier's main event
|
|
|
|
// handler and wait for its response.
|
|
|
|
errChan := make(chan error, 1)
|
|
|
|
select {
|
|
|
|
case n.notificationRegistry <- &rescanFilterUpdate{
|
|
|
|
updateOptions: []neutrino.UpdateOption{
|
|
|
|
neutrino.AddAddrs(addrs...),
|
|
|
|
neutrino.Rewind(dispatch.EndHeight),
|
|
|
|
neutrino.DisableDisconnectedNtfns(true),
|
|
|
|
},
|
|
|
|
errChan: errChan,
|
|
|
|
}:
|
|
|
|
case <-n.quit:
|
|
|
|
return nil, ErrChainNotifierShuttingDown
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case err = <-errChan:
|
|
|
|
case <-n.quit:
|
|
|
|
return nil, ErrChainNotifierShuttingDown
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("unable to update filter: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Finally, with the filter updates, we can dispatch the historical
|
|
|
|
// rescan to ensure we can detect if the event happened in the past.
|
2017-05-24 04:13:45 +03:00
|
|
|
select {
|
2018-08-27 07:37:10 +03:00
|
|
|
case n.notificationRegistry <- dispatch:
|
2018-07-27 07:32:55 +03:00
|
|
|
case <-n.quit:
|
|
|
|
return nil, ErrChainNotifierShuttingDown
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
2018-10-21 02:30:57 +03:00
|
|
|
|
|
|
|
return ntfn.Event, nil
|
2017-05-24 04:13:45 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	// epochID uniquely identifies this registration; it is assigned from
	// the notifier's atomic epoch client counter.
	epochID uint64

	// epochChan is the buffered channel over which block epochs are
	// ultimately delivered to the client.
	epochChan chan *chainntnfs.BlockEpoch

	// epochQueue is an in-order concurrent queue that the notifier pushes
	// epochs into; a dedicated goroutine proxies its output onto
	// epochChan so deliveries never block the dispatcher and arrive in
	// order.
	epochQueue *queue.ConcurrentQueue

	// cancelChan signals the proxying goroutine to exit when the client
	// cancels the registration.
	cancelChan chan struct{}

	// bestBlock is the client's best known block, if supplied; the
	// notifier uses it to catch the client up on missed blocks.
	bestBlock *chainntnfs.BlockEpoch

	// errorChan carries a registration error back to the client.
	// NOTE(review): the consumer of this channel is outside this chunk —
	// confirm against the dispatcher.
	errorChan chan error

	// wg tracks the proxying goroutine so it can be waited upon.
	wg sync.WaitGroup
}
|
|
|
|
|
|
|
|
// epochCancel is a message sent to the NeutrinoNotifier when a client wishes
// to cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	// epochID identifies the blockEpochRegistration to cancel.
	epochID uint64
}
|
|
|
|
|
2018-08-09 10:05:27 +03:00
|
|
|
// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications, of each new block connected to the main
// chain. Clients have the option of passing in their best known block, which
// the notifier uses to check if they are behind on blocks and catch them up.
func (n *NeutrinoNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	// Each registration gets its own concurrent queue plus a buffered
	// delivery channel, a unique epoch ID, and the caller's best known
	// block (possibly nil).
	reg := &blockEpochRegistration{
		epochQueue: queue.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		epochID:    atomic.AddUint64(&n.epochClientCounter, 1),
		bestBlock:  bestBlock,
		errorChan:  make(chan error, 1),
	}
	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				select {
				case reg.epochChan <- blockNtfn:

				// Bail out if the client cancels or the
				// notifier shuts down while the delivery is
				// blocked.
				case <-reg.cancelChan:
					return

				case <-n.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-n.quit:
				return
			}
		}
	}()

	select {
	case <-n.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification.")
	case n.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit epoch cancellation to notification dispatcher.
				select {
				case n.notificationCancels <- cancel:
					// Cancellation is being handled, drain the epoch channel until it is
					// closed before yielding to caller.
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-n.quit:
							return
						}
					}
				case <-n.quit:
				}
			},
		}, nil
	}
}
|
2018-08-09 10:05:28 +03:00
|
|
|
|
|
|
|
// NeutrinoChainConn is a wrapper around neutrino's chain backend in order
// to satisfy the chainntnfs.ChainConn interface.
type NeutrinoChainConn struct {
	// p2pNode is the backing neutrino light client that all chain queries
	// are delegated to.
	p2pNode *neutrino.ChainService
}
|
|
|
|
|
|
|
|
// GetBlockHeader returns the block header for a hash.
|
|
|
|
func (n *NeutrinoChainConn) GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, error) {
|
2018-09-10 13:45:59 +03:00
|
|
|
return n.p2pNode.GetBlockHeader(blockHash)
|
2018-08-09 10:05:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetBlockHeaderVerbose returns a verbose block header result for a hash. This
|
|
|
|
// result only contains the height with a nil hash.
|
|
|
|
func (n *NeutrinoChainConn) GetBlockHeaderVerbose(blockHash *chainhash.Hash) (
|
|
|
|
*btcjson.GetBlockHeaderVerboseResult, error) {
|
|
|
|
|
2018-09-10 13:45:59 +03:00
|
|
|
height, err := n.p2pNode.GetBlockHeight(blockHash)
|
2018-08-09 10:05:28 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
// Since only the height is used from the result, leave the hash nil.
|
|
|
|
return &btcjson.GetBlockHeaderVerboseResult{Height: int32(height)}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetBlockHash returns the hash from a block height.
|
|
|
|
func (n *NeutrinoChainConn) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
|
2018-09-10 13:45:59 +03:00
|
|
|
return n.p2pNode.GetBlockHash(blockHeight)
|
2018-08-09 10:05:28 +03:00
|
|
|
}
|