package neutrinonotify

import (
	"errors"
	"fmt"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/btcjson"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcutil/gcs/builder"
	"github.com/btcsuite/btcwallet/waddrmgr"
	"github.com/lightninglabs/neutrino"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/queue"
)

const (
	// notifierType uniquely identifies this concrete implementation of the
	// ChainNotifier interface.
	notifierType = "neutrino"
)

// NeutrinoNotifier is a version of ChainNotifier that's backed by the neutrino
// Bitcoin light client. Unlike other implementations, this implementation
// speaks directly to the p2p network. As a result, this implementation of the
// ChainNotifier interface is much more lightweight than other implementations
// which rely on receiving notifications over an RPC interface backed by a
// running full node.
//
// TODO(roasbeef): heavily consolidate with NeutrinoNotifier code
// * maybe combine into single package?
type NeutrinoNotifier struct {
	confClientCounter  uint64 // To be used atomically.
	spendClientCounter uint64 // To be used atomically.
	epochClientCounter uint64 // To be used atomically.

	started int32 // To be used atomically.
	stopped int32 // To be used atomically.

	bestBlockMtx sync.RWMutex
	bestBlock    chainntnfs.BlockEpoch

	p2pNode   *neutrino.ChainService
	chainView *neutrino.Rescan

	chainConn *NeutrinoChainConn

	notificationCancels  chan interface{}
	notificationRegistry chan interface{}

	txNotifier *chainntnfs.TxNotifier

	blockEpochClients map[uint64]*blockEpochRegistration

	rescanErr <-chan error

	chainUpdates *queue.ConcurrentQueue
	txUpdates    *queue.ConcurrentQueue

	// spendHintCache is a cache used to query and update the latest height
	// hints for an outpoint. Each height hint represents the earliest
	// height at which the outpoint could have been spent within the chain.
	spendHintCache chainntnfs.SpendHintCache

	// confirmHintCache is a cache used to query the latest height hints for
	// a transaction. Each height hint represents the earliest height at
	// which the transaction could have confirmed within the chain.
	confirmHintCache chainntnfs.ConfirmHintCache

	wg   sync.WaitGroup
	quit chan struct{}
}

// Ensure NeutrinoNotifier implements the ChainNotifier interface at compile time.
var _ chainntnfs.ChainNotifier = (*NeutrinoNotifier)(nil)

// New creates a new instance of the NeutrinoNotifier concrete implementation
// of the ChainNotifier interface.
//
// NOTE: The passed neutrino node should already be running and active before
// being passed into this function.
func New(node *neutrino.ChainService, spendHintCache chainntnfs.SpendHintCache,
	confirmHintCache chainntnfs.ConfirmHintCache) *NeutrinoNotifier {

	return &NeutrinoNotifier{
		notificationCancels:  make(chan interface{}),
		notificationRegistry: make(chan interface{}),

		blockEpochClients: make(map[uint64]*blockEpochRegistration),

		p2pNode:   node,
		chainConn: &NeutrinoChainConn{node},

		rescanErr: make(chan error),

		chainUpdates: queue.NewConcurrentQueue(10),
		txUpdates:    queue.NewConcurrentQueue(10),

		spendHintCache:   spendHintCache,
		confirmHintCache: confirmHintCache,

		quit: make(chan struct{}),
	}
}
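
// A minimal usage sketch (hypothetical caller code, not part of this package).
// It assumes a running *neutrino.ChainService `svc` and height-hint caches
// `spendHints` and `confHints` supplied by the caller:
//
//	notifier := neutrinonotify.New(svc, spendHints, confHints)
//	if err := notifier.Start(); err != nil {
//		// Handle the startup error.
//	}
//	defer notifier.Stop()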

// Start contacts the running neutrino light client and kicks off an initial
// empty rescan.
func (n *NeutrinoNotifier) Start() error {
	// Already started?
	if atomic.AddInt32(&n.started, 1) != 1 {
		return nil
	}

	// First, we'll obtain the latest block height of the p2p node. We'll
	// start the auto-rescan from this point. Once a caller actually wishes
	// to register a chain view, the rescan state will be rewound
	// accordingly.
	startingPoint, err := n.p2pNode.BestBlock()
	if err != nil {
		return err
	}
	n.bestBlock.Hash = &startingPoint.Hash
	n.bestBlock.Height = startingPoint.Height

	n.txNotifier = chainntnfs.NewTxNotifier(
		uint32(n.bestBlock.Height), chainntnfs.ReorgSafetyLimit,
		n.confirmHintCache, n.spendHintCache,
	)

	// Next, we'll create our set of rescan options. Currently it's
	// required that a user MUST set an addr/outpoint/txid when creating a
	// rescan. To get around this, we'll add a "zero" outpoint that won't
	// actually be matched.
	var zeroInput neutrino.InputWithScript
	rescanOptions := []neutrino.RescanOption{
		neutrino.StartBlock(startingPoint),
		neutrino.QuitChan(n.quit),
		neutrino.NotificationHandlers(
			rpcclient.NotificationHandlers{
				OnFilteredBlockConnected:    n.onFilteredBlockConnected,
				OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected,
				OnRedeemingTx:               n.onRelevantTx,
			},
		),
		neutrino.WatchInputs(zeroInput),
	}

	// Finally, we'll create our rescan struct, start it, and launch all
	// the goroutines we need to operate this ChainNotifier instance.
	n.chainView = n.p2pNode.NewRescan(rescanOptions...)
	n.rescanErr = n.chainView.Start()

	n.chainUpdates.Start()
	n.txUpdates.Start()

	n.wg.Add(1)
	go n.notificationDispatcher()

	return nil
}

// Stop shuts down the NeutrinoNotifier.
func (n *NeutrinoNotifier) Stop() error {
	// Already shutting down?
	if atomic.AddInt32(&n.stopped, 1) != 1 {
		return nil
	}

	close(n.quit)
	n.wg.Wait()

	n.chainUpdates.Stop()
	n.txUpdates.Stop()

	// Notify all pending clients of our shutdown by closing the related
	// notification channels.
	for _, epochClient := range n.blockEpochClients {
		close(epochClient.cancelChan)
		epochClient.wg.Wait()

		close(epochClient.epochChan)
	}
	n.txNotifier.TearDown()

	return nil
}

// filteredBlock represents a new block which has been connected to the main
// chain. The slice of transactions will only be populated if the block
// includes a transaction that confirmed one of our watched txids, or spends
// one of the outputs currently being watched.
type filteredBlock struct {
	hash   chainhash.Hash
	height uint32
	txns   []*btcutil.Tx

	// connect is true if this update is a new block and false if it is a
	// disconnected block.
	connect bool
}

// rescanFilterUpdate represents a request that will be sent to the
// notificationRegistry in order to prevent race conditions between the filter
// update and new block notifications.
type rescanFilterUpdate struct {
	updateOptions []neutrino.UpdateOption
	errChan       chan error
}
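
// As a sketch of the request/response pattern used by the registration
// methods further below (the variable names mirror RegisterSpendNtfn), a
// caller inside this package hands the update to the dispatcher and then
// waits on the dedicated error channel:
//
//	errChan := make(chan error, 1)
//	select {
//	case n.notificationRegistry <- &rescanFilterUpdate{
//		updateOptions: updateOptions,
//		errChan:       errChan,
//	}:
//	case <-n.quit:
//		return nil, chainntnfs.ErrChainNotifierShuttingDown
//	}
//
//	select {
//	case err = <-errChan:
//	case <-n.quit:
//		return nil, chainntnfs.ErrChainNotifierShuttingDown
//	}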

// onFilteredBlockConnected is a callback which is executed each time a new
// block is connected to the end of the main chain.
func (n *NeutrinoNotifier) onFilteredBlockConnected(height int32,
	header *wire.BlockHeader, txns []*btcutil.Tx) {

	// Append this new chain update to the end of the queue of new chain
	// updates.
	select {
	case n.chainUpdates.ChanIn() <- &filteredBlock{
		hash:    header.BlockHash(),
		height:  uint32(height),
		txns:    txns,
		connect: true,
	}:
	case <-n.quit:
	}
}

// onFilteredBlockDisconnected is a callback which is executed each time a new
// block has been disconnected from the end of the main chain due to a re-org.
func (n *NeutrinoNotifier) onFilteredBlockDisconnected(height int32,
	header *wire.BlockHeader) {

	// Append this new chain update to the end of the queue of new chain
	// disconnects.
	select {
	case n.chainUpdates.ChanIn() <- &filteredBlock{
		hash:    header.BlockHash(),
		height:  uint32(height),
		connect: false,
	}:
	case <-n.quit:
	}
}

// relevantTx represents a relevant transaction to the notifier that fulfills
// any outstanding spend requests.
type relevantTx struct {
	tx      *btcutil.Tx
	details *btcjson.BlockDetails
}

// onRelevantTx is a callback that proxies relevant transaction notifications
// from the backend to the notifier's main event handler.
func (n *NeutrinoNotifier) onRelevantTx(tx *btcutil.Tx, details *btcjson.BlockDetails) {
	select {
	case n.txUpdates.ChanIn() <- &relevantTx{tx, details}:
	case <-n.quit:
	}
}

// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches.
func (n *NeutrinoNotifier) notificationDispatcher() {
	defer n.wg.Done()
out:
	for {
		select {
		case cancelMsg := <-n.notificationCancels:
			switch msg := cancelMsg.(type) {
			case *epochCancel:
				chainntnfs.Log.Infof("Cancelling epoch "+
					"notification, epoch_id=%v", msg.epochID)

				// First, we'll lookup the original
				// registration in order to stop the active
				// queue goroutine.
				reg := n.blockEpochClients[msg.epochID]
				reg.epochQueue.Stop()

				// Next, close the cancel channel for this
				// specific client, and wait for the client to
				// exit.
				close(n.blockEpochClients[msg.epochID].cancelChan)
				n.blockEpochClients[msg.epochID].wg.Wait()

				// Once the client has exited, we can then
				// safely close the channel used to send epoch
				// notifications, in order to notify any
				// listeners that the intent has been
				// cancelled.
				close(n.blockEpochClients[msg.epochID].epochChan)
				delete(n.blockEpochClients, msg.epochID)
			}

		case registerMsg := <-n.notificationRegistry:
			switch msg := registerMsg.(type) {
			case *chainntnfs.HistoricalConfDispatch:
				// We'll start a historical rescan of the chain
				// asynchronously to prevent blocking
				// potentially long rescans.
				n.wg.Add(1)
				go func() {
					defer n.wg.Done()

					confDetails, err := n.historicalConfDetails(
						msg.ConfRequest,
						msg.StartHeight, msg.EndHeight,
					)
					if err != nil {
						chainntnfs.Log.Error(err)
					}

					// If the historical dispatch finished
					// without error, we will invoke
					// UpdateConfDetails even if none were
					// found. This allows the notifier to
					// begin safely updating the height hint
					// cache at tip, since any pending
					// rescans have now completed.
					err = n.txNotifier.UpdateConfDetails(
						msg.ConfRequest, confDetails,
					)
					if err != nil {
						chainntnfs.Log.Error(err)
					}
				}()

			case *blockEpochRegistration:
				chainntnfs.Log.Infof("New block epoch subscription")

				n.blockEpochClients[msg.epochID] = msg

				// If the client did not provide their best
				// known block, then we'll immediately dispatch
				// a notification for the current tip.
				if msg.bestBlock == nil {
					n.notifyBlockEpochClient(
						msg, n.bestBlock.Height,
						n.bestBlock.Hash,
					)

					msg.errorChan <- nil
					continue
				}

				// Otherwise, we'll attempt to deliver the
				// backlog of notifications from their best
				// known block.
				n.bestBlockMtx.Lock()
				bestHeight := n.bestBlock.Height
				n.bestBlockMtx.Unlock()

				missedBlocks, err := chainntnfs.GetClientMissedBlocks(
					n.chainConn, msg.bestBlock, bestHeight,
					false,
				)
				if err != nil {
					msg.errorChan <- err
					continue
				}

				for _, block := range missedBlocks {
					n.notifyBlockEpochClient(
						msg, block.Height, block.Hash,
					)
				}

				msg.errorChan <- nil

			case *rescanFilterUpdate:
				err := n.chainView.Update(msg.updateOptions...)
				if err != nil {
					chainntnfs.Log.Errorf("Unable to "+
						"update rescan filter: %v", err)
				}
				msg.errChan <- err
			}

		case item := <-n.chainUpdates.ChanOut():
			update := item.(*filteredBlock)
			if update.connect {
				n.bestBlockMtx.Lock()
				// Since neutrino has no way of knowing what
				// height to rewind to in the case of a reorged
				// best known height, there is no point in
				// checking that the previous hash matches the
				// hash from our best known height the way the
				// other notifiers do when they receive a new
				// connected block. Therefore, we just compare
				// the heights.
				if update.height != uint32(n.bestBlock.Height+1) {
					// Handle the case where the notifier
					// missed some blocks from its chain
					// backend.
					chainntnfs.Log.Infof("Missed blocks, " +
						"attempting to catch up")

					_, missedBlocks, err :=
						chainntnfs.HandleMissedBlocks(
							n.chainConn,
							n.txNotifier,
							n.bestBlock,
							int32(update.height),
							false,
						)
					if err != nil {
						chainntnfs.Log.Error(err)
						n.bestBlockMtx.Unlock()
						continue
					}

					for _, block := range missedBlocks {
						filteredBlock, err :=
							n.getFilteredBlock(block)
						if err != nil {
							chainntnfs.Log.Error(err)
							n.bestBlockMtx.Unlock()
							continue out
						}
						err = n.handleBlockConnected(filteredBlock)
						if err != nil {
							chainntnfs.Log.Error(err)
							n.bestBlockMtx.Unlock()
							continue out
						}
					}
				}

				err := n.handleBlockConnected(update)
				if err != nil {
					chainntnfs.Log.Error(err)
				}

				n.bestBlockMtx.Unlock()
				continue
			}

			n.bestBlockMtx.Lock()
			if update.height != uint32(n.bestBlock.Height) {
				chainntnfs.Log.Infof("Missed disconnected " +
					"blocks, attempting to catch up")
			}
			newBestBlock, err := chainntnfs.RewindChain(
				n.chainConn, n.txNotifier, n.bestBlock,
				int32(update.height-1),
			)
			if err != nil {
				chainntnfs.Log.Errorf("Unable to rewind chain "+
					"from height %d to height %d: %v",
					n.bestBlock.Height, update.height-1, err)
			}

			// Set the bestHeight here in case a chain rewind
			// partially completed.
			n.bestBlock = newBestBlock
			n.bestBlockMtx.Unlock()

		case txUpdate := <-n.txUpdates.ChanOut():
			// A new relevant transaction notification has been
			// received from the backend. We'll attempt to process
			// it to determine if it fulfills any outstanding
			// confirmation and/or spend requests and dispatch
			// notifications for them.
			update := txUpdate.(*relevantTx)
			err := n.txNotifier.ProcessRelevantSpendTx(
				update.tx, uint32(update.details.Height),
			)
			if err != nil {
				chainntnfs.Log.Errorf("Unable to process "+
					"transaction %v: %v", update.tx.Hash(),
					err)
			}

		case err := <-n.rescanErr:
			chainntnfs.Log.Errorf("Error during rescan: %v", err)

		case <-n.quit:
			return
		}
	}
}

// historicalConfDetails looks up whether a confirmation request (txid/output
// script) has already been included in a block in the active chain and, if so,
// returns details about said block.
func (n *NeutrinoNotifier) historicalConfDetails(confRequest chainntnfs.ConfRequest,
	startHeight, endHeight uint32) (*chainntnfs.TxConfirmation, error) {

	// Starting from the height hint, we'll walk forwards in the chain to
	// see if this transaction/output script has already been confirmed.
	for scanHeight := endHeight; scanHeight >= startHeight && scanHeight > 0; scanHeight-- {
		// Ensure we haven't been requested to shut down before
		// processing the next height.
		select {
		case <-n.quit:
			return nil, chainntnfs.ErrChainNotifierShuttingDown
		default:
		}

		// First, we'll fetch the block header for this height so we
		// can compute the current block hash.
		blockHash, err := n.p2pNode.GetBlockHash(int64(scanHeight))
		if err != nil {
			return nil, fmt.Errorf("unable to get header for height=%v: %v",
				scanHeight, err)
		}

		// With the hash computed, we can now fetch the basic filter
		// for this height.
		regFilter, err := n.p2pNode.GetCFilter(
			*blockHash, wire.GCSFilterRegular,
		)
		if err != nil {
			return nil, fmt.Errorf("unable to retrieve regular filter for "+
				"height=%v: %v", scanHeight, err)
		}

		// If the block has no transactions other than the coinbase
		// transaction, then the filter may be nil, so we'll continue
		// forward in that case.
		if regFilter == nil {
			continue
		}

		// In the case that the filter exists, we'll attempt to see if
		// any element in it matches our target public key script.
		key := builder.DeriveKey(blockHash)
		match, err := regFilter.Match(key, confRequest.PkScript.Script())
		if err != nil {
			return nil, fmt.Errorf("unable to query filter: %v", err)
		}

		// If there's no match, then we can continue forward to the
		// next block.
		if !match {
			continue
		}

		// In the case that we do have a match, we'll fetch the block
		// from the network so we can find the positional data required
		// to send the proper response.
		block, err := n.p2pNode.GetBlock(*blockHash)
		if err != nil {
			return nil, fmt.Errorf("unable to get block from network: %v", err)
		}

		// For every transaction in the block, check which one matches
		// our request. If we find one that does, we can dispatch its
		// confirmation details.
		for i, tx := range block.Transactions() {
			if !confRequest.MatchesTx(tx.MsgTx()) {
				continue
			}

			return &chainntnfs.TxConfirmation{
				Tx:          tx.MsgTx(),
				BlockHash:   blockHash,
				BlockHeight: scanHeight,
				TxIndex:     uint32(i),
			}, nil
		}
	}

	return nil, nil
}

// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included in this block will be processed to either send
// notifications now or after numConfirmations confs.
//
// NOTE: This method must be called with the bestBlockMtx lock held.
func (n *NeutrinoNotifier) handleBlockConnected(newBlock *filteredBlock) error {
	// We'll extend the txNotifier's height with the information of this new
	// block, which will handle all of the notification logic for us.
	err := n.txNotifier.ConnectTip(
		&newBlock.hash, newBlock.height, newBlock.txns,
	)
	if err != nil {
		return fmt.Errorf("unable to connect tip: %v", err)
	}

	chainntnfs.Log.Infof("New block: height=%v, sha=%v", newBlock.height,
		newBlock.hash)

	// Now that we've guaranteed the new block extends the txNotifier's
	// current tip, we'll proceed to dispatch notifications to all of our
	// registered clients who have had notifications fulfilled. Before
	// doing so, we'll make sure to update our in-memory state in order to
	// satisfy any client requests based upon the new block.
	n.bestBlock.Hash = &newBlock.hash
	n.bestBlock.Height = int32(newBlock.height)

	n.notifyBlockEpochs(int32(newBlock.height), &newBlock.hash)
	return n.txNotifier.NotifyHeight(newBlock.height)
}

// getFilteredBlock is a utility to retrieve the full filtered block from a block epoch.
func (n *NeutrinoNotifier) getFilteredBlock(epoch chainntnfs.BlockEpoch) (*filteredBlock, error) {
	rawBlock, err := n.p2pNode.GetBlock(*epoch.Hash)
	if err != nil {
		return nil, fmt.Errorf("unable to get block: %v", err)
	}

	txns := rawBlock.Transactions()

	block := &filteredBlock{
		hash:    *epoch.Hash,
		height:  uint32(epoch.Height),
		txns:    txns,
		connect: true,
	}
	return block, nil
}

// notifyBlockEpochs notifies all registered block epoch clients of the newly
// connected block to the main chain.
func (n *NeutrinoNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
	for _, client := range n.blockEpochClients {
		n.notifyBlockEpochClient(client, newHeight, newSha)
	}
}

// notifyBlockEpochClient sends a registered block epoch client a notification
// about a specific block.
func (n *NeutrinoNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
	height int32, sha *chainhash.Hash) {

	epoch := &chainntnfs.BlockEpoch{
		Height: height,
		Hash:   sha,
	}

	select {
	case epochClient.epochQueue.ChanIn() <- epoch:
	case <-epochClient.cancelChan:
	case <-n.quit:
	}
}

// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint/output script has been spent by a transaction on-chain. When
// intending to be notified of the spend of an output script, a nil outpoint
// must be used. The heightHint should represent the earliest height in the
// chain of the transaction that spent the outpoint/output script.
//
// Once a spend has been detected, the details of the spending event will be
// sent across the 'Spend' channel.
func (n *NeutrinoNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
	pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {

	// First, we'll construct a spend notification request and hand it off
	// to the txNotifier.
	spendID := atomic.AddUint64(&n.spendClientCounter, 1)
	spendRequest, err := chainntnfs.NewSpendRequest(outpoint, pkScript)
	if err != nil {
		return nil, err
	}
	ntfn := &chainntnfs.SpendNtfn{
		SpendID:      spendID,
		SpendRequest: spendRequest,
		Event: chainntnfs.NewSpendEvent(func() {
			n.txNotifier.CancelSpend(spendRequest, spendID)
		}),
		HeightHint: heightHint,
	}

	historicalDispatch, txNotifierTip, err := n.txNotifier.RegisterSpend(ntfn)
	if err != nil {
		return nil, err
	}

	// To determine whether this outpoint has been spent on-chain, we'll
	// update our filter to watch for the transaction at tip and we'll also
	// dispatch a historical rescan to determine if it has been spent in the
	// past.
	//
	// We'll update our filter first to ensure we can immediately detect the
	// spend at tip.
	inputToWatch := neutrino.InputWithScript{
		OutPoint: spendRequest.OutPoint,
		PkScript: spendRequest.PkScript.Script(),
	}
	updateOptions := []neutrino.UpdateOption{
		neutrino.AddInputs(inputToWatch),
		neutrino.DisableDisconnectedNtfns(true),
	}

	// We'll use the txNotifier's tip as the starting point of our filter
	// update. In the case of an output script spend request, we'll check if
	// we should perform a historical rescan and start from there, as we
	// cannot do so with GetUtxo since it matches outpoints.
	rewindHeight := txNotifierTip
	if historicalDispatch != nil &&
		spendRequest.OutPoint == chainntnfs.ZeroOutPoint {

		rewindHeight = historicalDispatch.StartHeight
	}
	updateOptions = append(updateOptions, neutrino.Rewind(rewindHeight))

	errChan := make(chan error, 1)
	select {
	case n.notificationRegistry <- &rescanFilterUpdate{
		updateOptions: updateOptions,
		errChan:       errChan,
	}:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}

	select {
	case err = <-errChan:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}
	if err != nil {
		return nil, fmt.Errorf("unable to update filter: %v", err)
	}

	// If the txNotifier didn't return any details to perform a historical
	// scan of the chain, or if we already performed one like in the case of
	// output script spend requests, then we can return early as there's
	// nothing left for us to do.
	if historicalDispatch == nil ||
		spendRequest.OutPoint == chainntnfs.ZeroOutPoint {

		return ntfn.Event, nil
	}

	// With the filter updated, we'll dispatch our historical rescan to
	// ensure we detect the spend if it happened in the past. We'll ensure
	// that neutrino is caught up to the starting height before we attempt
	// to fetch the UTXO from the chain. If we're behind, then we may miss a
	// notification dispatch.
	for {
		n.bestBlockMtx.RLock()
		currentHeight := uint32(n.bestBlock.Height)
		n.bestBlockMtx.RUnlock()

		if currentHeight >= historicalDispatch.StartHeight {
			break
		}

		time.Sleep(time.Millisecond * 200)
	}

	spendReport, err := n.p2pNode.GetUtxo(
		neutrino.WatchInputs(inputToWatch),
		neutrino.StartBlock(&waddrmgr.BlockStamp{
			Height: int32(historicalDispatch.StartHeight),
		}),
		neutrino.EndBlock(&waddrmgr.BlockStamp{
			Height: int32(historicalDispatch.EndHeight),
		}),
	)
	if err != nil && !strings.Contains(err.Error(), "not found") {
		return nil, err
	}

	// If a spend report was returned, and the transaction is present, then
	// this means that the output is already spent.
	var spendDetails *chainntnfs.SpendDetail
	if spendReport != nil && spendReport.SpendingTx != nil {
		spendingTxHash := spendReport.SpendingTx.TxHash()
		spendDetails = &chainntnfs.SpendDetail{
			SpentOutPoint:     &spendRequest.OutPoint,
			SpenderTxHash:     &spendingTxHash,
			SpendingTx:        spendReport.SpendingTx,
			SpenderInputIndex: spendReport.SpendingInputIndex,
			SpendingHeight:    int32(spendReport.SpendingTxHeight),
		}
	}

	// Finally, no matter whether the rescan found a spend in the past or
	// not, we'll mark our historical rescan as complete to ensure the
	// outpoint's spend hint gets updated upon connected/disconnected
	// blocks.
	err = n.txNotifier.UpdateSpendDetails(spendRequest, spendDetails)
	if err != nil {
		return nil, err
	}

	return ntfn.Event, nil
}
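
// A brief usage sketch (hypothetical caller code): after registering, the
// caller selects on the returned event's channels. The names `op`, `script`,
// `hint`, and `quit` are assumed inputs describing the watched output and the
// caller's shutdown channel:
//
//	spendEvent, err := notifier.RegisterSpendNtfn(&op, script, hint)
//	if err != nil {
//		// Handle the registration error.
//	}
//	select {
//	case details := <-spendEvent.Spend:
//		// The output was spent by details.SpendingTx at
//		// details.SpendingHeight.
//	case <-quit:
//	}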

// RegisterConfirmationsNtfn registers an intent to be notified once the target
// txid/output script has reached numConfs confirmations on-chain. When
// intending to be notified of the confirmation of an output script, a nil txid
// must be used. The heightHint should represent the earliest height at which
// the txid/output script could have been included in the chain.
//
// Progress on the number of confirmations left can be read from the 'Updates'
// channel. Once it has reached all of its confirmations, a notification will be
// sent across the 'Confirmed' channel.
func (n *NeutrinoNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	pkScript []byte,
	numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {

	// Construct a notification request for the transaction and send it to
	// the main event loop.
	confID := atomic.AddUint64(&n.confClientCounter, 1)
	confRequest, err := chainntnfs.NewConfRequest(txid, pkScript)
	if err != nil {
		return nil, err
	}
	ntfn := &chainntnfs.ConfNtfn{
		ConfID:           confID,
		ConfRequest:      confRequest,
		NumConfirmations: numConfs,
		Event: chainntnfs.NewConfirmationEvent(numConfs, func() {
			n.txNotifier.CancelConf(confRequest, confID)
		}),
		HeightHint: heightHint,
	}

	chainntnfs.Log.Infof("New confirmation subscription: %v, num_confs=%v",
		confRequest, numConfs)

	// Register the conf notification with the TxNotifier. A non-nil value
	// for `dispatch` will be returned if we are required to perform a
	// manual scan for the confirmation. Otherwise the notifier will begin
	// watching at tip for the transaction to confirm.
	dispatch, txNotifierTip, err := n.txNotifier.RegisterConf(ntfn)
	if err != nil {
		return nil, err
	}

	// To determine whether this transaction has confirmed on-chain, we'll
	// update our filter to watch for the transaction at tip and we'll also
	// dispatch a historical rescan to determine if it has confirmed in the
	// past.
	//
	// We'll update our filter first to ensure we can immediately detect the
	// confirmation at tip. To do so, we'll map the script into an address
	// type so we can instruct neutrino to match if the transaction
	// containing the script is found in a block.
	params := n.p2pNode.ChainParams()
	_, addrs, _, err := txscript.ExtractPkScriptAddrs(
		confRequest.PkScript.Script(), &params,
	)
	if err != nil {
		return nil, fmt.Errorf("unable to extract script: %v", err)
	}

	// We'll send the filter update request to the notifier's main event
	// handler and wait for its response.
	errChan := make(chan error, 1)
	select {
	case n.notificationRegistry <- &rescanFilterUpdate{
		updateOptions: []neutrino.UpdateOption{
			neutrino.AddAddrs(addrs...),
			neutrino.Rewind(txNotifierTip),
			neutrino.DisableDisconnectedNtfns(true),
		},
		errChan: errChan,
	}:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}

	select {
	case err = <-errChan:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}
	if err != nil {
		return nil, fmt.Errorf("unable to update filter: %v", err)
	}

	// If a historical rescan was not requested by the txNotifier, then we
	// can return to the caller.
	if dispatch == nil {
		return ntfn.Event, nil
	}

	// Finally, with the filter updated, we can dispatch the historical
	// rescan to ensure we can detect if the event happened in the past.
	select {
	case n.notificationRegistry <- dispatch:
	case <-n.quit:
		return nil, chainntnfs.ErrChainNotifierShuttingDown
	}

	return ntfn.Event, nil
}
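
// A brief usage sketch (hypothetical caller code): the 'Updates' and
// 'Confirmed' channels mentioned above are consumed roughly as follows, where
// `txid`, `script`, `numConfs`, and `hint` are assumed inputs:
//
//	confEvent, err := notifier.RegisterConfirmationsNtfn(
//		txid, script, numConfs, hint,
//	)
//	if err != nil {
//		// Handle the registration error.
//	}
//	select {
//	case confsLeft := <-confEvent.Updates:
//		// confsLeft confirmations remain before the target is met.
//	case conf := <-confEvent.Confirmed:
//		// The transaction reached numConfs confirmations in block
//		// conf.BlockHash at height conf.BlockHeight.
//	}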

// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	epochID uint64

	epochChan chan *chainntnfs.BlockEpoch

	epochQueue *queue.ConcurrentQueue

	cancelChan chan struct{}

	bestBlock *chainntnfs.BlockEpoch

	errorChan chan error

	wg sync.WaitGroup
}

// epochCancel is a message sent to the NeutrinoNotifier when a client wishes
// to cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	epochID uint64
}

// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications of each new block connected to the main
// chain. Clients have the option of passing in their best known block, which
// the notifier uses to check if they are behind on blocks and catch them up. If
// they do not provide one, then a notification will be dispatched immediately
// for the current tip of the chain upon a successful registration.
func (n *NeutrinoNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	reg := &blockEpochRegistration{
		epochQueue: queue.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		epochID:    atomic.AddUint64(&n.epochClientCounter, 1),
		bestBlock:  bestBlock,
		errorChan:  make(chan error, 1),
	}
	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				select {
				case reg.epochChan <- blockNtfn:

				case <-reg.cancelChan:
					return

				case <-n.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-n.quit:
				return
			}
		}
	}()

	select {
	case <-n.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification.")
	case n.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit epoch cancellation to notification dispatcher.
				select {
				case n.notificationCancels <- cancel:
					// Cancellation is being handled, drain the epoch channel until it is
					// closed before yielding to caller.
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-n.quit:
							return
						}
					}
				case <-n.quit:
				}
			},
		}, nil
	}
}

// NeutrinoChainConn is a wrapper around neutrino's chain backend in order
// to satisfy the chainntnfs.ChainConn interface.
type NeutrinoChainConn struct {
	p2pNode *neutrino.ChainService
}

// GetBlockHeader returns the block header for a hash.
func (n *NeutrinoChainConn) GetBlockHeader(blockHash *chainhash.Hash) (*wire.BlockHeader, error) {
	return n.p2pNode.GetBlockHeader(blockHash)
}

// GetBlockHeaderVerbose returns a verbose block header result for a hash. This
// result only contains the height with a nil hash.
func (n *NeutrinoChainConn) GetBlockHeaderVerbose(blockHash *chainhash.Hash) (
	*btcjson.GetBlockHeaderVerboseResult, error) {

	height, err := n.p2pNode.GetBlockHeight(blockHash)
	if err != nil {
		return nil, err
	}
	// Since only the height is used from the result, leave the hash nil.
	return &btcjson.GetBlockHeaderVerboseResult{Height: int32(height)}, nil
}

// GetBlockHash returns the hash from a block height.
func (n *NeutrinoChainConn) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
	return n.p2pNode.GetBlockHash(blockHeight)
}