package bitcoindnotify

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/btcsuite/btcd/btcjson"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/btcsuite/btcwallet/chain"
	"github.com/btcsuite/btcwallet/wtxmgr"

	"github.com/lightningnetwork/lnd/chainntnfs"
)

const (
	// notifierType uniquely identifies this concrete implementation of the
	// ChainNotifier interface.
	notifierType = "bitcoind"

	// reorgSafetyLimit is the assumed maximum depth of a chain
	// reorganization. After this many confirmations, transaction
	// confirmation info will be pruned.
	reorgSafetyLimit = 100
)

var (
	// ErrChainNotifierShuttingDown is used when we attempt to register for
	// a spend notification while the notifier is already stopped.
	ErrChainNotifierShuttingDown = errors.New("chainntnfs: system interrupt " +
		"while attempting to register for spend notification.")

	// ErrTransactionNotFound is an error returned when we attempt to find a
	// transaction by manually scanning the chain within a specific range
	// but it is not found.
	ErrTransactionNotFound = errors.New("transaction not found within range")
)

// chainUpdate encapsulates an update to the current main chain. This struct is
// used as an element within an unbounded queue in order to avoid blocking the
// main rpc dispatch loop.
type chainUpdate struct {
	blockHash   *chainhash.Hash
	blockHeight int32
}

// TODO(roasbeef): generalize struct below:
//  * move chans to config
//  * extract common code
//  * allow outside callers to handle send conditions

// BitcoindNotifier implements the ChainNotifier interface using a bitcoind
// chain client. Multiple concurrent clients are supported. All notifications
// are achieved via non-blocking sends on client channels.
type BitcoindNotifier struct {
	confClientCounter  uint64 // To be used atomically.
	spendClientCounter uint64 // To be used atomically.
	epochClientCounter uint64 // To be used atomically.

	started int32 // To be used atomically.
	stopped int32 // To be used atomically.

	chainConn *chain.BitcoindClient

	notificationCancels  chan interface{}
	notificationRegistry chan interface{}

	spendNotifications map[wire.OutPoint]map[uint64]*spendNotification

	txConfNotifier *chainntnfs.TxConfNotifier

	blockEpochClients map[uint64]*blockEpochRegistration

	// bestBlock is the latest block the notifier knows about.
	bestBlock chainntnfs.BlockEpoch

	// spendHintCache is a cache used to query and update the latest height
	// hints for an outpoint. Each height hint represents the earliest
	// height at which the outpoint could have been spent within the chain.
	spendHintCache chainntnfs.SpendHintCache

	// confirmHintCache is a cache used to query the latest height hints for
	// a transaction. Each height hint represents the earliest height at
	// which the transaction could have confirmed within the chain.
	confirmHintCache chainntnfs.ConfirmHintCache

	wg   sync.WaitGroup
	quit chan struct{}
}

// Ensure BitcoindNotifier implements the ChainNotifier interface at compile
// time.
var _ chainntnfs.ChainNotifier = (*BitcoindNotifier)(nil)

// New returns a new BitcoindNotifier instance. This function assumes the
// bitcoind node detailed in the passed configuration is already running, and
// willing to accept RPC requests and new zmq clients.
func New(chainConn *chain.BitcoindConn, spendHintCache chainntnfs.SpendHintCache,
	confirmHintCache chainntnfs.ConfirmHintCache) *BitcoindNotifier {

	notifier := &BitcoindNotifier{
		notificationCancels:  make(chan interface{}),
		notificationRegistry: make(chan interface{}),

		blockEpochClients: make(map[uint64]*blockEpochRegistration),

		spendNotifications: make(map[wire.OutPoint]map[uint64]*spendNotification),

		spendHintCache:   spendHintCache,
		confirmHintCache: confirmHintCache,

		quit: make(chan struct{}),
	}

	notifier.chainConn = chainConn.NewBitcoindClient(time.Unix(0, 0))

	return notifier
}
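
// A minimal usage sketch for wiring up the notifier (illustrative only; the
// chain.BitcoindConn construction and the hint cache values are assumptions
// that depend on the caller's bitcoind RPC/ZMQ configuration and database
// setup):
//
//	notifier := New(bitcoindConn, spendHintCache, confirmHintCache)
//	if err := notifier.Start(); err != nil {
//		// handle startup failure
//	}
//	defer notifier.Stop()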

// Start connects to the running bitcoind node over websockets, registers for
// block notifications, and finally launches all related helper goroutines.
func (b *BitcoindNotifier) Start() error {
	// Already started?
	if atomic.AddInt32(&b.started, 1) != 1 {
		return nil
	}

	// Connect to bitcoind, and register for notifications on connected,
	// and disconnected blocks.
	if err := b.chainConn.Start(); err != nil {
		return err
	}
	if err := b.chainConn.NotifyBlocks(); err != nil {
		return err
	}

	currentHash, currentHeight, err := b.chainConn.GetBestBlock()
	if err != nil {
		return err
	}

	b.txConfNotifier = chainntnfs.NewTxConfNotifier(
		uint32(currentHeight), reorgSafetyLimit, b.confirmHintCache,
	)

	b.bestBlock = chainntnfs.BlockEpoch{
		Height: currentHeight,
		Hash:   currentHash,
	}

	b.wg.Add(1)
	go b.notificationDispatcher()

	return nil
}

// Stop shuts down the BitcoindNotifier.
func (b *BitcoindNotifier) Stop() error {
	// Already shutting down?
	if atomic.AddInt32(&b.stopped, 1) != 1 {
		return nil
	}

	// Shutdown the rpc client, this gracefully disconnects from bitcoind,
	// and cleans up all related resources.
	b.chainConn.Stop()

	close(b.quit)
	b.wg.Wait()

	// Notify all pending clients of our shutdown by closing the related
	// notification channels.
	for _, spendClients := range b.spendNotifications {
		for _, spendClient := range spendClients {
			close(spendClient.spendChan)
		}
	}
	for _, epochClient := range b.blockEpochClients {
		close(epochClient.cancelChan)
		epochClient.wg.Wait()

		close(epochClient.epochChan)
	}
	b.txConfNotifier.TearDown()

	return nil
}

// blockNtfn packages a notification of a connected/disconnected block along
// with its height at the time.
type blockNtfn struct {
	sha    *chainhash.Hash
	height int32
}

// notificationDispatcher is the primary goroutine which handles client
// notification registrations, as well as notification dispatches.
func (b *BitcoindNotifier) notificationDispatcher() {
out:
	for {
		select {
		case cancelMsg := <-b.notificationCancels:
			switch msg := cancelMsg.(type) {
			case *spendCancel:
				chainntnfs.Log.Infof("Cancelling spend "+
					"notification for out_point=%v, "+
					"spend_id=%v", msg.op, msg.spendID)

				// Before we attempt to close the spendChan,
				// ensure that the notification hasn't already
				// been dispatched.
				if outPointClients, ok := b.spendNotifications[msg.op]; ok {
					close(outPointClients[msg.spendID].spendChan)
					delete(b.spendNotifications[msg.op], msg.spendID)
				}

			case *epochCancel:
				chainntnfs.Log.Infof("Cancelling epoch "+
					"notification, epoch_id=%v", msg.epochID)

				// First, we'll lookup the original
				// registration in order to stop the active
				// queue goroutine.
				reg := b.blockEpochClients[msg.epochID]
				reg.epochQueue.Stop()

				// Next, close the cancel channel for this
				// specific client, and wait for the client to
				// exit.
				close(b.blockEpochClients[msg.epochID].cancelChan)
				b.blockEpochClients[msg.epochID].wg.Wait()

				// Once the client has exited, we can then
				// safely close the channel used to send epoch
				// notifications, in order to notify any
				// listeners that the intent has been
				// cancelled.
				close(b.blockEpochClients[msg.epochID].epochChan)
				delete(b.blockEpochClients, msg.epochID)
			}

		case registerMsg := <-b.notificationRegistry:
			switch msg := registerMsg.(type) {
			case *spendNotification:
				chainntnfs.Log.Infof("New spend subscription: "+
					"utxo=%v", msg.targetOutpoint)
				op := *msg.targetOutpoint

				if _, ok := b.spendNotifications[op]; !ok {
					b.spendNotifications[op] = make(map[uint64]*spendNotification)
				}
				b.spendNotifications[op][msg.spendID] = msg

			case *confirmationNotification:
				chainntnfs.Log.Infof("New confirmation "+
					"subscription: txid=%v, numconfs=%v",
					msg.TxID, msg.NumConfirmations)

				currentHeight := uint32(b.bestBlock.Height)

				// Look up whether the transaction is already
				// included in the active chain. We'll do this
				// in a goroutine to prevent blocking on
				// potentially long rescans.
				b.wg.Add(1)
				go func() {
					defer b.wg.Done()

					confDetails, err := b.historicalConfDetails(
						msg.TxID, msg.heightHint,
						currentHeight,
					)
					if err != nil {
						chainntnfs.Log.Error(err)
						return
					}

					if confDetails != nil {
						err := b.txConfNotifier.UpdateConfDetails(
							*msg.TxID, msg.ConfID,
							confDetails,
						)
						if err != nil {
							chainntnfs.Log.Error(err)
						}
					}
				}()

			case *blockEpochRegistration:
				chainntnfs.Log.Infof("New block epoch subscription")
				b.blockEpochClients[msg.epochID] = msg
				if msg.bestBlock != nil {
					missedBlocks, err :=
						chainntnfs.GetClientMissedBlocks(
							b.chainConn, msg.bestBlock,
							b.bestBlock.Height, true,
						)
					if err != nil {
						msg.errorChan <- err
						continue
					}
					for _, block := range missedBlocks {
						b.notifyBlockEpochClient(msg,
							block.Height, block.Hash)
					}
				}
				msg.errorChan <- nil

			case chain.RelevantTx:
				b.handleRelevantTx(msg, b.bestBlock.Height)
			}

		case ntfn := <-b.chainConn.Notifications():
			switch item := ntfn.(type) {
			case chain.BlockConnected:
				blockHeader, err :=
					b.chainConn.GetBlockHeader(&item.Hash)
				if err != nil {
					chainntnfs.Log.Errorf("Unable to fetch "+
						"block header: %v", err)
					continue
				}

				if blockHeader.PrevBlock != *b.bestBlock.Hash {
					// Handle the case where the notifier
					// missed some blocks from its chain
					// backend.
					chainntnfs.Log.Infof("Missed blocks, " +
						"attempting to catch up")
					newBestBlock, missedBlocks, err :=
						chainntnfs.HandleMissedBlocks(
							b.chainConn,
							b.txConfNotifier,
							b.bestBlock, item.Height,
							true,
						)
					if err != nil {
						// Set the bestBlock here in case
						// a catch up partially completed.
						b.bestBlock = newBestBlock
						chainntnfs.Log.Error(err)
						continue
					}

					for _, block := range missedBlocks {
						err := b.handleBlockConnected(block)
						if err != nil {
							chainntnfs.Log.Error(err)
							continue out
						}
					}
				}

				newBlock := chainntnfs.BlockEpoch{
					Height: item.Height,
					Hash:   &item.Hash,
				}
				if err := b.handleBlockConnected(newBlock); err != nil {
					chainntnfs.Log.Error(err)
				}

				continue

			case chain.BlockDisconnected:
				if item.Height != b.bestBlock.Height {
					chainntnfs.Log.Infof("Missed disconnected " +
						"blocks, attempting to catch up")
				}

				newBestBlock, err := chainntnfs.RewindChain(
					b.chainConn, b.txConfNotifier,
					b.bestBlock, item.Height-1,
				)
				if err != nil {
					chainntnfs.Log.Errorf("Unable to rewind chain "+
						"from height %d to height %d: %v",
						b.bestBlock.Height, item.Height-1, err)
				}

				// Set the bestBlock here in case a chain
				// rewind partially completed.
				b.bestBlock = newBestBlock

			case chain.RelevantTx:
				b.handleRelevantTx(item, b.bestBlock.Height)
			}

		case <-b.quit:
			break out
		}
	}
	b.wg.Done()
}

// handleRelevantTx notifies any clients of a relevant transaction.
func (b *BitcoindNotifier) handleRelevantTx(tx chain.RelevantTx, bestHeight int32) {
	msgTx := tx.TxRecord.MsgTx

	// We only care about notifying on confirmed spends, so if this is a
	// mempool spend, we can return, and wait for the spend to appear in
	// the chain.
	if tx.Block == nil {
		return
	}

	// First, check if this transaction spends an output that has an
	// existing spend notification for it.
	for i, txIn := range msgTx.TxIn {
		prevOut := txIn.PreviousOutPoint

		// If this transaction indeed does spend an output which we
		// have a registered notification for, then create a spend
		// summary, finally sending off the details to the notification
		// subscriber.
		if clients, ok := b.spendNotifications[prevOut]; ok {
			spenderSha := msgTx.TxHash()
			spendDetails := &chainntnfs.SpendDetail{
				SpentOutPoint:     &prevOut,
				SpenderTxHash:     &spenderSha,
				SpendingTx:        &msgTx,
				SpenderInputIndex: uint32(i),
			}
			spendDetails.SpendingHeight = tx.Block.Height

			for _, ntfn := range clients {
				chainntnfs.Log.Infof("Dispatching confirmed "+
					"spend notification for outpoint=%v "+
					"at height %v", ntfn.targetOutpoint,
					spendDetails.SpendingHeight)
				ntfn.spendChan <- spendDetails

				// Close spendChan to ensure that any calls to
				// Cancel will not block. This is safe to do
				// since the channel is buffered, and the
				// message can still be read by the receiver.
				close(ntfn.spendChan)
			}
			delete(b.spendNotifications, prevOut)
		}
	}
}

// historicalConfDetails looks up whether a transaction is already included in a
// block in the active chain and, if so, returns details about the confirmation.
func (b *BitcoindNotifier) historicalConfDetails(txid *chainhash.Hash,
	heightHint, currentHeight uint32) (*chainntnfs.TxConfirmation, error) {

	// First, we'll attempt to retrieve the transaction details using the
	// backend node's transaction index.
	txConf, err := b.confDetailsFromTxIndex(txid)
	if err != nil {
		return nil, err
	}

	if txConf != nil {
		return txConf, nil
	}

	// If the backend node's transaction index is not enabled, then we'll
	// fall back to manually scanning the chain's blocks, looking for the
	// block in which the transaction was included.
	return b.confDetailsManually(txid, heightHint, currentHeight)
}
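
// Note: the transaction-index fast path above assumes the backend bitcoind
// node runs with -txindex=1. Without it, GetRawTransactionVerbose fails with
// btcjson.ErrRPCNoTxInfo for confirmed transactions, and the manual block
// scan is used as the fallback.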

// confDetailsFromTxIndex looks up whether a transaction is already included
// in a block in the active chain by using the backend node's transaction index.
// If the transaction is found, its confirmation details are returned.
// Otherwise, nil is returned.
func (b *BitcoindNotifier) confDetailsFromTxIndex(txid *chainhash.Hash,
) (*chainntnfs.TxConfirmation, error) {

	// If the transaction already has some or all of the confirmations
	// required, then we may be able to dispatch it immediately.
	tx, err := b.chainConn.GetRawTransactionVerbose(txid)
	if err != nil {
		// Avoid returning an error if the transaction index is not
		// enabled, so we can proceed with the fallback methods.
		jsonErr, ok := err.(*btcjson.RPCError)
		if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
			return nil, fmt.Errorf("unable to query for txid "+
				"%v: %v", txid, err)
		}
	}

	// Make sure we actually retrieved a transaction that is included in a
	// block. Without this, we won't be able to retrieve its confirmation
	// details.
	if tx == nil || tx.BlockHash == "" {
		return nil, nil
	}

	// As we need to fully populate the returned TxConfirmation struct,
	// grab the block in which the transaction was confirmed so we can
	// locate its exact index within the block.
	blockHash, err := chainhash.NewHashFromStr(tx.BlockHash)
	if err != nil {
		return nil, fmt.Errorf("unable to get block hash %v for "+
			"historical dispatch: %v", tx.BlockHash, err)
	}

	block, err := b.chainConn.GetBlockVerbose(blockHash)
	if err != nil {
		return nil, fmt.Errorf("unable to get block with hash %v for "+
			"historical dispatch: %v", blockHash, err)
	}

	// If the block was obtained, locate the transaction's index within the
	// block so we can give the subscriber full confirmation details.
	targetTxidStr := txid.String()
	for txIndex, txHash := range block.Tx {
		if txHash == targetTxidStr {
			return &chainntnfs.TxConfirmation{
				BlockHash:   blockHash,
				BlockHeight: uint32(block.Height),
				TxIndex:     uint32(txIndex),
			}, nil
		}
	}

	// We return an error because we should have found the transaction
	// within the block, but didn't.
	return nil, fmt.Errorf("unable to locate tx %v in block %v", txid,
		blockHash)
}

// confDetailsManually looks up whether a transaction is already included in a
// block in the active chain by scanning the chain's blocks, starting from the
// earliest height the transaction could have been included in, to the current
// height in the chain. If the transaction is found, its confirmation details
// are returned. Otherwise, nil is returned.
func (b *BitcoindNotifier) confDetailsManually(txid *chainhash.Hash,
	heightHint, currentHeight uint32) (*chainntnfs.TxConfirmation, error) {

	targetTxidStr := txid.String()

	// Begin scanning blocks at every height to determine which block the
	// transaction was included in.
	for height := heightHint; height <= currentHeight; height++ {
		// Ensure we haven't been requested to shut down before
		// processing the next height.
		select {
		case <-b.quit:
			return nil, ErrChainNotifierShuttingDown
		default:
		}

		blockHash, err := b.chainConn.GetBlockHash(int64(height))
		if err != nil {
			return nil, fmt.Errorf("unable to get hash from block "+
				"with height %d", height)
		}

		block, err := b.chainConn.GetBlockVerbose(blockHash)
		if err != nil {
			return nil, fmt.Errorf("unable to get block with hash "+
				"%v: %v", blockHash, err)
		}

		for txIndex, txHash := range block.Tx {
			// If we're able to find the transaction in this block,
			// return its confirmation details.
			if txHash == targetTxidStr {
				return &chainntnfs.TxConfirmation{
					BlockHash:   blockHash,
					BlockHeight: height,
					TxIndex:     uint32(txIndex),
				}, nil
			}
		}
	}

	// If we reach here, then we were not able to find the transaction
	// within a block, so we avoid returning an error.
	return nil, nil
}

// handleBlockConnected applies a chain update for a new block. Any watched
// transactions included in this block will be processed to either send
// notifications now or after numConfirmations confs.
func (b *BitcoindNotifier) handleBlockConnected(block chainntnfs.BlockEpoch) error {
	rawBlock, err := b.chainConn.GetBlock(block.Hash)
	if err != nil {
		return fmt.Errorf("unable to get block: %v", err)
	}

	txns := btcutil.NewBlock(rawBlock).Transactions()
	err = b.txConfNotifier.ConnectTip(
		block.Hash, uint32(block.Height), txns)
	if err != nil {
		return fmt.Errorf("unable to connect tip: %v", err)
	}

	chainntnfs.Log.Infof("New block: height=%v, sha=%v", block.Height,
		block.Hash)

	// We want to set the best block before dispatching notifications so
	// if any subscribers make queries based on their received block epoch,
	// our state is fully updated in time.
	b.bestBlock = block

	b.notifyBlockEpochs(block.Height, block.Hash)

	// Finally, we'll update the spend height hint for all of our watched
	// outpoints that have not been spent yet. This is safe to do as we do
	// not watch already spent outpoints for spend notifications.
	ops := make([]wire.OutPoint, 0, len(b.spendNotifications))
	for op := range b.spendNotifications {
		ops = append(ops, op)
	}

	if len(ops) > 0 {
		err := b.spendHintCache.CommitSpendHint(
			uint32(block.Height), ops...,
		)
		if err != nil {
			// The error is not fatal, so we should not return an
			// error to the caller.
			chainntnfs.Log.Errorf("Unable to update spend hint to "+
				"%d for %v: %v", block.Height, ops, err)
		}
	}

	return nil
}

// notifyBlockEpochs notifies all registered block epoch clients of the newly
// connected block to the main chain.
func (b *BitcoindNotifier) notifyBlockEpochs(newHeight int32, newSha *chainhash.Hash) {
	for _, client := range b.blockEpochClients {
		b.notifyBlockEpochClient(client, newHeight, newSha)
	}
}

// notifyBlockEpochClient sends a registered block epoch client a notification
// about a specific block.
func (b *BitcoindNotifier) notifyBlockEpochClient(epochClient *blockEpochRegistration,
	height int32, sha *chainhash.Hash) {

	epoch := &chainntnfs.BlockEpoch{
		Height: height,
		Hash:   sha,
	}

	select {
	case epochClient.epochQueue.ChanIn() <- epoch:
	case <-epochClient.cancelChan:
	case <-b.quit:
	}
}

// spendNotification couples a target outpoint along with the channel used for
// notifications once a spend of the outpoint has been detected.
type spendNotification struct {
	targetOutpoint *wire.OutPoint

	spendChan chan *chainntnfs.SpendDetail

	spendID uint64

	heightHint uint32
}

// spendCancel is a message sent to the BitcoindNotifier when a client wishes
// to cancel an outstanding spend notification that has yet to be dispatched.
type spendCancel struct {
	// op is the target outpoint of the notification to be cancelled.
	op wire.OutPoint

	// spendID is the ID of the notification to cancel.
	spendID uint64
}

// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint has been spent by a transaction on-chain. Once a spend of the target
// outpoint has been detected, the details of the spending event will be sent
// across the 'Spend' channel. The heightHint should represent the earliest
// height in the chain where the outpoint could have been spent.
func (b *BitcoindNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
	pkScript []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {

	// Before proceeding to register the notification, we'll query our
	// height hint cache to determine whether a better one exists.
	if hint, err := b.spendHintCache.QuerySpendHint(*outpoint); err == nil {
		if hint > heightHint {
			chainntnfs.Log.Debugf("Using height hint %d retrieved "+
				"from cache for %v", hint, outpoint)
			heightHint = hint
		}
	}

	// Construct a notification request for the outpoint and send it to the
	// main event loop.
	ntfn := &spendNotification{
		targetOutpoint: outpoint,
		spendChan:      make(chan *chainntnfs.SpendDetail, 1),
		spendID:        atomic.AddUint64(&b.spendClientCounter, 1),
	}

	select {
	case <-b.quit:
		return nil, ErrChainNotifierShuttingDown
	case b.notificationRegistry <- ntfn:
	}

	if err := b.chainConn.NotifySpent([]*wire.OutPoint{outpoint}); err != nil {
		return nil, err
	}

	// The following conditional checks to ensure that when a spend
	// notification is registered, the output hasn't already been spent. If
	// the output is no longer in the UTXO set, the chain will be rescanned
	// from the point where the output was added. The rescan will dispatch
	// the notification.
	txOut, err := b.chainConn.GetTxOut(&outpoint.Hash, outpoint.Index, true)
	if err != nil {
		return nil, err
	}

	// If the output is unspent, then we'll write it to the cache with the
	// given height hint. This allows us to increase the height hint as the
	// chain extends and the output remains unspent.
	if txOut != nil {
		err := b.spendHintCache.CommitSpendHint(heightHint, *outpoint)
		if err != nil {
			// The error is not fatal, so we should not return an
			// error to the caller.
			chainntnfs.Log.Errorf("Unable to update spend hint to "+
				"%d for %v: %v", heightHint, *outpoint, err)
		}
	} else {
		// Otherwise, we'll determine when the output was spent.
		//
		// First, we'll attempt to retrieve the transaction's block hash
		// using the backend's transaction index.
		tx, err := b.chainConn.GetRawTransactionVerbose(&outpoint.Hash)
		if err != nil {
			// Avoid returning an error if the transaction was not
			// found, so we can proceed with the fallback methods.
			jsonErr, ok := err.(*btcjson.RPCError)
			if !ok || jsonErr.Code != btcjson.ErrRPCNoTxInfo {
				return nil, fmt.Errorf("unable to query for "+
					"txid %v: %v", outpoint.Hash, err)
			}
		}

		var blockHash *chainhash.Hash
		if tx != nil && tx.BlockHash != "" {
			// If we're able to retrieve a valid block hash from the
			// transaction, then we'll use it as our rescan starting
			// point.
			blockHash, err = chainhash.NewHashFromStr(tx.BlockHash)
			if err != nil {
				return nil, err
			}
		} else {
			// Otherwise, we'll attempt to retrieve the hash for the
			// block at the heightHint.
			blockHash, err = b.chainConn.GetBlockHash(
				int64(heightHint),
			)
			if err != nil {
				return nil, fmt.Errorf("unable to retrieve "+
					"hash for block with height %d: %v",
					heightHint, err)
			}
		}

		// We'll only scan old blocks if the transaction has actually
		// been included within a block. Otherwise, we'll encounter an
		// error when scanning for blocks. This can happen in the case
		// of a race condition, wherein the output itself is unspent,
		// and only arrives in the mempool after the gettxout call.
		if blockHash != nil {
			// Rescan all the blocks until the current one.
			startHeight, err := b.chainConn.GetBlockHeight(
				blockHash,
			)
			if err != nil {
				return nil, err
			}

			_, endHeight, err := b.chainConn.GetBestBlock()
			if err != nil {
				return nil, err
			}

			// In order to ensure we don't block the caller on what
			// may be a long rescan, we'll launch a goroutine to do
			// so in the background.
			b.wg.Add(1)
			go func() {
				defer b.wg.Done()

				err := b.dispatchSpendDetailsManually(
					*outpoint, startHeight, endHeight,
				)
				if err != nil {
					chainntnfs.Log.Errorf("Rescan for spend "+
						"notification txout(%x) "+
						"failed: %v", outpoint, err)
				}
			}()
		}
	}

	return &chainntnfs.SpendEvent{
		Spend: ntfn.spendChan,
		Cancel: func() {
			cancel := &spendCancel{
				op:      *outpoint,
				spendID: ntfn.spendID,
			}

			// Submit spend cancellation to notification dispatcher.
			select {
			case b.notificationCancels <- cancel:
				// Cancellation is being handled, drain the
				// spend chan until it is closed before yielding
				// to the caller.
				for {
					select {
					case _, ok := <-ntfn.spendChan:
						if !ok {
							return
						}
					case <-b.quit:
						return
					}
				}
			case <-b.quit:
			}
		},
	}, nil
}
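
// A minimal sketch of consuming a spend notification (illustrative only; the
// outpoint, pkScript, and heightHint values are assumed placeholders supplied
// by the caller):
//
//	spendEvent, err := notifier.RegisterSpendNtfn(&outpoint, pkScript, heightHint)
//	if err != nil {
//		// handle registration failure
//	}
//	if spend, ok := <-spendEvent.Spend; ok {
//		// spend.SpenderTxHash spent the outpoint at
//		// spend.SpendingHeight; the channel is closed on cancel
//		// or notifier shutdown.
//	}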

// dispatchSpendDetailsManually attempts to manually scan the chain within the
// given height range for a transaction that spends the given outpoint. If one
// is found, its spending details are sent to the notifier dispatcher, which
// will then dispatch the notification to all of its clients.
func (b *BitcoindNotifier) dispatchSpendDetailsManually(op wire.OutPoint,
	startHeight, endHeight int32) error {

	// Begin scanning blocks at every height to determine if the outpoint
	// was spent.
	for height := startHeight; height <= endHeight; height++ {
		// Ensure we haven't been requested to shut down before
		// processing the next height.
		select {
		case <-b.quit:
			return ErrChainNotifierShuttingDown
		default:
		}

		blockHash, err := b.chainConn.GetBlockHash(int64(height))
		if err != nil {
			return fmt.Errorf("unable to retrieve hash for block "+
				"with height %d: %v", height, err)
		}
		block, err := b.chainConn.GetBlock(blockHash)
		if err != nil {
			return fmt.Errorf("unable to retrieve block with hash "+
				"%v: %v", blockHash, err)
		}

		for _, tx := range block.Transactions {
			for _, in := range tx.TxIn {
				if in.PreviousOutPoint != op {
					continue
				}

				// If this transaction input spends the
				// outpoint, we'll gather the details of the
				// spending transaction and dispatch a spend
				// notification to our clients.
				relTx := chain.RelevantTx{
					TxRecord: &wtxmgr.TxRecord{
						MsgTx:    *tx,
						Hash:     tx.TxHash(),
						Received: block.Header.Timestamp,
					},
					Block: &wtxmgr.BlockMeta{
						Block: wtxmgr.Block{
							Hash:   *blockHash,
							Height: height,
						},
						Time: block.Header.Timestamp,
					},
				}

				select {
				case b.notificationRegistry <- relTx:
				case <-b.quit:
					return ErrChainNotifierShuttingDown
				}

				return nil
			}
		}
	}

	return ErrTransactionNotFound
}

// confirmationNotification represents a client's intent to receive a
// notification once the target txid reaches numConfirmations confirmations.
type confirmationNotification struct {
	chainntnfs.ConfNtfn
	heightHint uint32
}

// RegisterConfirmationsNtfn registers a notification with BitcoindNotifier
// which will be triggered once the txid reaches numConfs number of
// confirmations.
func (b *BitcoindNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
	_ []byte, numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) {

	// Before proceeding to register the notification, we'll query our
	// height hint cache to determine whether a better one exists.
	if hint, err := b.confirmHintCache.QueryConfirmHint(*txid); err == nil {
		if hint > heightHint {
			chainntnfs.Log.Debugf("Using height hint %d retrieved "+
				"from cache for %v", hint, txid)
			heightHint = hint
		}
	}

	// Construct a notification request for the transaction and send it to
	// the main event loop.
	ntfn := &confirmationNotification{
		ConfNtfn: chainntnfs.ConfNtfn{
			ConfID:           atomic.AddUint64(&b.confClientCounter, 1),
			TxID:             txid,
			NumConfirmations: numConfs,
			Event:            chainntnfs.NewConfirmationEvent(numConfs),
		},
		heightHint: heightHint,
	}

	if err := b.txConfNotifier.Register(&ntfn.ConfNtfn); err != nil {
		return nil, err
	}

	select {
	case b.notificationRegistry <- ntfn:
		return ntfn.Event, nil
	case <-b.quit:
		return nil, ErrChainNotifierShuttingDown
	}
}
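
// A minimal sketch of waiting for a confirmation (illustrative only; the txid,
// heightHint, and the target of 6 confirmations are assumed placeholders; the
// pkScript argument is unused by this backend, so nil is passed):
//
//	confEvent, err := notifier.RegisterConfirmationsNtfn(txid, nil, 6, heightHint)
//	if err != nil {
//		// handle registration failure
//	}
//	conf := <-confEvent.Confirmed
//	// conf.BlockHeight is the height at which the transaction confirmed;
//	// the notification fires once 6 confirmations have been reached.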

// blockEpochRegistration represents a client's intent to receive a
// notification with each newly connected block.
type blockEpochRegistration struct {
	epochID uint64

	epochChan chan *chainntnfs.BlockEpoch

	epochQueue *chainntnfs.ConcurrentQueue

	bestBlock *chainntnfs.BlockEpoch

	errorChan chan error

	cancelChan chan struct{}

	wg sync.WaitGroup
}

// epochCancel is a message sent to the BitcoindNotifier when a client wishes
// to cancel an outstanding epoch notification that has yet to be dispatched.
type epochCancel struct {
	epochID uint64
}

// RegisterBlockEpochNtfn returns a BlockEpochEvent which subscribes the
// caller to receive notifications of each new block connected to the main
// chain. Clients have the option of passing in their best known block, which
// the notifier uses to check if they are behind on blocks and catch them up.
func (b *BitcoindNotifier) RegisterBlockEpochNtfn(
	bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {

	reg := &blockEpochRegistration{
		epochQueue: chainntnfs.NewConcurrentQueue(20),
		epochChan:  make(chan *chainntnfs.BlockEpoch, 20),
		cancelChan: make(chan struct{}),
		epochID:    atomic.AddUint64(&b.epochClientCounter, 1),
		bestBlock:  bestBlock,
		errorChan:  make(chan error, 1),
	}
	reg.epochQueue.Start()

	// Before we send the request to the main goroutine, we'll launch a new
	// goroutine to proxy items added to our queue to the client itself.
	// This ensures that all notifications are received *in order*.
	reg.wg.Add(1)
	go func() {
		defer reg.wg.Done()

		for {
			select {
			case ntfn := <-reg.epochQueue.ChanOut():
				blockNtfn := ntfn.(*chainntnfs.BlockEpoch)
				select {
				case reg.epochChan <- blockNtfn:

				case <-reg.cancelChan:
					return

				case <-b.quit:
					return
				}

			case <-reg.cancelChan:
				return

			case <-b.quit:
				return
			}
		}
	}()

	select {
	case <-b.quit:
		// As we're exiting before the registration could be sent,
		// we'll stop the queue now ourselves.
		reg.epochQueue.Stop()

		return nil, errors.New("chainntnfs: system interrupt while " +
			"attempting to register for block epoch notification.")
	case b.notificationRegistry <- reg:
		return &chainntnfs.BlockEpochEvent{
			Epochs: reg.epochChan,
			Cancel: func() {
				cancel := &epochCancel{
					epochID: reg.epochID,
				}

				// Submit epoch cancellation to notification
				// dispatcher.
				select {
				case b.notificationCancels <- cancel:
					// Cancellation is being handled, drain
					// the epoch channel until it is closed
					// before yielding to caller.
					for {
						select {
						case _, ok := <-reg.epochChan:
							if !ok {
								return
							}
						case <-b.quit:
							return
						}
					}
				case <-b.quit:
				}
			},
		}, nil
	}
}
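
// A minimal sketch of streaming block epochs (illustrative only; passing a
// nil best block is assumed to simply subscribe from the next connected
// block onward):
//
//	epochEvent, err := notifier.RegisterBlockEpochNtfn(nil)
//	if err != nil {
//		// handle registration failure
//	}
//	defer epochEvent.Cancel()
//	for epoch := range epochEvent.Epochs {
//		// epoch.Height and epoch.Hash describe the new chain tip.
//	}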