// +build dev

package neutrinonotify

import (
	"fmt"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/lightninglabs/neutrino"
	"github.com/lightningnetwork/lnd/chainntnfs"
)

// UnsafeStart starts the notifier with a specified best height and optional
// best hash. Its bestHeight, txNotifier and neutrino node are initialized
// with bestHeight. The parameter generateBlocks is necessary for the notifier
// to ensure we drain all notifications up to syncHeight, since if they are
// generated ahead of UnsafeStart the chain backend may start up with an
// outdated best block and miss sending notifications. Used for testing.
func (n *NeutrinoNotifier) UnsafeStart(bestHeight int32,
	bestHash *chainhash.Hash, syncHeight int32,
	generateBlocks func() error) error {

	// We'll obtain the latest block height of the p2p node. We'll start
	// the auto-rescan from this point. Once a caller actually wishes to
	// register a chain view, the rescan state will be rewound accordingly.
	startingPoint, err := n.p2pNode.BestBlock()
	if err != nil {
		return err
	}

	// Next, we'll create our set of rescan options. Currently it's
	// required that a user MUST set an addr/outpoint/txid when creating a
	// rescan. To get around this, we'll add a "zero" outpoint that won't
	// actually be matched.
	var zeroInput neutrino.InputWithScript
	rescanOptions := []neutrino.RescanOption{
		neutrino.StartBlock(startingPoint),
		neutrino.QuitChan(n.quit),
		neutrino.NotificationHandlers(
			rpcclient.NotificationHandlers{
				OnFilteredBlockConnected:    n.onFilteredBlockConnected,
				OnFilteredBlockDisconnected: n.onFilteredBlockDisconnected,
			},
		),
		neutrino.WatchInputs(zeroInput),
	}
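
	// As noted in the doc comment above, the txNotifier is seeded with
	// the caller-supplied bestHeight rather than the p2p node's current
	// tip.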
	n.txNotifier = chainntnfs.NewTxNotifier(
		uint32(bestHeight), chainntnfs.ReorgSafetyLimit,
		n.confirmHintCache, n.spendHintCache,
	)

	// Finally, we'll create our rescan struct, start it, and launch all
	// the goroutines we need to operate this ChainNotifier instance.
	n.chainView = n.p2pNode.NewRescan(rescanOptions...)
	n.rescanErr = n.chainView.Start()
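
	// Start the queues that buffer incoming block and transaction
	// updates; the drain loop and dispatcher below consume from them.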
	n.chainUpdates.Start()
	n.txUpdates.Start()

	if generateBlocks != nil {
		// Ensure no block notifications are pending when we start the
		// notification dispatcher goroutine.

		// First generate the blocks, then drain the notifications
		// for the generated blocks.
		if err := generateBlocks(); err != nil {
			return err
		}
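
		// Drain block notifications until one at or above syncHeight
		// arrives, failing if we can't catch up within the timeout.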
		timeout := time.After(60 * time.Second)
	loop:
		for {
			select {
			case ntfn := <-n.chainUpdates.ChanOut():
				lastReceivedNtfn := ntfn.(*filteredBlock)
				if lastReceivedNtfn.height >= uint32(syncHeight) {
					break loop
				}
			case <-timeout:
				return fmt.Errorf("unable to catch up to height %d",
					syncHeight)
			}
		}
	}

	// Set the notifier's best height before launching the notification
	// dispatcher goroutine, so the dispatcher cannot race on a stale
	// bestHeight.
	n.bestHeight = uint32(bestHeight)

	n.wg.Add(1)
	go n.notificationDispatcher()

	return nil
}
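
// The snippet below sketches how a test harness might drive UnsafeStart. It
// is illustrative only: the notifier value, the mineBlocks callback, and the
// concrete heights are hypothetical and assumed to be supplied by the
// surrounding test.
//
//	// Start the notifier as if its best height were 100, then mine and
//	// drain notifications until height 110 has been processed.
//	if err := notifier.UnsafeStart(100, nil, 110, mineBlocks); err != nil {
//		t.Fatalf("unable to start notifier: %v", err)
//	}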