commit
2f17030e8c
.gitignore
@ -28,6 +28,8 @@ _testmain.go
/lnd-debug
/lncli
/lncli-debug
/lnd-itest
/lncli-itest

# Integration test log files
output*.log
Makefile
@ -100,6 +100,11 @@ build:
$(GOBUILD) -tags="$(DEV_TAGS)" -o lnd-debug $(LDFLAGS) $(PKG)
$(GOBUILD) -tags="$(DEV_TAGS)" -o lncli-debug $(LDFLAGS) $(PKG)/cmd/lncli

build-itest:
@$(call print, "Building itest lnd and lncli.")
$(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest $(LDFLAGS) $(PKG)
$(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest $(LDFLAGS) $(PKG)/cmd/lncli

install:
@$(call print, "Installing lnd and lncli.")
$(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)
@ -118,7 +123,7 @@ itest-only:
@$(call print, "Running integration tests.")
$(ITEST)

itest: btcd build itest-only
itest: btcd build-itest itest-only

unit: btcd
@$(call print, "Running unit tests.")
@ -181,6 +186,7 @@ rpc:
clean:
@$(call print, "Cleaning source.$(NC)")
$(RM) ./lnd-debug ./lncli-debug
$(RM) ./lnd-itest ./lncli-itest
$(RM) -r ./vendor .vendor-new
|
||||
|
@ -757,6 +757,7 @@ type breachedOutput struct {
|
||||
outpoint wire.OutPoint
|
||||
witnessType lnwallet.WitnessType
|
||||
signDesc lnwallet.SignDescriptor
|
||||
confHeight uint32
|
||||
|
||||
secondLevelWitnessScript []byte
|
||||
|
||||
@ -768,7 +769,8 @@ type breachedOutput struct {
|
||||
func makeBreachedOutput(outpoint *wire.OutPoint,
|
||||
witnessType lnwallet.WitnessType,
|
||||
secondLevelScript []byte,
|
||||
signDescriptor *lnwallet.SignDescriptor) breachedOutput {
|
||||
signDescriptor *lnwallet.SignDescriptor,
|
||||
confHeight uint32) breachedOutput {
|
||||
|
||||
amount := signDescriptor.Output.Value
|
||||
|
||||
@ -778,6 +780,7 @@ func makeBreachedOutput(outpoint *wire.OutPoint,
|
||||
secondLevelWitnessScript: secondLevelScript,
|
||||
witnessType: witnessType,
|
||||
signDesc: *signDescriptor,
|
||||
confHeight: confHeight,
|
||||
}
|
||||
}
|
||||
|
||||
@ -831,6 +834,12 @@ func (bo *breachedOutput) BlocksToMaturity() uint32 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// HeightHint returns the minimum height at which a confirmed spending tx can
|
||||
// occur.
|
||||
func (bo *breachedOutput) HeightHint() uint32 {
|
||||
return bo.confHeight
|
||||
}
|
||||
|
||||
// Add compile-time constraint ensuring breachedOutput implements the Input
|
||||
// interface.
|
||||
var _ sweep.Input = (*breachedOutput)(nil)
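// Illustrative sketch (not part of this commit): with confHeight recorded and
// exposed through HeightHint, a consumer of sweep.Input can bound how far
// back it must scan when watching a batch of inputs. Only the HeightHint
// method shown above is assumed here.
func earliestHeightHint(inputs []sweep.Input) uint32 {
	var earliest uint32
	for i, inp := range inputs {
		if i == 0 || inp.HeightHint() < earliest {
			earliest = inp.HeightHint()
		}
	}
	return earliest
}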
|
||||
@ -878,7 +887,8 @@ func newRetributionInfo(chanPoint *wire.OutPoint,
|
||||
// No second level script as this is a commitment
|
||||
// output.
|
||||
nil,
|
||||
breachInfo.LocalOutputSignDesc)
|
||||
breachInfo.LocalOutputSignDesc,
|
||||
breachInfo.BreachHeight)
|
||||
|
||||
breachedOutputs = append(breachedOutputs, localOutput)
|
||||
}
|
||||
@ -895,7 +905,8 @@ func newRetributionInfo(chanPoint *wire.OutPoint,
|
||||
// No second level script as this is a commitment
|
||||
// output.
|
||||
nil,
|
||||
breachInfo.RemoteOutputSignDesc)
|
||||
breachInfo.RemoteOutputSignDesc,
|
||||
breachInfo.BreachHeight)
|
||||
|
||||
breachedOutputs = append(breachedOutputs, remoteOutput)
|
||||
}
|
||||
@ -919,7 +930,8 @@ func newRetributionInfo(chanPoint *wire.OutPoint,
|
||||
&breachInfo.HtlcRetributions[i].OutPoint,
|
||||
htlcWitnessType,
|
||||
breachInfo.HtlcRetributions[i].SecondLevelWitnessScript,
|
||||
&breachInfo.HtlcRetributions[i].SignDesc)
|
||||
&breachInfo.HtlcRetributions[i].SignDesc,
|
||||
breachInfo.BreachHeight)
|
||||
|
||||
breachedOutputs = append(breachedOutputs, htlcOutput)
|
||||
}
|
||||
|
@ -1336,7 +1336,7 @@ func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent,
|
||||
ba := newBreachArbiter(&BreachConfig{
|
||||
CloseLink: func(_ *wire.OutPoint, _ htlcswitch.ChannelCloseType) {},
|
||||
DB: db,
|
||||
Estimator: &lnwallet.StaticFeeEstimator{FeePerKW: 12500},
|
||||
Estimator: lnwallet.NewStaticFeeEstimator(12500, 0),
|
||||
GenSweepScript: func() ([]byte, error) { return nil, nil },
|
||||
ContractBreaches: contractBreaches,
|
||||
Signer: signer,
|
||||
@ -1476,7 +1476,7 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
estimator := &lnwallet.StaticFeeEstimator{FeePerKW: 12500}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(12500, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
@ -150,9 +150,9 @@ func newChainControlFromConfig(cfg *config, chanDB *channeldb.DB,
|
||||
FeeRate: cfg.Bitcoin.FeeRate,
|
||||
TimeLockDelta: cfg.Bitcoin.TimeLockDelta,
|
||||
}
|
||||
cc.feeEstimator = lnwallet.StaticFeeEstimator{
|
||||
FeePerKW: defaultBitcoinStaticFeePerKW,
|
||||
}
|
||||
cc.feeEstimator = lnwallet.NewStaticFeeEstimator(
|
||||
defaultBitcoinStaticFeePerKW, 0,
|
||||
)
|
||||
case litecoinChain:
|
||||
cc.routingPolicy = htlcswitch.ForwardingPolicy{
|
||||
MinHTLC: cfg.Litecoin.MinHTLC,
|
||||
@ -160,9 +160,9 @@ func newChainControlFromConfig(cfg *config, chanDB *channeldb.DB,
|
||||
FeeRate: cfg.Litecoin.FeeRate,
|
||||
TimeLockDelta: cfg.Litecoin.TimeLockDelta,
|
||||
}
|
||||
cc.feeEstimator = lnwallet.StaticFeeEstimator{
|
||||
FeePerKW: defaultLitecoinStaticFeePerKW,
|
||||
}
|
||||
cc.feeEstimator = lnwallet.NewStaticFeeEstimator(
|
||||
defaultLitecoinStaticFeePerKW, 0,
|
||||
)
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("Default routing policy for "+
|
||||
"chain %v is unknown", registeredChains.PrimaryChain())
|
||||
|
@ -462,6 +462,7 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) {
|
||||
&h.htlcResolution.ClaimOutpoint,
|
||||
&h.htlcResolution.SweepSignDesc,
|
||||
h.htlcResolution.Preimage[:],
|
||||
h.broadcastHeight,
|
||||
)
|
||||
|
||||
// With the input created, we can now generate the full
|
||||
@ -471,6 +472,8 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) {
|
||||
// TODO: Set tx lock time to current block height
|
||||
// instead of zero. Will be taken care of once sweeper
|
||||
// implementation is complete.
|
||||
//
|
||||
// TODO: Use time-based sweeper and result chan.
|
||||
var err error
|
||||
h.sweepTx, err = h.Sweeper.CreateSweepTx(
|
||||
[]sweep.Input{&input}, sweepConfTarget, 0,
|
||||
@ -1254,6 +1257,7 @@ func (c *commitSweepResolver) Resolve() (ContractResolver, error) {
|
||||
&c.commitResolution.SelfOutPoint,
|
||||
lnwallet.CommitmentNoDelay,
|
||||
&c.commitResolution.SelfOutputSignDesc,
|
||||
c.broadcastHeight,
|
||||
)
|
||||
|
||||
// With our input constructed, we'll now request that the
|
||||
@ -1263,6 +1267,8 @@ func (c *commitSweepResolver) Resolve() (ContractResolver, error) {
|
||||
// TODO: Set tx lock time to current block height instead of
|
||||
// zero. Will be taken care of once sweeper implementation is
|
||||
// complete.
|
||||
//
|
||||
// TODO: Use time-based sweeper and result chan.
|
||||
c.sweepTx, err = c.Sweeper.CreateSweepTx(
|
||||
[]sweep.Input{&input}, sweepConfTarget, 0,
|
||||
)
|
||||
|
@ -231,7 +231,7 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey,
|
||||
addr *lnwire.NetAddress, tempTestDir string) (*testNode, error) {
|
||||
|
||||
netParams := activeNetParams.Params
|
||||
estimator := lnwallet.StaticFeeEstimator{FeePerKW: 62500}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(62500, 0)
|
||||
|
||||
chainNotifier := &mockNotifier{
|
||||
oneConfChannel: make(chan *chainntnfs.TxConfirmation, 1),
|
||||
@ -250,7 +250,9 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey,
|
||||
signer := &mockSigner{
|
||||
key: alicePrivKey,
|
||||
}
|
||||
bio := &mockChainIO{}
|
||||
bio := &mockChainIO{
|
||||
bestHeight: fundingBroadcastHeight,
|
||||
}
|
||||
|
||||
dbDir := filepath.Join(tempTestDir, "cdb")
|
||||
cdb, err := channeldb.Open(dbDir)
|
||||
|
@ -1795,7 +1795,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) {
|
||||
coreLink.cfg.HodlMask = hodl.MaskFromFlags(hodl.ExitSettle)
|
||||
coreLink.cfg.DebugHTLC = true
|
||||
|
||||
estimator := &lnwallet.StaticFeeEstimator{FeePerKW: 6000}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(6000, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query fee estimator: %v", err)
|
||||
@ -2206,7 +2206,7 @@ func TestChannelLinkBandwidthConsistencyOverflow(t *testing.T) {
|
||||
aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
|
||||
)
|
||||
|
||||
estimator := &lnwallet.StaticFeeEstimator{FeePerKW: 6000}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(6000, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query fee estimator: %v", err)
|
||||
@ -2453,7 +2453,7 @@ func TestChannelLinkTrimCircuitsPending(t *testing.T) {
|
||||
|
||||
// Compute the static fees that will be used to determine the
|
||||
// correctness of Alice's bandwidth when forwarding HTLCs.
|
||||
estimator := &lnwallet.StaticFeeEstimator{FeePerKW: 6000}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(6000, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query fee estimator: %v", err)
|
||||
@ -2731,7 +2731,7 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) {
|
||||
|
||||
// Compute the static fees that will be used to determine the
|
||||
// correctness of Alice's bandwidth when forwarding HTLCs.
|
||||
estimator := &lnwallet.StaticFeeEstimator{FeePerKW: 6000}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(6000, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query fee estimator: %v", err)
|
||||
@ -2989,7 +2989,7 @@ func TestChannelLinkBandwidthChanReserve(t *testing.T) {
|
||||
aliceMsgs = coreLink.cfg.Peer.(*mockPeer).sentMsgs
|
||||
)
|
||||
|
||||
estimator := &lnwallet.StaticFeeEstimator{FeePerKW: 6000}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(6000, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query fee estimator: %v", err)
|
||||
|
@ -75,6 +75,10 @@ func (m *mockFeeEstimator) EstimateFeePerKW(
|
||||
}
|
||||
}
|
||||
|
||||
func (m *mockFeeEstimator) RelayFeePerKW() lnwallet.SatPerKWeight {
|
||||
return 1e3
|
||||
}
|
||||
|
||||
func (m *mockFeeEstimator) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
@ -273,7 +273,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
estimator := &lnwallet.StaticFeeEstimator{FeePerKW: 6000}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(6000, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
|
@ -281,7 +281,7 @@ func (hn *HarnessNode) start(lndError chan<- error) error {
|
||||
|
||||
args := hn.cfg.genArgs()
|
||||
args = append(args, fmt.Sprintf("--profile=%d", 9000+hn.NodeID))
|
||||
hn.cmd = exec.Command("./lnd-debug", args...)
|
||||
hn.cmd = exec.Command("./lnd-itest", args...)
|
||||
|
||||
// Redirect stderr output to buffer
|
||||
var errb bytes.Buffer
|
||||
|
@ -1131,7 +1131,7 @@ func TestHTLCSigNumber(t *testing.T) {
|
||||
}
|
||||
|
||||
// Calculate two values that will be below and above Bob's dust limit.
|
||||
estimator := &StaticFeeEstimator{FeePerKW: 6000}
|
||||
estimator := NewStaticFeeEstimator(6000, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get fee: %v", err)
|
||||
|
@ -59,22 +59,50 @@ type FeeEstimator interface {
|
||||
// Stop stops any spawned goroutines and cleans up the resources used
|
||||
// by the fee estimator.
|
||||
Stop() error
|
||||
|
||||
// RelayFeePerKW returns the minimum fee rate required for transactions
|
||||
// to be relayed. This is also the basis for calculation of the dust
|
||||
// limit.
|
||||
RelayFeePerKW() SatPerKWeight
|
||||
}
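// Illustrative sketch (not part of this commit): RelayFeePerKW is a rate in
// satoshis per 1000 weight units, so the minimum relay fee for a transaction
// of a known weight follows by simple scaling. The helper name is
// hypothetical; btcutil is assumed to be imported by this package already.
func minRelayFee(estimator FeeEstimator, txWeight int64) btcutil.Amount {
	relayFee := estimator.RelayFeePerKW()
	return btcutil.Amount(relayFee) * btcutil.Amount(txWeight) / 1000
}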
|
||||
|
||||
// StaticFeeEstimator will return a static value for all fee calculation
|
||||
// requests. It is designed to be replaced by a proper fee calculation
|
||||
// implementation.
|
||||
// implementation. The fees are not accessible directly, because changing them
|
||||
// would not be thread safe.
|
||||
type StaticFeeEstimator struct {
|
||||
// FeePerKW is the static fee rate in satoshis-per-vbyte that will be
|
||||
// feePerKW is the static fee rate in satoshis-per-vbyte that will be
|
||||
// returned by this fee estimator.
|
||||
FeePerKW SatPerKWeight
|
||||
feePerKW SatPerKWeight
|
||||
|
||||
// relayFee is the minimum fee rate required for transactions to be
|
||||
// relayed.
|
||||
relayFee SatPerKWeight
|
||||
}
|
||||
|
||||
// NewStaticFeeEstimator returns a new static fee estimator instance.
|
||||
func NewStaticFeeEstimator(feePerKW,
|
||||
relayFee SatPerKWeight) *StaticFeeEstimator {
|
||||
|
||||
return &StaticFeeEstimator{
|
||||
feePerKW: feePerKW,
|
||||
relayFee: relayFee,
|
||||
}
|
||||
}
|
||||
|
||||
// EstimateFeePerKW will return a static value for fee calculations.
|
||||
//
|
||||
// NOTE: This method is part of the FeeEstimator interface.
|
||||
func (e StaticFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, error) {
|
||||
return e.FeePerKW, nil
|
||||
return e.feePerKW, nil
|
||||
}
|
||||
|
||||
// RelayFeePerKW returns the minimum fee rate required for transactions to be
|
||||
// relayed.
|
||||
//
|
||||
// NOTE: This method is part of the FeeEstimator interface.
|
||||
func (e StaticFeeEstimator) RelayFeePerKW() SatPerKWeight {
|
||||
return e.relayFee
|
||||
}
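// Illustrative sketch (not part of this commit): with the fields unexported,
// callers construct the estimator via NewStaticFeeEstimator instead of
// setting FeePerKW directly. The rates below are arbitrary sat/kw values.
func exampleStaticEstimatorUsage() (SatPerKWeight, SatPerKWeight, error) {
	estimator := NewStaticFeeEstimator(12500, 1000)
	if err := estimator.Start(); err != nil {
		return 0, 0, err
	}
	defer estimator.Stop()

	feePerKw, err := estimator.EstimateFeePerKW(6)
	if err != nil {
		return 0, 0, err
	}

	return feePerKw, estimator.RelayFeePerKW(), nil
}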
|
||||
|
||||
// Start signals the FeeEstimator to start any processes or goroutines
|
||||
@ -206,6 +234,14 @@ func (b *BtcdFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight, er
|
||||
return feeEstimate, nil
|
||||
}
|
||||
|
||||
// RelayFeePerKW returns the minimum fee rate required for transactions to be
|
||||
// relayed.
|
||||
//
|
||||
// NOTE: This method is part of the FeeEstimator interface.
|
||||
func (b *BtcdFeeEstimator) RelayFeePerKW() SatPerKWeight {
|
||||
return b.minFeePerKW
|
||||
}
|
||||
|
||||
// fetchEstimate returns a fee estimate for a transaction to be confirmed in
|
||||
// confTarget blocks. The estimate is returned in sat/kw.
|
||||
func (b *BtcdFeeEstimator) fetchEstimate(confTarget uint32) (SatPerKWeight, error) {
|
||||
@ -359,6 +395,14 @@ func (b *BitcoindFeeEstimator) EstimateFeePerKW(numBlocks uint32) (SatPerKWeight
|
||||
return feeEstimate, nil
|
||||
}
|
||||
|
||||
// RelayFeePerKW returns the minimum fee rate required for transactions to be
|
||||
// relayed.
|
||||
//
|
||||
// NOTE: This method is part of the FeeEstimator interface.
|
||||
func (b *BitcoindFeeEstimator) RelayFeePerKW() SatPerKWeight {
|
||||
return b.minFeePerKW
|
||||
}
|
||||
|
||||
// fetchEstimate returns a fee estimate for a transaction to be confirmed in
|
||||
// confTarget blocks. The estimate is returned in sat/kw.
|
||||
func (b *BitcoindFeeEstimator) fetchEstimate(confTarget uint32) (SatPerKWeight, error) {
|
||||
|
@ -74,9 +74,7 @@ func TestStaticFeeEstimator(t *testing.T) {
|
||||
|
||||
const feePerKw = lnwallet.FeePerKwFloor
|
||||
|
||||
feeEstimator := &lnwallet.StaticFeeEstimator{
|
||||
FeePerKW: feePerKw,
|
||||
}
|
||||
feeEstimator := lnwallet.NewStaticFeeEstimator(feePerKw, 0)
|
||||
if err := feeEstimator.Start(); err != nil {
|
||||
t.Fatalf("unable to start fee estimator: %v", err)
|
||||
}
|
||||
|
@ -368,7 +368,7 @@ func createTestWallet(tempTestDir string, miningNode *rpctest.Harness,
|
||||
WalletController: wc,
|
||||
Signer: signer,
|
||||
ChainIO: bio,
|
||||
FeeEstimator: lnwallet.StaticFeeEstimator{FeePerKW: 2500},
|
||||
FeeEstimator: lnwallet.NewStaticFeeEstimator(2500, 0),
|
||||
DefaultConstraints: channeldb.ChannelConstraints{
|
||||
DustLimit: 500,
|
||||
MaxPendingAmount: lnwire.NewMSatFromSatoshis(btcutil.SatoshiPerBitcoin) * 100,
|
||||
@ -2440,7 +2440,7 @@ func runTests(t *testing.T, walletDriver *lnwallet.WalletDriver,
|
||||
}
|
||||
|
||||
case "neutrino":
|
||||
feeEstimator = lnwallet.StaticFeeEstimator{FeePerKW: 62500}
|
||||
feeEstimator = lnwallet.NewStaticFeeEstimator(62500, 0)
|
||||
|
||||
// Set some package-level variable to speed up
|
||||
// operation for tests.
|
||||
|
@ -229,7 +229,7 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error)
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
|
||||
estimator := &StaticFeeEstimator{FeePerKW: 6000}
|
||||
estimator := NewStaticFeeEstimator(6000, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
return nil, nil, nil, err
|
||||
|
@ -71,6 +71,44 @@ const (
|
||||
HtlcSecondLevelRevoke WitnessType = 9
|
||||
)
|
||||
|
||||
// String returns a human-readable version of the target WitnessType.
|
||||
func (wt WitnessType) String() string {
|
||||
switch wt {
|
||||
case CommitmentTimeLock:
|
||||
return "CommitmentTimeLock"
|
||||
|
||||
case CommitmentNoDelay:
|
||||
return "CommitmentNoDelay"
|
||||
|
||||
case CommitmentRevoke:
|
||||
return "CommitmentRevoke"
|
||||
|
||||
case HtlcOfferedRevoke:
|
||||
return "HtlcOfferedRevoke"
|
||||
|
||||
case HtlcAcceptedRevoke:
|
||||
return "HtlcAcceptedRevoke"
|
||||
|
||||
case HtlcOfferedTimeoutSecondLevel:
|
||||
return "HtlcOfferedTimeoutSecondLevel"
|
||||
|
||||
case HtlcAcceptedSuccessSecondLevel:
|
||||
return "HtlcAcceptedSuccessSecondLevel"
|
||||
|
||||
case HtlcOfferedRemoteTimeout:
|
||||
return "HtlcOfferedRemoteTimeout"
|
||||
|
||||
case HtlcAcceptedRemoteSuccess:
|
||||
return "HtlcAcceptedRemoteSuccess"
|
||||
|
||||
case HtlcSecondLevelRevoke:
|
||||
return "HtlcSecondLevelRevoke"
|
||||
|
||||
default:
|
||||
return fmt.Sprintf("Unknown WitnessType: %v", uint32(wt))
|
||||
}
|
||||
}
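// Illustrative sketch (not part of this commit): with the Stringer in place,
// log statements can name the witness type instead of printing its raw
// integer value.
func witnessTypeLabel(wt WitnessType) string {
	return fmt.Sprintf("witness_type=%v", wt)
}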
|
||||
|
||||
// WitnessGenerator represents a function which is able to generate the final
|
||||
// witness for a particular public key script. This function acts as an
|
||||
// abstraction layer, hiding the details of the underlying script.
|
||||
|
mock.go
@ -168,10 +168,12 @@ func (m *mockSpendNotifier) Spend(outpoint *wire.OutPoint, height int32,
|
||||
}
|
||||
}
|
||||
|
||||
type mockChainIO struct{}
|
||||
type mockChainIO struct {
|
||||
bestHeight int32
|
||||
}
|
||||
|
||||
func (*mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) {
|
||||
return activeNetParams.GenesisHash, fundingBroadcastHeight, nil
|
||||
func (m *mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) {
|
||||
return activeNetParams.GenesisHash, m.bestHeight, nil
|
||||
}
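// Illustrative sketch (not part of this commit): tests can now pin the height
// reported by the mock chain backend, for example to the broadcast height the
// funding manager expects, instead of relying on a hard-coded constant.
func exampleMockChainIO() (int32, error) {
	bio := &mockChainIO{bestHeight: fundingBroadcastHeight}
	_, bestHeight, err := bio.GetBestBlock()
	return bestHeight, err
}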
|
||||
|
||||
func (*mockChainIO) GetUtxo(op *wire.OutPoint, _ []byte,
|
||||
|
nursery_store.go
@ -23,21 +23,6 @@ import (
|
||||
//
|
||||
// utxn<chain-hash>/
|
||||
// |
|
||||
// | LAST PURGED + FINALIZED HEIGHTS
|
||||
// |
|
||||
// | Each nursery store tracks a "last graduated height", which records the
|
||||
// | most recent block height for which the nursery store has successfully
|
||||
// | graduated all outputs. It also tracks a "last finalized height", which
|
||||
// | records the last block height that the nursery attempted to graduate.
|
||||
// | If a finalized height has kindergarten outputs, the sweep txn for these
|
||||
// | outputs will be stored in the height bucket. This ensures that the same
|
||||
// | txid will be used after restarts. Otherwise, the nursery will be unable
|
||||
// | to recover the txid of kindergarten sweep transaction it has already
|
||||
// | broadcast.
|
||||
// |
|
||||
// ├── last-finalized-height-key: <last-finalized-height>
|
||||
// ├── last-graduated-height-key: <last-graduated-height>
|
||||
// |
|
||||
// | CHANNEL INDEX
|
||||
// |
|
||||
// | The channel index contains a directory for each channel that has a
|
||||
@ -72,10 +57,7 @@ import (
|
||||
// | relative file path:
|
||||
// | e.g. <chan-point-3>/<prefix><outpoint-2>/
|
||||
// | that can be queried in the channel index to retrieve the serialized
|
||||
// | output. If a height bucket is less than or equal to the current last
|
||||
// | finalized height and has a non-zero number of kindergarten outputs, a
|
||||
// | height bucket will also contain the finalized kindergarten sweep txn
|
||||
// | under the "finalized-kndr-txn" key.
|
||||
// | output.
|
||||
// |
|
||||
// └── height-index-key/
|
||||
// ├── <height-1>/ <- HEIGHT BUCKET
|
||||
@ -84,12 +66,15 @@ import (
|
||||
// | | └── <state-prefix><outpoint-5>: ""
|
||||
// | ├── <chan-point-2>/
|
||||
// | | └── <state-prefix><outpoint-3>: ""
|
||||
// | └── finalized-kndr-txn: "" | <kndr-sweep-tnx>
|
||||
// └── <height-2>/
|
||||
// └── <chan-point-1>/
|
||||
// └── <state-prefix><outpoint-1>: ""
|
||||
// └── <state-prefix><outpoint-2>: ""
|
||||
|
||||
// TODO(joostjager): Add database migration to clean up now unused last
|
||||
// graduated height and finalized txes. This also prevents people downgrading
|
||||
// and surprising the downgraded nursery with missing data.
|
||||
|
||||
// NurseryStore abstracts the persistent storage layer for the utxo nursery.
|
||||
// Concretely, it stores commitment and htlc outputs until any time-bounded
|
||||
// constraints have fully matured. The store exposes methods for enumerating its
|
||||
@ -110,47 +95,30 @@ type NurseryStore interface {
|
||||
CribToKinder(*babyOutput) error
|
||||
|
||||
// PreschoolToKinder atomically moves a kidOutput from the preschool
|
||||
// bucket to the kindergarten bucket. This transition should be
|
||||
// executed after receiving confirmation of the preschool output.
|
||||
// Incoming HTLC's we need to go to the second-layer to claim, and also
|
||||
// our commitment outputs fall into this class.
|
||||
PreschoolToKinder(*kidOutput) error
|
||||
// bucket to the kindergarten bucket. This transition should be executed
|
||||
// after receiving confirmation of the preschool output. Incoming HTLC's
|
||||
// we need to go to the second-layer to claim, and also our commitment
|
||||
// outputs fall into this class.
|
||||
//
|
||||
// An additional parameter specifies the last graduated height. This is
|
||||
// used in case of late registration. It schedules the output for sweep
|
||||
// at the next epoch even though it has already expired earlier.
|
||||
PreschoolToKinder(kid *kidOutput, lastGradHeight uint32) error
|
||||
|
||||
// GraduateKinder atomically moves the kindergarten class at the
|
||||
// provided height into the graduated status. This involves removing the
|
||||
// kindergarten entries from both the height and channel indexes, and
|
||||
// cleaning up the finalized kindergarten sweep txn. The height bucket
|
||||
// will be opportunistically pruned from the height index as outputs are
|
||||
// GraduateKinder atomically moves an output at the provided height into
|
||||
// the graduated status. This involves removing the kindergarten entries
|
||||
// from both the height and channel indexes. The height bucket will be
|
||||
// opportunistically pruned from the height index as outputs are
|
||||
// removed.
|
||||
GraduateKinder(height uint32) error
|
||||
GraduateKinder(height uint32, output *kidOutput) error
|
||||
|
||||
// FetchPreschools returns a list of all outputs currently stored in
|
||||
// the preschool bucket.
|
||||
FetchPreschools() ([]kidOutput, error)
|
||||
|
||||
// FetchClass returns a list of kindergarten and crib outputs whose
|
||||
// timelocks expire at the given height. If the kindergarten class at
|
||||
// this height has been finalized previously, via FinalizeKinder, it
|
||||
// will also return the finalized kindergarten sweep txn.
|
||||
FetchClass(height uint32) (*wire.MsgTx, []kidOutput, []babyOutput, error)
|
||||
|
||||
// FinalizeKinder accepts a block height and the kindergarten sweep txn
|
||||
// computed for this height. Upon startup, we will rebroadcast any
|
||||
// finalized kindergarten txns instead of signing a new txn, as this
|
||||
// would result in a different txid from a preceding broadcast.
|
||||
FinalizeKinder(height uint32, tx *wire.MsgTx) error
|
||||
|
||||
// LastFinalizedHeight returns the last block height for which the
|
||||
// nursery store finalized a kindergarten class.
|
||||
LastFinalizedHeight() (uint32, error)
|
||||
|
||||
// GraduateHeight records the provided height as the last height for
|
||||
// which the nursery store successfully graduated all outputs.
|
||||
GraduateHeight(height uint32) error
|
||||
|
||||
// LastGraduatedHeight returns the last block height for which the
|
||||
// nursery store successfully graduated all outputs.
|
||||
LastGraduatedHeight() (uint32, error)
|
||||
// timelocks expire at the given height.
|
||||
FetchClass(height uint32) ([]kidOutput, []babyOutput, error)
|
||||
|
||||
// HeightsBelowOrEqual returns the lowest non-empty heights in the
|
||||
// height index, that exist at or below the provided upper bound.
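// Illustrative sketch (not part of this commit): the per-output lifecycle
// through the revised interface, assuming the package-internal kidOutput type
// and the Incubate method as exercised in the tests below.
func incubateAndGraduate(ns NurseryStore, kid *kidOutput,
	lastGradHeight uint32) error {

	// Register the output; it starts out in the preschool bucket.
	if err := ns.Incubate([]kidOutput{*kid}, nil); err != nil {
		return err
	}

	// Once its parent transaction confirms, promote it to kindergarten.
	// The last graduated height lets a late registration be scheduled for
	// the next epoch rather than a class that has already been swept.
	if err := ns.PreschoolToKinder(kid, lastGradHeight); err != nil {
		return err
	}

	// At the maturity height the output is returned by FetchClass and,
	// once the sweep confirms, graduated individually.
	maturityHeight := kid.ConfHeight() + kid.BlocksToMaturity()
	if _, _, err := ns.FetchClass(maturityHeight); err != nil {
		return err
	}

	return ns.GraduateKinder(maturityHeight, kid)
}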
|
||||
@ -181,14 +149,6 @@ var (
|
||||
// the root-level, chain-segmented bucket for each nursery store.
|
||||
utxnChainPrefix = []byte("utxn")
|
||||
|
||||
// lastFinalizedHeightKey is a static key used to locate nursery store's
|
||||
// last finalized height.
|
||||
lastFinalizedHeightKey = []byte("last-finalized-height")
|
||||
|
||||
// lastGraduatedHeightKey is a static key used to retrieve the height of
|
||||
// the last bucket that successfully graduated all outputs.
|
||||
lastGraduatedHeightKey = []byte("last-graduated-height")
|
||||
|
||||
// channelIndexKey is a static key used to lookup the bucket containing
|
||||
// all of the nursery's active channels.
|
||||
channelIndexKey = []byte("channel-index")
|
||||
@ -197,10 +157,6 @@ var (
|
||||
// containing all heights for which the nursery will need to take
|
||||
// action.
|
||||
heightIndexKey = []byte("height-index")
|
||||
|
||||
// finalizedKndrTxnKey is a static key that can be used to locate a
|
||||
// finalized kindergarten sweep txn.
|
||||
finalizedKndrTxnKey = []byte("finalized-kndr-txn")
|
||||
)
|
||||
|
||||
// Defines the state prefixes that will be used to persistently track an
|
||||
@ -415,7 +371,9 @@ func (ns *nurseryStore) CribToKinder(bby *babyOutput) error {
|
||||
// PreschoolToKinder atomically moves a kidOutput from the preschool bucket to
|
||||
// the kindergarten bucket. This transition should be executed after receiving
|
||||
// confirmation of the preschool output's commitment transaction.
|
||||
func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput) error {
|
||||
func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput,
|
||||
lastGradHeight uint32) error {
|
||||
|
||||
return ns.db.Update(func(tx *bbolt.Tx) error {
|
||||
// Create or retrieve the channel bucket corresponding to the
|
||||
// kid output's origin channel point.
|
||||
@ -478,13 +436,6 @@ func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput) error {
|
||||
maturityHeight = kid.ConfHeight() + kid.BlocksToMaturity()
|
||||
}
|
||||
|
||||
// In the case of a Late Registration, we've already graduated
|
||||
// the class that this kid is destined for. So we'll bump its
|
||||
// height by one to ensure we don't forget to graduate it.
|
||||
lastGradHeight, err := ns.getLastGraduatedHeight(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if maturityHeight <= lastGradHeight {
|
||||
utxnLog.Debugf("Late Registration for kid output=%v "+
|
||||
"detected: class_height=%v, "+
|
||||
@ -515,41 +466,22 @@ func (ns *nurseryStore) PreschoolToKinder(kid *kidOutput) error {
|
||||
})
|
||||
}
|
||||
|
||||
// GraduateKinder atomically moves the kindergarten class at the provided height
|
||||
// into the graduated status. This involves removing the kindergarten entries
|
||||
// from both the height and channel indexes, and cleaning up the finalized
|
||||
// kindergarten sweep txn. The height bucket will be opportunistically pruned
|
||||
// from the height index as outputs are removed.
|
||||
func (ns *nurseryStore) GraduateKinder(height uint32) error {
|
||||
// GraduateKinder atomically moves an output at the provided height into the
|
||||
// graduated status. This involves removing the kindergarten entries from both
|
||||
// the height and channel indexes. The height bucket will be opportunistically
|
||||
// pruned from the height index as outputs are removed.
|
||||
func (ns *nurseryStore) GraduateKinder(height uint32, kid *kidOutput) error {
|
||||
return ns.db.Update(func(tx *bbolt.Tx) error {
|
||||
|
||||
// Since all kindergarten outputs at a particular height are
|
||||
// swept in a single txn, we can now safely delete the finalized
|
||||
// txn, since it has already been broadcast and confirmed.
|
||||
hghtBucket := ns.getHeightBucket(tx, height)
|
||||
if hghtBucket == nil {
|
||||
// Nothing to delete, bucket has already been removed.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove the finalized kindergarten txn, we do this before
|
||||
// removing the outputs so that the extra entry doesn't prevent
|
||||
// the height bucket from being opportunistically pruned below.
|
||||
if err := hghtBucket.Delete(finalizedKndrTxnKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For each kindergarten found output, delete its entry from the
|
||||
// For the kindergarten output, delete its entry from the
|
||||
// height and channel index, and create a new grad output in the
|
||||
// channel index.
|
||||
return ns.forEachHeightPrefix(tx, kndrPrefix, height,
|
||||
func(v []byte) error {
|
||||
var kid kidOutput
|
||||
err := kid.Decode(bytes.NewReader(v))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
outpoint := kid.OutPoint()
|
||||
chanPoint := kid.OriginChanPoint()
|
||||
|
||||
@ -594,29 +526,6 @@ func (ns *nurseryStore) GraduateKinder(height uint32) error {
|
||||
// using graduate-prefixed key.
|
||||
return chanBucket.Put(pfxOutputKey,
|
||||
gradBuffer.Bytes())
|
||||
},
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
// FinalizeKinder accepts a block height and a finalized kindergarten sweep
|
||||
// transaction, persisting the transaction at the appropriate height bucket. The
|
||||
// nursery store's last finalized height is also updated with the provided
|
||||
// height.
|
||||
func (ns *nurseryStore) FinalizeKinder(height uint32,
|
||||
finalTx *wire.MsgTx) error {
|
||||
|
||||
return ns.db.Update(func(tx *bbolt.Tx) error {
|
||||
return ns.finalizeKinder(tx, height, finalTx)
|
||||
})
|
||||
}
|
||||
|
||||
// GraduateHeight persists the provided height as the nursery store's last
|
||||
// graduated height.
|
||||
func (ns *nurseryStore) GraduateHeight(height uint32) error {
|
||||
|
||||
return ns.db.Update(func(tx *bbolt.Tx) error {
|
||||
return ns.putLastGraduatedHeight(tx, height)
|
||||
})
|
||||
}
|
||||
|
||||
@ -625,23 +534,15 @@ func (ns *nurseryStore) GraduateHeight(height uint32) error {
|
||||
// FetchClass returns a list of the kindergarten and crib outputs whose timeouts
|
||||
// are expiring
|
||||
func (ns *nurseryStore) FetchClass(
|
||||
height uint32) (*wire.MsgTx, []kidOutput, []babyOutput, error) {
|
||||
height uint32) ([]kidOutput, []babyOutput, error) {
|
||||
|
||||
// Construct list of all crib and kindergarten outputs that need to be
|
||||
// processed at the provided block height.
|
||||
var finalTx *wire.MsgTx
|
||||
var kids []kidOutput
|
||||
var babies []babyOutput
|
||||
if err := ns.db.View(func(tx *bbolt.Tx) error {
|
||||
|
||||
var err error
|
||||
finalTx, err = ns.getFinalizedTxn(tx, height)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Append each crib output to our list of babyOutputs.
|
||||
if err = ns.forEachHeightPrefix(tx, cribPrefix, height,
|
||||
if err := ns.forEachHeightPrefix(tx, cribPrefix, height,
|
||||
func(buf []byte) error {
|
||||
|
||||
// We will attempt to deserialize all outputs
|
||||
@ -683,10 +584,10 @@ func (ns *nurseryStore) FetchClass(
|
||||
})
|
||||
|
||||
}); err != nil {
|
||||
return nil, nil, nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return finalTx, kids, babies, nil
|
||||
return kids, babies, nil
|
||||
}
|
||||
|
||||
// FetchPreschools returns a list of all outputs currently stored in the
|
||||
@ -938,32 +839,6 @@ func (ns *nurseryStore) RemoveChannel(chanPoint *wire.OutPoint) error {
|
||||
})
|
||||
}
|
||||
|
||||
// LastFinalizedHeight returns the last block height for which the nursery
|
||||
// store has finalized a kindergarten class.
|
||||
func (ns *nurseryStore) LastFinalizedHeight() (uint32, error) {
|
||||
var lastFinalizedHeight uint32
|
||||
err := ns.db.View(func(tx *bbolt.Tx) error {
|
||||
var err error
|
||||
lastFinalizedHeight, err = ns.getLastFinalizedHeight(tx)
|
||||
return err
|
||||
})
|
||||
|
||||
return lastFinalizedHeight, err
|
||||
}
|
||||
|
||||
// LastGraduatedHeight returns the last block height for which the nursery
|
||||
// store has successfully graduated all outputs.
|
||||
func (ns *nurseryStore) LastGraduatedHeight() (uint32, error) {
|
||||
var lastGraduatedHeight uint32
|
||||
err := ns.db.View(func(tx *bbolt.Tx) error {
|
||||
var err error
|
||||
lastGraduatedHeight, err = ns.getLastGraduatedHeight(tx)
|
||||
return err
|
||||
})
|
||||
|
||||
return lastGraduatedHeight, err
|
||||
}
|
||||
|
||||
// Helper Methods
|
||||
|
||||
// enterCrib accepts a new htlc output that the nursery will incubate through
|
||||
@ -994,6 +869,8 @@ func (ns *nurseryStore) enterCrib(tx *bbolt.Tx, baby *babyOutput) error {
|
||||
|
||||
// Next, retrieve or create the height-channel bucket located in the
|
||||
// height bucket corresponding to the baby output's CLTV expiry height.
|
||||
|
||||
// TODO: Handle late registration.
|
||||
hghtChanBucket, err := ns.createHeightChanBucket(tx,
|
||||
baby.expiry, chanPoint)
|
||||
if err != nil {
|
||||
@ -1332,145 +1209,6 @@ func (ns *nurseryStore) forChanOutputs(tx *bbolt.Tx, chanPoint *wire.OutPoint,
|
||||
return chanBucket.ForEach(callback)
|
||||
}
|
||||
|
||||
// getLastFinalizedHeight is a helper method that retrieves the last height for
|
||||
// which the database finalized its persistent state.
|
||||
func (ns *nurseryStore) getLastFinalizedHeight(tx *bbolt.Tx) (uint32, error) {
|
||||
// Retrieve the chain bucket associated with the given nursery store.
|
||||
chainBucket := tx.Bucket(ns.pfxChainKey)
|
||||
if chainBucket == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Lookup the last finalized height in the top-level chain bucket.
|
||||
heightBytes := chainBucket.Get(lastFinalizedHeightKey)
|
||||
if heightBytes == nil {
|
||||
// We have never finalized, return height 0.
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// If the resulting bytes are not sized like a uint32, then we have
|
||||
// never finalized, so we return 0.
|
||||
|
||||
// Otherwise, parse the bytes and return the last finalized height.
|
||||
return byteOrder.Uint32(heightBytes), nil
|
||||
}
|
||||
|
||||
// finalizeKinder records a finalized kindergarten sweep txn to the given height
|
||||
// bucket. It also updates the nursery store's last finalized height, so that we
|
||||
// do not finalize the same height twice. If the finalized txn is nil, i.e. if
|
||||
// the height has no kindergarten outputs, the height will be marked as
|
||||
// finalized, and we skip the process of writing the txn. When the class is
|
||||
// loaded, a nil value will be returned if no txn has been written to a
|
||||
// finalized height bucket.
|
||||
func (ns *nurseryStore) finalizeKinder(tx *bbolt.Tx, height uint32,
|
||||
finalTx *wire.MsgTx) error {
|
||||
|
||||
// TODO(conner) ensure height is greater that current finalized height.
|
||||
|
||||
// 1. Write the last finalized height to the chain bucket.
|
||||
|
||||
// Ensure that the chain bucket for this nursery store exists.
|
||||
chainBucket, err := tx.CreateBucketIfNotExists(ns.pfxChainKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Serialize the provided last-finalized height, and store it in the
|
||||
// top-level chain bucket for this nursery store.
|
||||
var lastHeightBytes [4]byte
|
||||
byteOrder.PutUint32(lastHeightBytes[:], height)
|
||||
|
||||
err = chainBucket.Put(lastFinalizedHeightKey, lastHeightBytes[:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 2. Write the finalized txn in the appropriate height bucket.
|
||||
|
||||
// If there is no finalized txn, we have nothing to do.
|
||||
if finalTx == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise serialize the finalized txn and write it to the height
|
||||
// bucket.
|
||||
hghtBucket := ns.getHeightBucket(tx, height)
|
||||
if hghtBucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var finalTxnBuf bytes.Buffer
|
||||
if err := finalTx.Serialize(&finalTxnBuf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return hghtBucket.Put(finalizedKndrTxnKey, finalTxnBuf.Bytes())
|
||||
}
|
||||
|
||||
// getFinalizedTxn retrieves the finalized kindergarten sweep txn at the given
|
||||
// height, returning nil if one was not found.
|
||||
func (ns *nurseryStore) getFinalizedTxn(tx *bbolt.Tx,
|
||||
height uint32) (*wire.MsgTx, error) {
|
||||
|
||||
hghtBucket := ns.getHeightBucket(tx, height)
|
||||
if hghtBucket == nil {
|
||||
// No class to finalize.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
finalTxBytes := hghtBucket.Get(finalizedKndrTxnKey)
|
||||
if finalTxBytes == nil {
|
||||
// No finalized txn for this height.
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Otherwise, deserialize and return the finalized transaction.
|
||||
txn := &wire.MsgTx{}
|
||||
if err := txn.Deserialize(bytes.NewReader(finalTxBytes)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return txn, nil
|
||||
}
|
||||
|
||||
// getLastGraduatedHeight is a helper method that retrieves the last height for
|
||||
// which the database graduated all outputs successfully.
|
||||
func (ns *nurseryStore) getLastGraduatedHeight(tx *bbolt.Tx) (uint32, error) {
|
||||
// Retrieve the chain bucket associated with the given nursery store.
|
||||
chainBucket := tx.Bucket(ns.pfxChainKey)
|
||||
if chainBucket == nil {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Lookup the last graduated height in the top-level chain bucket.
|
||||
heightBytes := chainBucket.Get(lastGraduatedHeightKey)
|
||||
if heightBytes == nil {
|
||||
// We have never graduated before, return height 0.
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Otherwise, parse the bytes and return the last graduated height.
|
||||
return byteOrder.Uint32(heightBytes), nil
|
||||
}
|
||||
|
||||
// putLastGraduatedHeight is a helper method that writes the provided height under
|
||||
// the last graduated height key.
|
||||
func (ns *nurseryStore) putLastGraduatedHeight(tx *bbolt.Tx, height uint32) error {
|
||||
|
||||
// Ensure that the chain bucket for this nursery store exists.
|
||||
chainBucket, err := tx.CreateBucketIfNotExists(ns.pfxChainKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Serialize the provided last-graduated height, and store it in the
|
||||
// top-level chain bucket for this nursery store.
|
||||
var lastHeightBytes [4]byte
|
||||
byteOrder.PutUint32(lastHeightBytes[:], height)
|
||||
|
||||
return chainBucket.Put(lastGraduatedHeightKey, lastHeightBytes[:])
|
||||
}
|
||||
|
||||
// errBucketNotEmpty signals that an attempt to prune a particular
|
||||
// bucket failed because it still has active outputs.
|
||||
var errBucketNotEmpty = errors.New("bucket is not empty, cannot be pruned")
|
||||
@ -1541,7 +1279,8 @@ func (ns *nurseryStore) pruneHeight(tx *bbolt.Tx, height uint32) (bool, error) {
|
||||
// attempt to remove each one if they are empty, keeping track of the
|
||||
// number of height-channel buckets that still have active outputs.
|
||||
if err := hghtBucket.ForEach(func(chanBytes, v []byte) error {
|
||||
// Skip the finalized txn key.
|
||||
// Skip the finalized txn key if it still exists from a previous
|
||||
// db version.
|
||||
if v != nil {
|
||||
return nil
|
||||
}
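// Illustrative sketch (not part of this commit): bbolt's ForEach reports a
// nil value for nested buckets and a non-nil value for plain key/value pairs,
// e.g.:
//
//	hghtBucket.ForEach(func(k, v []byte) error {
//		if v == nil {
//			// k names a height-channel sub-bucket.
//			return nil
//		}
//		// k is a plain key, such as a leftover finalized-kndr-txn
//		// entry written by a previous db version.
//		return nil
//	})
//
// which is why the non-nil check above is sufficient to skip the legacy key.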
|
||||
|
@ -88,8 +88,6 @@ func TestNurseryStoreInit(t *testing.T) {
|
||||
|
||||
assertNumChannels(t, ns, 0)
|
||||
assertNumPreschools(t, ns, 0)
|
||||
assertLastFinalizedHeight(t, ns, 0)
|
||||
assertLastGraduatedHeight(t, ns, 0)
|
||||
}
|
||||
|
||||
// TestNurseryStoreIncubate tests the primary state transitions taken by outputs
|
||||
@ -172,7 +170,7 @@ func TestNurseryStoreIncubate(t *testing.T) {
|
||||
|
||||
// Now, move the commitment output to the kindergarten
|
||||
// bucket.
|
||||
err = ns.PreschoolToKinder(test.commOutput)
|
||||
err = ns.PreschoolToKinder(test.commOutput, 0)
|
||||
if err != test.err {
|
||||
t.Fatalf("unable to move commitment output from "+
|
||||
"pscl to kndr: %v", err)
|
||||
@ -212,7 +210,7 @@ func TestNurseryStoreIncubate(t *testing.T) {
|
||||
maturityHeight := test.commOutput.ConfHeight() +
|
||||
test.commOutput.BlocksToMaturity()
|
||||
|
||||
err = ns.GraduateKinder(maturityHeight)
|
||||
err = ns.GraduateKinder(maturityHeight, test.commOutput)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to graduate kindergarten class at "+
|
||||
"height %d: %v", maturityHeight, err)
|
||||
@ -286,7 +284,8 @@ func TestNurseryStoreIncubate(t *testing.T) {
|
||||
maturityHeight := htlcOutput.ConfHeight() +
|
||||
htlcOutput.BlocksToMaturity()
|
||||
|
||||
err = ns.GraduateKinder(maturityHeight)
|
||||
err = ns.GraduateKinder(maturityHeight,
|
||||
&htlcOutput.kidOutput)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to graduate htlc output "+
|
||||
"from kndr to grad: %v", err)
|
||||
@ -333,93 +332,6 @@ func TestNurseryStoreIncubate(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestNurseryStoreFinalize tests that kindergarten sweep transactions are
|
||||
// properly persisted, and that the last finalized height is being set
|
||||
// accordingly.
|
||||
func TestNurseryStoreFinalize(t *testing.T) {
|
||||
cdb, cleanUp, err := makeTestDB()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open channel db: %v", err)
|
||||
}
|
||||
defer cleanUp()
|
||||
|
||||
ns, err := newNurseryStore(&bitcoinTestnetGenesis, cdb)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open nursery store: %v", err)
|
||||
}
|
||||
|
||||
kid := &kidOutputs[3]
|
||||
|
||||
// Compute the maturity height at which to enter the commitment output.
|
||||
maturityHeight := kid.ConfHeight() + kid.BlocksToMaturity()
|
||||
|
||||
// Since we haven't finalized before, we should see a last finalized
|
||||
// height of 0.
|
||||
assertLastFinalizedHeight(t, ns, 0)
|
||||
|
||||
// Begin incubating the commitment output, which will be placed in the
|
||||
// preschool bucket.
|
||||
err = ns.Incubate([]kidOutput{*kid}, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to incubate commitment output: %v", err)
|
||||
}
|
||||
|
||||
// Then move the commitment output to the kindergarten bucket, so that
|
||||
// the output is registered in the height index.
|
||||
err = ns.PreschoolToKinder(kid)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to move pscl output to kndr: %v", err)
|
||||
}
|
||||
|
||||
// We should still see a last finalized height of 0, since no classes
|
||||
// have been graduated.
|
||||
assertLastFinalizedHeight(t, ns, 0)
|
||||
|
||||
// Now, iteratively finalize all heights below the maturity height,
|
||||
// ensuring that the last finalized height is properly persisted, and
|
||||
// that the finalized transactions are all nil.
|
||||
for i := 0; i < int(maturityHeight); i++ {
|
||||
err = ns.FinalizeKinder(uint32(i), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to finalize kndr at height=%d: %v",
|
||||
i, err)
|
||||
}
|
||||
assertLastFinalizedHeight(t, ns, uint32(i))
|
||||
assertFinalizedTxn(t, ns, uint32(i), nil)
|
||||
}
|
||||
|
||||
// As we have now finalized all heights below the maturity height, we
|
||||
// should still see the commitment output in the kindergarten bucket at
|
||||
// its maturity height.
|
||||
assertKndrAtMaturityHeight(t, ns, kid)
|
||||
|
||||
// Now, finalize the kindergarten sweep transaction at the maturity
|
||||
// height.
|
||||
err = ns.FinalizeKinder(maturityHeight, timeoutTx)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to finalize kndr at height=%d: %v",
|
||||
maturityHeight, err)
|
||||
}
|
||||
|
||||
// The nursery store should now see the maturity height finalized, and
|
||||
// the finalized kindergarten sweep txn should be returned at this
|
||||
// height.
|
||||
assertLastFinalizedHeight(t, ns, maturityHeight)
|
||||
assertFinalizedTxn(t, ns, maturityHeight, timeoutTx)
|
||||
|
||||
// Lastly, continue to finalize heights above the maturity height. Each
|
||||
// should report having a nil finalized kindergarten sweep txn.
|
||||
for i := maturityHeight + 1; i < maturityHeight+10; i++ {
|
||||
err = ns.FinalizeKinder(uint32(i), nil)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to finalize kndr at height=%d: %v",
|
||||
i, err)
|
||||
}
|
||||
assertLastFinalizedHeight(t, ns, uint32(i))
|
||||
assertFinalizedTxn(t, ns, uint32(i), nil)
|
||||
}
|
||||
}
|
||||
|
||||
// TestNurseryStoreGraduate verifies that the nursery store properly removes
|
||||
// populated entries from the height index as it is purged, and that the last
|
||||
// purged height is set appropriately.
|
||||
@ -441,9 +353,6 @@ func TestNurseryStoreGraduate(t *testing.T) {
|
||||
// height index.
|
||||
maturityHeight := kid.ConfHeight() + kid.BlocksToMaturity()
|
||||
|
||||
// Since we have never purged, the last purged height should be 0.
|
||||
assertLastGraduatedHeight(t, ns, 0)
|
||||
|
||||
// First, add a commitment output to the nursery store, which is
|
||||
// initially inserted in the preschool bucket.
|
||||
err = ns.Incubate([]kidOutput{*kid}, nil)
|
||||
@ -453,7 +362,7 @@ func TestNurseryStoreGraduate(t *testing.T) {
|
||||
|
||||
// Then, move the commitment output to the kindergarten bucket, such
|
||||
// that it resides in the height index at its maturity height.
|
||||
err = ns.PreschoolToKinder(kid)
|
||||
err = ns.PreschoolToKinder(kid, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to move pscl output to kndr: %v", err)
|
||||
}
|
||||
@ -462,12 +371,6 @@ func TestNurseryStoreGraduate(t *testing.T) {
|
||||
// checking that each class is now empty, and that the last purged
|
||||
// height is set correctly.
|
||||
for i := 0; i < int(maturityHeight); i++ {
|
||||
err = ns.GraduateHeight(uint32(i))
|
||||
if err != nil {
|
||||
t.Fatalf("unable to purge height=%d: %v", i, err)
|
||||
}
|
||||
|
||||
assertLastGraduatedHeight(t, ns, uint32(i))
|
||||
assertHeightIsPurged(t, ns, uint32(i))
|
||||
}
|
||||
|
||||
@ -475,27 +378,7 @@ func TestNurseryStoreGraduate(t *testing.T) {
|
||||
// height.
|
||||
assertKndrAtMaturityHeight(t, ns, kid)
|
||||
|
||||
// Finalize the kindergarten transaction, ensuring that it is a non-nil
|
||||
// value.
|
||||
err = ns.FinalizeKinder(maturityHeight, timeoutTx)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to finalize kndr at height=%d: %v",
|
||||
maturityHeight, err)
|
||||
}
|
||||
|
||||
// Verify that the maturity height has now been finalized.
|
||||
assertLastFinalizedHeight(t, ns, maturityHeight)
|
||||
assertFinalizedTxn(t, ns, maturityHeight, timeoutTx)
|
||||
|
||||
// Finally, purge the non-empty maturity height, and check that the returned
|
||||
// class is empty.
|
||||
err = ns.GraduateHeight(maturityHeight)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to set graduated height=%d: %v", maturityHeight,
|
||||
err)
|
||||
}
|
||||
|
||||
err = ns.GraduateKinder(maturityHeight)
|
||||
err = ns.GraduateKinder(maturityHeight, kid)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to graduate kindergarten outputs at height=%d: "+
|
||||
"%v", maturityHeight, err)
|
||||
@ -528,36 +411,6 @@ func assertNumChanOutputs(t *testing.T, ns NurseryStore,
|
||||
}
|
||||
}
|
||||
|
||||
// assertLastFinalizedHeight checks that the nursery stores last finalized
|
||||
// height matches the expected height.
|
||||
func assertLastFinalizedHeight(t *testing.T, ns NurseryStore,
|
||||
expected uint32) {
|
||||
|
||||
lfh, err := ns.LastFinalizedHeight()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get last finalized height: %v", err)
|
||||
}
|
||||
|
||||
if lfh != expected {
|
||||
t.Fatalf("expected last finalized height to be %d, got %d",
|
||||
expected, lfh)
|
||||
}
|
||||
}
|
||||
|
||||
// assertLastGraduatedHeight checks that the nursery stores last purged height
|
||||
// matches the expected height.
|
||||
func assertLastGraduatedHeight(t *testing.T, ns NurseryStore, expected uint32) {
|
||||
lgh, err := ns.LastGraduatedHeight()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to get last graduated height: %v", err)
|
||||
}
|
||||
|
||||
if lgh != expected {
|
||||
t.Fatalf("expected last graduated height to be %d, got %d",
|
||||
expected, lgh)
|
||||
}
|
||||
}
|
||||
|
||||
// assertNumPreschools loads all preschool outputs and verifies their count
|
||||
// matches the expected number.
|
||||
func assertNumPreschools(t *testing.T, ns NurseryStore, expected int) {
|
||||
@ -592,16 +445,12 @@ func assertNumChannels(t *testing.T, ns NurseryStore, expected int) {
|
||||
func assertHeightIsPurged(t *testing.T, ns NurseryStore,
|
||||
height uint32) {
|
||||
|
||||
finalTx, kndrOutputs, cribOutputs, err := ns.FetchClass(height)
|
||||
kndrOutputs, cribOutputs, err := ns.FetchClass(height)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to retrieve class at height=%d: %v",
|
||||
height, err)
|
||||
}
|
||||
|
||||
if finalTx != nil {
|
||||
t.Fatalf("height=%d not purged, final txn should be nil", height)
|
||||
}
|
||||
|
||||
if kndrOutputs != nil {
|
||||
t.Fatalf("height=%d not purged, kndr outputs should be nil", height)
|
||||
}
|
||||
@ -617,7 +466,7 @@ func assertCribAtExpiryHeight(t *testing.T, ns NurseryStore,
|
||||
htlcOutput *babyOutput) {
|
||||
|
||||
expiryHeight := htlcOutput.expiry
|
||||
_, _, cribOutputs, err := ns.FetchClass(expiryHeight)
|
||||
_, cribOutputs, err := ns.FetchClass(expiryHeight)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to retrieve class at height=%d: %v",
|
||||
expiryHeight, err)
|
||||
@ -639,7 +488,7 @@ func assertCribNotAtExpiryHeight(t *testing.T, ns NurseryStore,
|
||||
htlcOutput *babyOutput) {
|
||||
|
||||
expiryHeight := htlcOutput.expiry
|
||||
_, _, cribOutputs, err := ns.FetchClass(expiryHeight)
|
||||
_, cribOutputs, err := ns.FetchClass(expiryHeight)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to retrieve class at height %d: %v",
|
||||
expiryHeight, err)
|
||||
@ -653,25 +502,6 @@ func assertCribNotAtExpiryHeight(t *testing.T, ns NurseryStore,
|
||||
}
|
||||
}
|
||||
|
||||
// assertFinalizedTxn loads the class at the given height and compares the
|
||||
// returned finalized txn to that in the class. It is safe to present a nil
|
||||
// expected transaction.
|
||||
func assertFinalizedTxn(t *testing.T, ns NurseryStore, height uint32,
|
||||
exFinalTx *wire.MsgTx) {
|
||||
|
||||
finalTx, _, _, err := ns.FetchClass(height)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to fetch class at height=%d: %v", height,
|
||||
err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(finalTx, exFinalTx) {
|
||||
t.Fatalf("expected finalized txn at height=%d "+
|
||||
"to be %v, got %v", height, finalTx.TxHash(),
|
||||
exFinalTx.TxHash())
|
||||
}
|
||||
}
|
||||
|
||||
// assertKndrAtMaturityHeight loads the class at the provided height and
|
||||
// verifies that the provided kid output is one of the kindergarten outputs
|
||||
// returned.
|
||||
@ -680,7 +510,7 @@ func assertKndrAtMaturityHeight(t *testing.T, ns NurseryStore,
|
||||
|
||||
maturityHeight := kndrOutput.ConfHeight() +
|
||||
kndrOutput.BlocksToMaturity()
|
||||
_, kndrOutputs, _, err := ns.FetchClass(maturityHeight)
|
||||
kndrOutputs, _, err := ns.FetchClass(maturityHeight)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to retrieve class at height %d: %v",
|
||||
maturityHeight, err)
|
||||
@ -705,7 +535,7 @@ func assertKndrNotAtMaturityHeight(t *testing.T, ns NurseryStore,
|
||||
maturityHeight := kndrOutput.ConfHeight() +
|
||||
kndrOutput.BlocksToMaturity()
|
||||
|
||||
_, kndrOutputs, _, err := ns.FetchClass(maturityHeight)
|
||||
kndrOutputs, _, err := ns.FetchClass(maturityHeight)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to retrieve class at height %d: %v",
|
||||
maturityHeight, err)
|
||||
|
@ -157,7 +157,7 @@ func TestPeerChannelClosureAcceptFeeInitiator(t *testing.T) {
|
||||
dummyDeliveryScript),
|
||||
}
|
||||
|
||||
estimator := lnwallet.StaticFeeEstimator{FeePerKW: 12500}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(12500, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query fee estimator: %v", err)
|
||||
@ -447,7 +447,7 @@ func TestPeerChannelClosureFeeNegotiationsInitiator(t *testing.T) {
|
||||
msg: respShutdown,
|
||||
}
|
||||
|
||||
estimator := lnwallet.StaticFeeEstimator{FeePerKW: 12500}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(12500, 0)
|
||||
initiatorIdealFeeRate, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
t.Fatalf("unable to query fee estimator: %v", err)
|
||||
|
server.go
@ -17,8 +17,6 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/lightningnetwork/lnd/sweep"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/connmgr"
|
||||
@ -40,6 +38,7 @@ import (
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/lightningnetwork/lnd/nat"
|
||||
"github.com/lightningnetwork/lnd/routing"
|
||||
"github.com/lightningnetwork/lnd/sweep"
|
||||
"github.com/lightningnetwork/lnd/ticker"
|
||||
"github.com/lightningnetwork/lnd/tor"
|
||||
)
|
||||
@ -156,6 +155,8 @@ type server struct {
|
||||
|
||||
utxoNursery *utxoNursery
|
||||
|
||||
sweeper *sweep.UtxoSweeper
|
||||
|
||||
chainArb *contractcourt.ChainArbitrator
|
||||
|
||||
sphinx *htlcswitch.OnionProcessor
|
||||
@ -597,24 +598,45 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sweeper := sweep.New(&sweep.UtxoSweeperConfig{
|
||||
srvrLog.Tracef("Sweeper batch window duration: %v",
|
||||
sweep.DefaultBatchWindowDuration)
|
||||
|
||||
sweeperStore, err := sweep.NewSweeperStore(
|
||||
chanDB, activeNetParams.GenesisHash,
|
||||
)
|
||||
if err != nil {
|
||||
srvrLog.Errorf("unable to create sweeper store: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.sweeper = sweep.New(&sweep.UtxoSweeperConfig{
|
||||
Estimator: cc.feeEstimator,
|
||||
GenSweepScript: func() ([]byte, error) {
|
||||
return newSweepPkScript(cc.wallet)
|
||||
},
|
||||
Signer: cc.wallet.Cfg.Signer,
|
||||
PublishTransaction: cc.wallet.PublishTransaction,
|
||||
NewBatchTimer: func() <-chan time.Time {
|
||||
return time.NewTimer(sweep.DefaultBatchWindowDuration).C
|
||||
},
|
||||
SweepTxConfTarget: 6,
|
||||
Notifier: cc.chainNotifier,
|
||||
ChainIO: cc.chainIO,
|
||||
Store: sweeperStore,
|
||||
MaxInputsPerTx: sweep.DefaultMaxInputsPerTx,
|
||||
MaxSweepAttempts: sweep.DefaultMaxSweepAttempts,
|
||||
NextAttemptDeltaFunc: sweep.DefaultNextAttemptDeltaFunc,
|
||||
})
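// Illustrative sketch (not part of this commit): NewBatchTimer only has to
// return a channel that fires when the current batch window closes, so a test
// can drive batching manually instead of waiting on a real timer:
//
//	batchSignal := make(chan time.Time)
//	cfg.NewBatchTimer = func() <-chan time.Time { return batchSignal }
//	// queue inputs, then close the window:
//	batchSignal <- time.Now()
//
// Here cfg stands in for a sweep.UtxoSweeperConfig built like the one above.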
|
||||
|
||||
s.utxoNursery = newUtxoNursery(&NurseryConfig{
|
||||
ChainIO: cc.chainIO,
|
||||
ConfDepth: 1,
|
||||
SweepTxConfTarget: 6,
|
||||
FetchClosedChannels: chanDB.FetchClosedChannels,
|
||||
FetchClosedChannel: chanDB.FetchClosedChannel,
|
||||
Notifier: cc.chainNotifier,
|
||||
PublishTransaction: cc.wallet.PublishTransaction,
|
||||
Store: utxnStore,
|
||||
Sweeper: sweeper,
|
||||
Sweeper: s.sweeper,
|
||||
})
|
||||
|
||||
// Construct a closure that wraps the htlcswitch's CloseLink method.
|
||||
@ -706,7 +728,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
|
||||
DisableChannel: func(op wire.OutPoint) error {
|
||||
return s.announceChanStatus(op, true)
|
||||
},
|
||||
Sweeper: sweeper,
|
||||
Sweeper: s.sweeper,
|
||||
}, chanDB)
|
||||
|
||||
s.breachArbiter = newBreachArbiter(&BreachConfig{
|
||||
@ -963,6 +985,9 @@ func (s *server) Start() error {
|
||||
if err := s.htlcSwitch.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.sweeper.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.utxoNursery.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -1050,6 +1075,7 @@ func (s *server) Stop() error {
|
||||
s.breachArbiter.Stop()
|
||||
s.authGossiper.Stop()
|
||||
s.chainArb.Stop()
|
||||
s.sweeper.Stop()
|
||||
s.cc.wallet.Shutdown()
|
||||
s.cc.chainView.Stop()
|
||||
s.connMgr.Stop()
|
||||
|
110
sweep/backend_mock_test.go
Normal file
@ -0,0 +1,110 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/lnwallet"
|
||||
)
|
||||
|
||||
// mockBackend simulates a chain backend for realistic behaviour in unit tests
|
||||
// around double spends.
|
||||
type mockBackend struct {
|
||||
lock sync.Mutex
|
||||
|
||||
notifier *MockNotifier
|
||||
|
||||
confirmedSpendInputs map[wire.OutPoint]struct{}
|
||||
|
||||
unconfirmedTxes map[chainhash.Hash]*wire.MsgTx
|
||||
unconfirmedSpendInputs map[wire.OutPoint]struct{}
|
||||
}
|
||||
|
||||
func newMockBackend(notifier *MockNotifier) *mockBackend {
|
||||
return &mockBackend{
|
||||
notifier: notifier,
|
||||
unconfirmedTxes: make(map[chainhash.Hash]*wire.MsgTx),
|
||||
confirmedSpendInputs: make(map[wire.OutPoint]struct{}),
|
||||
unconfirmedSpendInputs: make(map[wire.OutPoint]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *mockBackend) publishTransaction(tx *wire.MsgTx) error {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
txHash := tx.TxHash()
|
||||
if _, ok := b.unconfirmedTxes[txHash]; ok {
|
||||
// Tx already exists
|
||||
testLog.Tracef("mockBackend duplicate tx %v", tx.TxHash())
|
||||
return lnwallet.ErrDoubleSpend
|
||||
}
|
||||
|
||||
for _, in := range tx.TxIn {
|
||||
if _, ok := b.unconfirmedSpendInputs[in.PreviousOutPoint]; ok {
|
||||
// Double spend
|
||||
testLog.Tracef("mockBackend double spend tx %v", tx.TxHash())
|
||||
return lnwallet.ErrDoubleSpend
|
||||
}
|
||||
|
||||
if _, ok := b.confirmedSpendInputs[in.PreviousOutPoint]; ok {
|
||||
// Already included in block
|
||||
testLog.Tracef("mockBackend already in block tx %v", tx.TxHash())
|
||||
return lnwallet.ErrDoubleSpend
|
||||
}
|
||||
}
|
||||
|
||||
b.unconfirmedTxes[txHash] = tx
|
||||
for _, in := range tx.TxIn {
|
||||
b.unconfirmedSpendInputs[in.PreviousOutPoint] = struct{}{}
|
||||
}
|
||||
|
||||
testLog.Tracef("mockBackend publish tx %v", tx.TxHash())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *mockBackend) deleteUnconfirmed(txHash chainhash.Hash) {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
tx, ok := b.unconfirmedTxes[txHash]
|
||||
if !ok {
|
||||
// Tx does not exist
|
||||
testLog.Errorf("mockBackend delete tx not existing %v", txHash)
|
||||
return
|
||||
}
|
||||
|
||||
testLog.Tracef("mockBackend delete tx %v", tx.TxHash())
|
||||
delete(b.unconfirmedTxes, txHash)
|
||||
for _, in := range tx.TxIn {
|
||||
delete(b.unconfirmedSpendInputs, in.PreviousOutPoint)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *mockBackend) mine() {
|
||||
b.lock.Lock()
|
||||
defer b.lock.Unlock()
|
||||
|
||||
notifications := make(map[wire.OutPoint]*wire.MsgTx)
|
||||
for _, tx := range b.unconfirmedTxes {
|
||||
testLog.Tracef("mockBackend mining tx %v", tx.TxHash())
|
||||
for _, in := range tx.TxIn {
|
||||
b.confirmedSpendInputs[in.PreviousOutPoint] = struct{}{}
|
||||
notifications[in.PreviousOutPoint] = tx
|
||||
}
|
||||
}
|
||||
b.unconfirmedSpendInputs = make(map[wire.OutPoint]struct{})
|
||||
b.unconfirmedTxes = make(map[chainhash.Hash]*wire.MsgTx)
|
||||
|
||||
for outpoint, tx := range notifications {
|
||||
testLog.Tracef("mockBackend delivering spend ntfn for %v",
|
||||
outpoint)
|
||||
b.notifier.SpendOutpoint(outpoint, *tx)
|
||||
}
|
||||
}
|
||||
|
||||
func (b *mockBackend) isDone() bool {
|
||||
return len(b.unconfirmedTxes) == 0
|
||||
}
|
14
sweep/defaults.go
Normal file
@ -0,0 +1,14 @@
|
||||
// +build !rpctest
|
||||
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultBatchWindowDuration specifies duration of the sweep batch
|
||||
// window. The sweep is held back during the batch window to allow more
|
||||
// inputs to be added and thereby lower the fee per input.
|
||||
DefaultBatchWindowDuration = 30 * time.Second
|
||||
)
|
17
sweep/defaults_rpctest.go
Normal file
@ -0,0 +1,17 @@
|
||||
// +build rpctest
|
||||
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultBatchWindowDuration specifies duration of the sweep batch
|
||||
// window. The sweep is held back during the batch window to allow more
|
||||
// inputs to be added and thereby lower the fee per input.
|
||||
//
|
||||
// To speed up integration tests waiting for a sweep to happen, the
|
||||
// batch window is shortened.
|
||||
DefaultBatchWindowDuration = 2 * time.Second
|
||||
)
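Only one of the two DefaultBatchWindowDuration definitions above is compiled in: a regular build keeps the 30 second window from sweep/defaults.go, while a build with the rpctest tag (which the integration-test build presumably sets) gets the 2 second window from sweep/defaults_rpctest.go, so itests don't stall waiting for the batch timer.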
|
62
sweep/fee_estimator_mock_test.go
Normal file
@ -0,0 +1,62 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"github.com/lightningnetwork/lnd/lnwallet"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// mockFeeEstimator implements a mock fee estimator. It closely resembles
|
||||
// lnwallet.StaticFeeEstimator with the addition that fees can be changed for
|
||||
// testing purposes in a thread safe manner.
|
||||
type mockFeeEstimator struct {
|
||||
feePerKW lnwallet.SatPerKWeight
|
||||
|
||||
relayFee lnwallet.SatPerKWeight
|
||||
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
func newMockFeeEstimator(feePerKW,
|
||||
relayFee lnwallet.SatPerKWeight) *mockFeeEstimator {
|
||||
|
||||
return &mockFeeEstimator{
|
||||
feePerKW: feePerKW,
|
||||
relayFee: relayFee,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *mockFeeEstimator) updateFees(feePerKW,
|
||||
relayFee lnwallet.SatPerKWeight) {
|
||||
|
||||
e.lock.Lock()
|
||||
defer e.lock.Unlock()
|
||||
|
||||
e.feePerKW = feePerKW
|
||||
e.relayFee = relayFee
|
||||
}
|
||||
|
||||
func (e *mockFeeEstimator) EstimateFeePerKW(numBlocks uint32) (
|
||||
lnwallet.SatPerKWeight, error) {
|
||||
|
||||
e.lock.Lock()
|
||||
defer e.lock.Unlock()
|
||||
|
||||
return e.feePerKW, nil
|
||||
}
|
||||
|
||||
func (e *mockFeeEstimator) RelayFeePerKW() lnwallet.SatPerKWeight {
|
||||
e.lock.Lock()
|
||||
defer e.lock.Unlock()
|
||||
|
||||
return e.relayFee
|
||||
}
|
||||
|
||||
func (e *mockFeeEstimator) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *mockFeeEstimator) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ lnwallet.FeeEstimator = (*mockFeeEstimator)(nil)
|
@ -33,12 +33,17 @@ type Input interface {
|
||||
// the output can be spent. For non-CSV locked inputs this is always
|
||||
// zero.
|
||||
BlocksToMaturity() uint32
|
||||
|
||||
// HeightHint returns the minimum height at which a confirmed spending
|
||||
// tx can occur.
|
||||
HeightHint() uint32
|
||||
}
|
||||
|
||||
type inputKit struct {
|
||||
outpoint wire.OutPoint
|
||||
witnessType lnwallet.WitnessType
|
||||
signDesc lnwallet.SignDescriptor
|
||||
heightHint uint32
|
||||
}
|
||||
|
||||
// OutPoint returns the breached output's identifier that is to be included as
|
||||
@ -59,6 +64,12 @@ func (i *inputKit) SignDesc() *lnwallet.SignDescriptor {
|
||||
return &i.signDesc
|
||||
}
|
||||
|
||||
// HeightHint returns the minimum height at which a confirmed spending
|
||||
// tx can occur.
|
||||
func (i *inputKit) HeightHint() uint32 {
|
||||
return i.heightHint
|
||||
}
|
||||
|
||||
// BaseInput contains all the information needed to sweep a basic output
|
||||
// (CSV/CLTV/no time lock)
|
||||
type BaseInput struct {
|
||||
@ -68,13 +79,14 @@ type BaseInput struct {
|
||||
// MakeBaseInput assembles a new BaseInput that can be used to construct a
|
||||
// sweep transaction.
|
||||
func MakeBaseInput(outpoint *wire.OutPoint, witnessType lnwallet.WitnessType,
|
||||
signDescriptor *lnwallet.SignDescriptor) BaseInput {
|
||||
signDescriptor *lnwallet.SignDescriptor, heightHint uint32) BaseInput {
|
||||
|
||||
return BaseInput{
|
||||
inputKit{
|
||||
outpoint: *outpoint,
|
||||
witnessType: witnessType,
|
||||
signDesc: *signDescriptor,
|
||||
heightHint: heightHint,
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -113,13 +125,14 @@ type HtlcSucceedInput struct {
|
||||
// construct a sweep transaction.
|
||||
func MakeHtlcSucceedInput(outpoint *wire.OutPoint,
|
||||
signDescriptor *lnwallet.SignDescriptor,
|
||||
preimage []byte) HtlcSucceedInput {
|
||||
preimage []byte, heightHint uint32) HtlcSucceedInput {
|
||||
|
||||
return HtlcSucceedInput{
|
||||
inputKit: inputKit{
|
||||
outpoint: *outpoint,
|
||||
witnessType: lnwallet.HtlcAcceptedRemoteSuccess,
|
||||
signDesc: *signDescriptor,
|
||||
heightHint: heightHint,
|
||||
},
|
||||
preimage: preimage,
|
||||
}
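To illustrate the new heightHint parameter, a minimal caller sketch (hypothetical helper and package name; the witness type is only an example):

package sweepexample

import (
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/sweep"
)

// commitSweepInput wraps a to-be-swept commitment output together with the
// height at which its closing tx confirmed. That height becomes the value
// later returned by Input.HeightHint(), so spend notifications don't have
// to rescan from far back in the chain.
func commitSweepInput(op *wire.OutPoint, signDesc *lnwallet.SignDescriptor,
	closeConfHeight uint32) sweep.BaseInput {

	return sweep.MakeBaseInput(
		op, lnwallet.CommitmentNoDelay, signDesc, closeConfHeight,
	)
}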
|
||||
|
247
sweep/store.go
Normal file
@ -0,0 +1,247 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/coreos/bbolt"
|
||||
"github.com/lightningnetwork/lnd/channeldb"
|
||||
)
|
||||
|
||||
var (
|
||||
// lastTxBucketKey is the key that points to a bucket containing a
|
||||
// single item storing the last published tx.
|
||||
//
|
||||
// maps: lastTxKey -> serialized_tx
|
||||
lastTxBucketKey = []byte("sweeper-last-tx")
|
||||
|
||||
// lastTxKey is the fixed key under which the serialized tx is stored.
|
||||
lastTxKey = []byte("last-tx")
|
||||
|
||||
// txHashesBucketKey is the key that points to a bucket containing the
|
||||
// hashes of all sweep txes that were published successfully.
|
||||
//
|
||||
// maps: txHash -> empty slice
|
||||
txHashesBucketKey = []byte("sweeper-tx-hashes")
|
||||
|
||||
// utxnChainPrefix is the bucket prefix for nursery buckets.
|
||||
utxnChainPrefix = []byte("utxn")
|
||||
|
||||
// utxnHeightIndexKey is the sub bucket where the nursery stores the
|
||||
// height index.
|
||||
utxnHeightIndexKey = []byte("height-index")
|
||||
|
||||
// utxnFinalizedKndrTxnKey is a static key that can be used to locate
|
||||
// the nursery finalized kindergarten sweep txn.
|
||||
utxnFinalizedKndrTxnKey = []byte("finalized-kndr-txn")
|
||||
|
||||
byteOrder = binary.BigEndian
|
||||
)
|
||||
|
||||
// SweeperStore stores published txes.
|
||||
type SweeperStore interface {
|
||||
// IsOurTx determines whether a tx is published by us, based on its
|
||||
// hash.
|
||||
IsOurTx(hash chainhash.Hash) (bool, error)
|
||||
|
||||
// NotifyPublishTx signals that we are about to publish a tx.
|
||||
NotifyPublishTx(*wire.MsgTx) error
|
||||
|
||||
// GetLastPublishedTx returns the last tx that we called NotifyPublishTx
|
||||
// for.
|
||||
GetLastPublishedTx() (*wire.MsgTx, error)
|
||||
}
|
||||
|
||||
type sweeperStore struct {
|
||||
db *channeldb.DB
|
||||
}
|
||||
|
||||
// NewSweeperStore returns a new store instance.
|
||||
func NewSweeperStore(db *channeldb.DB, chainHash *chainhash.Hash) (
|
||||
SweeperStore, error) {
|
||||
|
||||
err := db.Update(func(tx *bbolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists(
|
||||
lastTxBucketKey,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tx.Bucket(txHashesBucketKey) != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
txHashesBucket, err := tx.CreateBucket(txHashesBucketKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use non-existence of tx hashes bucket as a signal to migrate
|
||||
// nursery finalized txes.
|
||||
err = migrateTxHashes(tx, txHashesBucket, chainHash)
|
||||
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &sweeperStore{
|
||||
db: db,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// migrateTxHashes migrates nursery finalized txes to the tx hashes bucket. This
|
||||
// is not implemented as a database migration, to keep the downgrade path open.
|
||||
func migrateTxHashes(tx *bbolt.Tx, txHashesBucket *bbolt.Bucket,
|
||||
chainHash *chainhash.Hash) error {
|
||||
|
||||
log.Infof("Migrating UTXO nursery finalized TXIDs")
|
||||
|
||||
// Compose chain bucket key.
|
||||
var b bytes.Buffer
|
||||
if _, err := b.Write(utxnChainPrefix); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := b.Write(chainHash[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Get chain bucket if exists.
|
||||
chainBucket := tx.Bucket(b.Bytes())
|
||||
if chainBucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the existing height index.
|
||||
hghtIndex := chainBucket.Bucket(utxnHeightIndexKey)
|
||||
if hghtIndex == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve all heights.
|
||||
err := hghtIndex.ForEach(func(k, v []byte) error {
|
||||
heightBucket := hghtIndex.Bucket(k)
|
||||
if heightBucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get finalized tx for height.
|
||||
txBytes := heightBucket.Get(utxnFinalizedKndrTxnKey)
|
||||
if txBytes == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Deserialize and skip tx if it cannot be deserialized.
|
||||
tx := &wire.MsgTx{}
|
||||
err := tx.Deserialize(bytes.NewReader(txBytes))
|
||||
if err != nil {
|
||||
log.Warnf("Cannot deserialize utxn tx")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Calculate hash.
|
||||
hash := tx.TxHash()
|
||||
|
||||
// Insert utxn tx hash in hashes bucket.
|
||||
log.Debugf("Inserting nursery tx %v in hash list "+
|
||||
"(height=%v)", hash, byteOrder.Uint32(k))
|
||||
|
||||
return txHashesBucket.Put(hash[:], []byte{})
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NotifyPublishTx signals that we are about to publish a tx.
|
||||
func (s *sweeperStore) NotifyPublishTx(sweepTx *wire.MsgTx) error {
|
||||
return s.db.Update(func(tx *bbolt.Tx) error {
|
||||
lastTxBucket := tx.Bucket(lastTxBucketKey)
|
||||
if lastTxBucket == nil {
|
||||
return errors.New("last tx bucket does not exist")
|
||||
}
|
||||
|
||||
txHashesBucket := tx.Bucket(txHashesBucketKey)
|
||||
if txHashesBucket == nil {
|
||||
return errors.New("tx hashes bucket does not exist")
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
if err := sweepTx.Serialize(&b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := lastTxBucket.Put(lastTxKey, b.Bytes()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hash := sweepTx.TxHash()
|
||||
|
||||
return txHashesBucket.Put(hash[:], []byte{})
|
||||
})
|
||||
}
|
||||
|
||||
// GetLastPublishedTx returns the last tx that we called NotifyPublishTx
|
||||
// for.
|
||||
func (s *sweeperStore) GetLastPublishedTx() (*wire.MsgTx, error) {
|
||||
var sweepTx *wire.MsgTx
|
||||
|
||||
err := s.db.View(func(tx *bbolt.Tx) error {
|
||||
lastTxBucket := tx.Bucket(lastTxBucketKey)
|
||||
if lastTxBucket == nil {
|
||||
return errors.New("last tx bucket does not exist")
|
||||
}
|
||||
|
||||
sweepTxRaw := lastTxBucket.Get(lastTxKey)
|
||||
if sweepTxRaw == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
sweepTx = &wire.MsgTx{}
|
||||
txReader := bytes.NewReader(sweepTxRaw)
|
||||
if err := sweepTx.Deserialize(txReader); err != nil {
|
||||
return fmt.Errorf("tx deserialize: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sweepTx, nil
|
||||
}
|
||||
|
||||
// IsOurTx determines whether a tx is published by us, based on its
|
||||
// hash.
|
||||
func (s *sweeperStore) IsOurTx(hash chainhash.Hash) (bool, error) {
|
||||
var ours bool
|
||||
|
||||
err := s.db.View(func(tx *bbolt.Tx) error {
|
||||
txHashesBucket := tx.Bucket(txHashesBucketKey)
|
||||
if txHashesBucket == nil {
|
||||
return errors.New("tx hashes bucket does not exist")
|
||||
}
|
||||
|
||||
ours = txHashesBucket.Get(hash[:]) != nil
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return ours, nil
|
||||
}
|
||||
|
||||
// Compile-time constraint to ensure sweeperStore implements SweeperStore.
|
||||
var _ SweeperStore = (*sweeperStore)(nil)
|
45
sweep/store_mock.go
Normal file
@ -0,0 +1,45 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
)
|
||||
|
||||
// MockSweeperStore is a mock implementation of sweeper store. This type is
|
||||
// exported, because it is currently used in nursery tests too.
|
||||
type MockSweeperStore struct {
|
||||
lastTx *wire.MsgTx
|
||||
ourTxes map[chainhash.Hash]struct{}
|
||||
}
|
||||
|
||||
// NewMockSweeperStore returns a new instance.
|
||||
func NewMockSweeperStore() *MockSweeperStore {
|
||||
return &MockSweeperStore{
|
||||
ourTxes: make(map[chainhash.Hash]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// IsOurTx determines whether a tx is published by us, based on its
|
||||
// hash.
|
||||
func (s *MockSweeperStore) IsOurTx(hash chainhash.Hash) (bool, error) {
|
||||
_, ok := s.ourTxes[hash]
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
// NotifyPublishTx signals that we are about to publish a tx.
|
||||
func (s *MockSweeperStore) NotifyPublishTx(tx *wire.MsgTx) error {
|
||||
txHash := tx.TxHash()
|
||||
s.ourTxes[txHash] = struct{}{}
|
||||
s.lastTx = tx
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetLastPublishedTx returns the last tx that we called NotifyPublishTx
|
||||
// for.
|
||||
func (s *MockSweeperStore) GetLastPublishedTx() (*wire.MsgTx, error) {
|
||||
return s.lastTx, nil
|
||||
}
|
||||
|
||||
// Compile-time constraint to ensure MockSweeperStore implements SweeperStore.
|
||||
var _ SweeperStore = (*MockSweeperStore)(nil)
|
153
sweep/store_test.go
Normal file
@ -0,0 +1,153 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/channeldb"
|
||||
)
|
||||
|
||||
// makeTestDB creates a new instance of the ChannelDB for testing purposes. A
|
||||
// callback which cleans up the created temporary directories is also returned
|
||||
// and intended to be executed after the test completes.
|
||||
func makeTestDB() (*channeldb.DB, func(), error) {
|
||||
// First, create a temporary directory to be used for the duration of
|
||||
// this test.
|
||||
tempDirName, err := ioutil.TempDir("", "channeldb")
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Next, create channeldb for the first time.
|
||||
cdb, err := channeldb.Open(tempDirName)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cleanUp := func() {
|
||||
cdb.Close()
|
||||
os.RemoveAll(tempDirName)
|
||||
}
|
||||
|
||||
return cdb, cleanUp, nil
|
||||
}
|
||||
|
||||
// TestStore asserts that the store persists the presented data to disk and is
|
||||
// able to retrieve it again.
|
||||
func TestStore(t *testing.T) {
|
||||
t.Run("bolt", func(t *testing.T) {
|
||||
|
||||
// Create new store.
|
||||
cdb, cleanUp, err := makeTestDB()
|
||||
if err != nil {
|
||||
t.Fatalf("unable to open channel db: %v", err)
|
||||
}
|
||||
defer cleanUp()
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testStore(t, func() (SweeperStore, error) {
|
||||
var chain chainhash.Hash
|
||||
return NewSweeperStore(cdb, &chain)
|
||||
})
|
||||
})
|
||||
t.Run("mock", func(t *testing.T) {
|
||||
store := NewMockSweeperStore()
|
||||
|
||||
testStore(t, func() (SweeperStore, error) {
|
||||
// Return same store, because the mock has no real
|
||||
// persistence.
|
||||
return store, nil
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func testStore(t *testing.T, createStore func() (SweeperStore, error)) {
|
||||
store, err := createStore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Initially we expect the store not to have a last published tx.
|
||||
retrievedTx, err := store.GetLastPublishedTx()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if retrievedTx != nil {
|
||||
t.Fatal("expected no last published tx")
|
||||
}
|
||||
|
||||
// Notify publication of tx1
|
||||
tx1 := wire.MsgTx{}
|
||||
tx1.AddTxIn(&wire.TxIn{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Index: 1,
|
||||
},
|
||||
})
|
||||
|
||||
err = store.NotifyPublishTx(&tx1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Notify publication of tx2
|
||||
tx2 := wire.MsgTx{}
|
||||
tx2.AddTxIn(&wire.TxIn{
|
||||
PreviousOutPoint: wire.OutPoint{
|
||||
Index: 2,
|
||||
},
|
||||
})
|
||||
|
||||
err = store.NotifyPublishTx(&tx2)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Recreate the sweeper store
|
||||
store, err = createStore()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Assert that last published tx2 is present.
|
||||
retrievedTx, err = store.GetLastPublishedTx()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if tx2.TxHash() != retrievedTx.TxHash() {
|
||||
t.Fatal("txes do not match")
|
||||
}
|
||||
|
||||
// Assert that both txes are recognized as our own.
|
||||
ours, err := store.IsOurTx(tx1.TxHash())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ours {
|
||||
t.Fatal("expected tx to be ours")
|
||||
}
|
||||
|
||||
ours, err = store.IsOurTx(tx2.TxHash())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !ours {
|
||||
t.Fatal("expected tx to be ours")
|
||||
}
|
||||
|
||||
// A different hash should be reported as not being ours.
|
||||
var unknownHash chainhash.Hash
|
||||
ours, err = store.IsOurTx(unknownHash)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if ours {
|
||||
t.Fatal("expected tx to be not ours")
|
||||
}
|
||||
}
|
893
sweep/sweeper.go
@ -1,24 +1,88 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btcd/blockchain"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcutil"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/lightningnetwork/lnd/chainntnfs"
|
||||
"github.com/lightningnetwork/lnd/lnwallet"
|
||||
)
|
||||
|
||||
// UtxoSweeper provides the functionality to generate sweep txes. The plan is
|
||||
// to extend UtxoSweeper in the future to also manage the actual sweeping
|
||||
// process by itself.
|
||||
var (
|
||||
// ErrRemoteSpend is returned in case an output that we try to sweep is
|
||||
// confirmed in a tx of the remote party.
|
||||
ErrRemoteSpend = errors.New("remote party swept utxo")
|
||||
|
||||
// ErrTooManyAttempts is returned in case sweeping an output has failed
|
||||
// for the configured max number of attempts.
|
||||
ErrTooManyAttempts = errors.New("sweep failed after max attempts")
|
||||
|
||||
// DefaultMaxSweepAttempts specifies the default maximum number of times
|
||||
// an input is included in a publish attempt before giving up and
|
||||
// returning an error to the caller.
|
||||
DefaultMaxSweepAttempts = 10
|
||||
)
|
||||
|
||||
// pendingInput is created when an input reaches the main loop for the first
|
||||
// time. It tracks all relevant state that is needed for sweeping.
|
||||
type pendingInput struct {
|
||||
// listeners is a list of channels over which the final outcome of the
|
||||
// sweep needs to be broadcasted.
|
||||
listeners []chan Result
|
||||
|
||||
// input is the original struct that contains the input and sign
|
||||
// descriptor.
|
||||
input Input
|
||||
|
||||
// ntfnRegCancel is populated with a function that cancels the chain
|
||||
// notifier spend registration.
|
||||
ntfnRegCancel func()
|
||||
|
||||
// minPublishHeight indicates the minimum block height at which this
|
||||
// input may be (re)published.
|
||||
minPublishHeight int32
|
||||
|
||||
// publishAttempts records the number of attempts that have already been
|
||||
// made to sweep this tx.
|
||||
publishAttempts int
|
||||
}
|
||||
|
||||
// UtxoSweeper is responsible for sweeping outputs back into the wallet.
|
||||
type UtxoSweeper struct {
|
||||
started uint32 // To be used atomically.
|
||||
stopped uint32 // To be used atomically.
|
||||
|
||||
cfg *UtxoSweeperConfig
|
||||
|
||||
newInputs chan *sweepInputMessage
|
||||
spendChan chan *chainntnfs.SpendDetail
|
||||
|
||||
pendingInputs map[wire.OutPoint]*pendingInput
|
||||
|
||||
// timer is the channel that signals expiry of the sweep batch timer.
|
||||
timer <-chan time.Time
|
||||
|
||||
testSpendChan chan wire.OutPoint
|
||||
|
||||
currentOutputScript []byte
|
||||
|
||||
relayFeePerKW lnwallet.SatPerKWeight
|
||||
|
||||
quit chan struct{}
|
||||
wg sync.WaitGroup
|
||||
}
|
||||
|
||||
// UtxoSweeperConfig contains dependencies of UtxoSweeper.
|
||||
type UtxoSweeperConfig struct {
|
||||
// GenSweepScript generates a P2WKH script belonging to the wallet
|
||||
// where funds can be swept.
|
||||
// GenSweepScript generates a P2WKH script belonging to the wallet where
|
||||
// funds can be swept.
|
||||
GenSweepScript func() ([]byte, error)
|
||||
|
||||
// Estimator is used when crafting sweep transactions to estimate the
|
||||
@ -26,18 +90,651 @@ type UtxoSweeperConfig struct {
|
||||
// transaction.
|
||||
Estimator lnwallet.FeeEstimator
|
||||
|
||||
// PublishTransaction facilitates the process of broadcasting a signed
|
||||
// transaction to the appropriate network.
|
||||
PublishTransaction func(*wire.MsgTx) error
|
||||
|
||||
// NewBatchTimer creates a channel that will be sent on when a certain
|
||||
// time window has passed. During this time window, new inputs can still
|
||||
// be added to the sweep tx that is about to be generated.
|
||||
NewBatchTimer func() <-chan time.Time
|
||||
|
||||
// Notifier is an instance of a chain notifier we'll use to watch for
|
||||
// certain on-chain events.
|
||||
Notifier chainntnfs.ChainNotifier
|
||||
|
||||
// ChainIO is used to determine the current block height.
|
||||
ChainIO lnwallet.BlockChainIO
|
||||
|
||||
// Store stores the published sweeper txes.
|
||||
Store SweeperStore
|
||||
|
||||
// Signer is used by the sweeper to generate valid witnesses at the
|
||||
// time the incubated outputs need to be spent.
|
||||
Signer lnwallet.Signer
|
||||
|
||||
// SweepTxConfTarget assigns a confirmation target for sweep txes on
|
||||
// which the fee calculation will be based.
|
||||
SweepTxConfTarget uint32
|
||||
|
||||
// MaxInputsPerTx specifies the default maximum number of inputs allowed
|
||||
// in a single sweep tx. If more need to be swept, multiple txes are
|
||||
// created and published.
|
||||
MaxInputsPerTx int
|
||||
|
||||
// MaxSweepAttempts specifies the maximum number of times an input is
|
||||
// included in a publish attempt before giving up and returning an error
|
||||
// to the caller.
|
||||
MaxSweepAttempts int
|
||||
|
||||
// NextAttemptDeltaFunc returns given the number of already attempted
|
||||
// sweeps, how many blocks to wait before retrying to sweep.
|
||||
NextAttemptDeltaFunc func(int) int32
|
||||
}
|
||||
|
||||
// New returns a new UtxoSweeper instance.
|
||||
// Result is the struct that is pushed through the result channel. Callers can
|
||||
// use this to be informed of the final sweep result. In case of a remote
|
||||
// spend, Err will be ErrRemoteSpend.
|
||||
type Result struct {
|
||||
// Err is the final result of the sweep. It is nil when the input is
|
||||
// swept successfully by us. ErrRemoteSpend is returned when another
|
||||
// party took the input.
|
||||
Err error
|
||||
|
||||
// Tx is the transaction that spent the input.
|
||||
Tx *wire.MsgTx
|
||||
}
|
||||
|
||||
// sweepInputMessage structs are used in the internal channel between the
|
||||
// SweepInput call and the sweeper main loop.
|
||||
type sweepInputMessage struct {
|
||||
input Input
|
||||
resultChan chan Result
|
||||
}
|
||||
|
||||
// New returns a new Sweeper instance.
|
||||
func New(cfg *UtxoSweeperConfig) *UtxoSweeper {
|
||||
|
||||
return &UtxoSweeper{
|
||||
cfg: cfg,
|
||||
newInputs: make(chan *sweepInputMessage),
|
||||
spendChan: make(chan *chainntnfs.SpendDetail),
|
||||
quit: make(chan struct{}),
|
||||
pendingInputs: make(map[wire.OutPoint]*pendingInput),
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the process of constructing and publishing sweep txes.
|
||||
func (s *UtxoSweeper) Start() error {
|
||||
if !atomic.CompareAndSwapUint32(&s.started, 0, 1) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Tracef("Sweeper starting")
|
||||
|
||||
// Retrieve last published tx from database.
|
||||
lastTx, err := s.cfg.Store.GetLastPublishedTx()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get last published tx: %v", err)
|
||||
}
|
||||
|
||||
// Republish the tx in case the previous publish call crashed lnd. We
// don't care about the return value, because inputs will be re-offered
// and retried anyway. The only reason we republish here is to prevent
// the corner case where lnd ends up in a restart loop because a
// crashing publish keeps deriving new output scripts. By publishing
// (and possibly crashing) right away, no new output script has been
// derived yet.
|
||||
if lastTx != nil {
|
||||
log.Debugf("Publishing last tx %v", lastTx.TxHash())
|
||||
|
||||
// Error can be ignored. Because we are starting up, there are
|
||||
// no pending inputs to update based on the publish result.
|
||||
err := s.cfg.PublishTransaction(lastTx)
|
||||
if err != nil && err != lnwallet.ErrDoubleSpend {
|
||||
log.Errorf("last tx publish: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve relay fee for dust limit calculation. Assume that this will
|
||||
// not change from here on.
|
||||
s.relayFeePerKW = s.cfg.Estimator.RelayFeePerKW()
|
||||
|
||||
// Register for block epochs to retry sweeping every block.
|
||||
bestHash, bestHeight, err := s.cfg.ChainIO.GetBestBlock()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get best block: %v", err)
|
||||
}
|
||||
|
||||
log.Debugf("Best height: %v", bestHeight)
|
||||
|
||||
blockEpochs, err := s.cfg.Notifier.RegisterBlockEpochNtfn(
|
||||
&chainntnfs.BlockEpoch{
|
||||
Height: bestHeight,
|
||||
Hash: bestHash,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("register block epoch ntfn: %v", err)
|
||||
}
|
||||
|
||||
// Start sweeper main loop.
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer blockEpochs.Cancel()
|
||||
defer s.wg.Done()
|
||||
|
||||
err := s.collector(blockEpochs.Epochs, bestHeight)
|
||||
if err != nil {
|
||||
log.Errorf("sweeper stopped: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops sweeper from listening to block epochs and constructing sweep
|
||||
// txes.
|
||||
func (s *UtxoSweeper) Stop() error {
|
||||
if !atomic.CompareAndSwapUint32(&s.stopped, 0, 1) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Debugf("Sweeper shutting down")
|
||||
|
||||
close(s.quit)
|
||||
s.wg.Wait()
|
||||
|
||||
log.Debugf("Sweeper shut down")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SweepInput sweeps inputs back into the wallet. The inputs will be batched and
|
||||
// swept after the batch time window ends.
|
||||
//
|
||||
// NOTE: Extreme care needs to be taken that input isn't changed externally.
|
||||
// Because it is an interface and we don't know what exactly is behind it, we
|
||||
// cannot make a local copy in sweeper.
|
||||
func (s *UtxoSweeper) SweepInput(input Input) (chan Result, error) {
|
||||
if input == nil || input.OutPoint() == nil || input.SignDesc() == nil {
|
||||
return nil, errors.New("nil input received")
|
||||
}
|
||||
|
||||
log.Infof("Sweep request received: out_point=%v, witness_type=%v, "+
|
||||
"time_lock=%v, size=%v", input.OutPoint(), input.WitnessType(),
|
||||
input.BlocksToMaturity(),
|
||||
btcutil.Amount(input.SignDesc().Output.Value))
|
||||
|
||||
sweeperInput := &sweepInputMessage{
|
||||
input: input,
|
||||
resultChan: make(chan Result, 1),
|
||||
}
|
||||
|
||||
// Deliver input to main event loop.
|
||||
select {
|
||||
case s.newInputs <- sweeperInput:
|
||||
case <-s.quit:
|
||||
return nil, fmt.Errorf("sweeper shutting down")
|
||||
}
|
||||
|
||||
return sweeperInput.resultChan, nil
|
||||
}
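A minimal caller sketch of SweepInput (hypothetical helper and package name, not part of this commit):

package sweepexample

import (
	"errors"

	"github.com/lightningnetwork/lnd/sweep"
)

// sweepAndWait offers a single input to the sweeper and blocks until the
// final outcome arrives on the result channel.
func sweepAndWait(s *sweep.UtxoSweeper, input sweep.Input,
	quit <-chan struct{}) error {

	resultChan, err := s.SweepInput(input)
	if err != nil {
		return err
	}

	select {
	case result := <-resultChan:
		// Err is nil when our own sweep tx spent the input, and
		// ErrRemoteSpend when the remote party claimed it first.
		return result.Err
	case <-quit:
		return errors.New("shutting down")
	}
}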
|
||||
|
||||
// collector is the sweeper main loop. It processes new inputs, spend
|
||||
// notifications and counts down to publication of the sweep tx.
|
||||
func (s *UtxoSweeper) collector(blockEpochs <-chan *chainntnfs.BlockEpoch,
|
||||
bestHeight int32) error {
|
||||
|
||||
for {
|
||||
select {
|
||||
// A new input is offered to the sweeper. We check to see if we
|
||||
// are already trying to sweep this input and if not, set up a
|
||||
// listener for spend and schedule a sweep.
|
||||
case input := <-s.newInputs:
|
||||
outpoint := *input.input.OutPoint()
|
||||
pendInput, pending := s.pendingInputs[outpoint]
|
||||
if pending {
|
||||
log.Debugf("Already pending input %v received",
|
||||
outpoint)
|
||||
|
||||
// Add additional result channel to signal
|
||||
// spend of this input.
|
||||
pendInput.listeners = append(
|
||||
pendInput.listeners, input.resultChan,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
// Create a new pendingInput and initialize the
|
||||
// listeners slice with the passed in result channel. If
|
||||
// this input is offered for sweep again, the result
|
||||
// channel will be appended to this slice.
|
||||
pendInput = &pendingInput{
|
||||
listeners: []chan Result{input.resultChan},
|
||||
input: input.input,
|
||||
minPublishHeight: bestHeight,
|
||||
}
|
||||
s.pendingInputs[outpoint] = pendInput
|
||||
|
||||
// Start watching for spend of this input, either by us
|
||||
// or the remote party.
|
||||
cancel, err := s.waitForSpend(
|
||||
outpoint,
|
||||
input.input.SignDesc().Output.PkScript,
|
||||
input.input.HeightHint(),
|
||||
)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("wait for spend: %v", err)
|
||||
s.signalAndRemove(&outpoint, Result{Err: err})
|
||||
continue
|
||||
}
|
||||
pendInput.ntfnRegCancel = cancel
|
||||
|
||||
// Check to see if with this new input a sweep tx can be
|
||||
// formed.
|
||||
if err := s.scheduleSweep(bestHeight); err != nil {
|
||||
log.Errorf("schedule sweep: %v", err)
|
||||
}
|
||||
|
||||
// A spend of one of our inputs is detected. Signal sweep
|
||||
// results to the caller(s).
|
||||
case spend := <-s.spendChan:
|
||||
// For testing purposes.
|
||||
if s.testSpendChan != nil {
|
||||
s.testSpendChan <- *spend.SpentOutPoint
|
||||
}
|
||||
|
||||
// Query the store to find out if we ever published this
|
||||
// tx.
|
||||
spendHash := *spend.SpenderTxHash
|
||||
isOurTx, err := s.cfg.Store.IsOurTx(spendHash)
|
||||
if err != nil {
|
||||
log.Errorf("cannot determine if tx %v "+
|
||||
"is ours: %v", spendHash, err,
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Detected spend related to in flight inputs "+
|
||||
"(is_ours=%v): %v",
|
||||
newLogClosure(func() string {
|
||||
return spew.Sdump(spend.SpendingTx)
|
||||
}), isOurTx,
|
||||
)
|
||||
|
||||
// Signal sweep results for inputs in this confirmed
|
||||
// tx.
|
||||
for _, txIn := range spend.SpendingTx.TxIn {
|
||||
outpoint := txIn.PreviousOutPoint
|
||||
|
||||
// Check whether this input is known to us. It could
// be unknown if we canceled the registration and
// deleted it from pendingInputs while the ntfn was
// already in flight, or if this simply isn't one of
// our inputs.
|
||||
_, ok := s.pendingInputs[outpoint]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// Return either a nil or a remote spend result.
|
||||
var err error
|
||||
if !isOurTx {
|
||||
err = ErrRemoteSpend
|
||||
}
|
||||
|
||||
// Signal result channels.
|
||||
s.signalAndRemove(&outpoint, Result{
|
||||
Tx: spend.SpendingTx,
|
||||
Err: err,
|
||||
})
|
||||
}
|
||||
|
||||
// Now that an input of ours is spent, we can try to
|
||||
// resweep the remaining inputs.
|
||||
if err := s.scheduleSweep(bestHeight); err != nil {
|
||||
log.Errorf("schedule sweep: %v", err)
|
||||
}
|
||||
|
||||
// The timer expires and we are going to (re)sweep.
|
||||
case <-s.timer:
|
||||
log.Debugf("Sweep timer expired")
|
||||
|
||||
// Set timer to nil so we know that a new timer needs to
|
||||
// be started when new inputs arrive.
|
||||
s.timer = nil
|
||||
|
||||
// Retrieve fee estimate for input filtering and final
|
||||
// tx fee calculation.
|
||||
satPerKW, err := s.cfg.Estimator.EstimateFeePerKW(
|
||||
s.cfg.SweepTxConfTarget,
|
||||
)
|
||||
if err != nil {
|
||||
log.Errorf("estimate fee: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Examine pending inputs and try to construct lists of
|
||||
// inputs.
|
||||
inputLists, err := s.getInputLists(bestHeight, satPerKW)
|
||||
if err != nil {
|
||||
log.Errorf("get input lists: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Sweep selected inputs.
|
||||
for _, inputs := range inputLists {
|
||||
err := s.sweep(inputs, satPerKW, bestHeight)
|
||||
if err != nil {
|
||||
log.Errorf("sweep: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// A new block comes in. Things may have changed, so we retry a
|
||||
// sweep.
|
||||
case epoch, ok := <-blockEpochs:
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
bestHeight = epoch.Height
|
||||
|
||||
log.Debugf("New blocks: height=%v, sha=%v",
|
||||
epoch.Height, epoch.Hash)
|
||||
|
||||
if err := s.scheduleSweep(bestHeight); err != nil {
|
||||
log.Errorf("schedule sweep: %v", err)
|
||||
}
|
||||
|
||||
case <-s.quit:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// scheduleSweep starts the sweep timer to create an opportunity for more inputs
|
||||
// to be added.
|
||||
func (s *UtxoSweeper) scheduleSweep(currentHeight int32) error {
|
||||
// The timer is already ticking, no action needed for the sweep to
|
||||
// happen.
|
||||
if s.timer != nil {
|
||||
log.Debugf("Timer still ticking")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve fee estimate for input filtering and final tx fee
|
||||
// calculation.
|
||||
satPerKW, err := s.cfg.Estimator.EstimateFeePerKW(
|
||||
s.cfg.SweepTxConfTarget,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("estimate fee: %v", err)
|
||||
}
|
||||
|
||||
// Examine pending inputs and try to construct lists of inputs.
|
||||
inputLists, err := s.getInputLists(currentHeight, satPerKW)
|
||||
if err != nil {
|
||||
return fmt.Errorf("get input lists: %v", err)
|
||||
}
|
||||
|
||||
log.Infof("Sweep candidates at height=%v, yield %v distinct txns",
|
||||
currentHeight, len(inputLists))
|
||||
|
||||
// If there are no input sets, there is nothing sweepable and we can
|
||||
// return without starting the timer.
|
||||
if len(inputLists) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Start sweep timer to create opportunity for more inputs to be added
|
||||
// before a tx is constructed.
|
||||
s.timer = s.cfg.NewBatchTimer()
|
||||
|
||||
log.Debugf("Sweep timer started")
|
||||
|
||||
return nil
|
||||
}
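Taken together with the collector loop above: the first sweepable input arms the batch timer via NewBatchTimer, inputs offered while the timer is running ride along in the same sweep tx, and since the timer field is only cleared after it fires, repeated scheduleSweep calls during the window are no-ops.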
|
||||
|
||||
// signalAndRemove notifies the listeners of the final result of the input
|
||||
// sweep. It cancels any pending spend notification and removes the input from
|
||||
// the list of pending inputs. When this function returns, the sweeper has
|
||||
// completely forgotten about the input.
|
||||
func (s *UtxoSweeper) signalAndRemove(outpoint *wire.OutPoint, result Result) {
|
||||
pendInput := s.pendingInputs[*outpoint]
|
||||
listeners := pendInput.listeners
|
||||
|
||||
if result.Err == nil {
|
||||
log.Debugf("Dispatching sweep success for %v to %v listeners",
|
||||
outpoint, len(listeners),
|
||||
)
|
||||
} else {
|
||||
log.Debugf("Dispatching sweep error for %v to %v listeners: %v",
|
||||
outpoint, len(listeners), result.Err,
|
||||
)
|
||||
}
|
||||
|
||||
// Signal all listeners. Channel is buffered. Because we only send once
|
||||
// on every channel, it should never block.
|
||||
for _, resultChan := range listeners {
|
||||
resultChan <- result
|
||||
}
|
||||
|
||||
// Cancel spend notification with chain notifier. This is not necessary
|
||||
// in case of a success, except that a reorg could still happen.
|
||||
if pendInput.ntfnRegCancel != nil {
|
||||
log.Debugf("Canceling spend ntfn for %v", outpoint)
|
||||
|
||||
pendInput.ntfnRegCancel()
|
||||
}
|
||||
|
||||
// Inputs are no longer pending after result has been sent.
|
||||
delete(s.pendingInputs, *outpoint)
|
||||
}
|
||||
|
||||
// getInputLists goes through all pending inputs and constructs sweep lists,
|
||||
// each up to the configured maximum number of inputs. Negative yield inputs are
|
||||
// skipped. Transactions with an output below the dust limit are not published.
|
||||
// Those inputs remain pending and will be bundled with future inputs if
|
||||
// possible.
|
||||
func (s *UtxoSweeper) getInputLists(currentHeight int32,
|
||||
satPerKW lnwallet.SatPerKWeight) ([]inputSet, error) {
|
||||
|
||||
// Filter for inputs that need to be swept. Create two lists: all
|
||||
// sweepable inputs and a list containing only the new, never tried
|
||||
// inputs.
|
||||
//
|
||||
// We want to create as large a tx as possible, so we return a final set
|
||||
// list that starts with sets created from all inputs. However, there is
|
||||
// a chance that those txes will not publish, because they already
|
||||
// contain inputs that failed before. Therefore we also add sets
|
||||
// consisting of only new inputs to the list, to make sure that new
|
||||
// inputs are given a good, isolated chance of being published.
|
||||
var newInputs, retryInputs []Input
|
||||
for _, input := range s.pendingInputs {
|
||||
// Skip inputs that have a minimum publish height that is not
|
||||
// yet reached.
|
||||
if input.minPublishHeight > currentHeight {
|
||||
continue
|
||||
}
|
||||
|
||||
// Add the input to one of the two lists.
|
||||
if input.publishAttempts == 0 {
|
||||
newInputs = append(newInputs, input.input)
|
||||
} else {
|
||||
retryInputs = append(retryInputs, input.input)
|
||||
}
|
||||
}
|
||||
|
||||
// If there is anything to retry, combine it with the new inputs and
|
||||
// form input sets.
|
||||
var allSets []inputSet
|
||||
if len(retryInputs) > 0 {
|
||||
var err error
|
||||
allSets, err = generateInputPartitionings(
|
||||
append(retryInputs, newInputs...),
|
||||
s.relayFeePerKW, satPerKW,
|
||||
s.cfg.MaxInputsPerTx,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("input partitionings: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create sets for just the new inputs.
|
||||
newSets, err := generateInputPartitionings(
|
||||
newInputs,
|
||||
s.relayFeePerKW, satPerKW,
|
||||
s.cfg.MaxInputsPerTx,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("input partitionings: %v", err)
|
||||
}
|
||||
|
||||
log.Debugf("Sweep candidates at height=%v: total_num_pending=%v, "+
|
||||
"total_num_new=%v", currentHeight, len(allSets), len(newSets))
|
||||
|
||||
// Append the new sets at the end of the list, because those txes likely
|
||||
// have a higher fee per input.
|
||||
return append(allSets, newSets...), nil
|
||||
}
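To make the two-list strategy concrete (hypothetical input names): if input A already has failed publish attempts behind it while B and C are new, getInputLists returns roughly [{A, B, C}, {B, C}] (each set still subject to MaxInputsPerTx and dust filtering), so B and C get an isolated publish chance even if an earlier conflicting tx containing A keeps being rejected.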
|
||||
|
||||
// sweep takes a set of preselected inputs, creates a sweep tx and publishes the
|
||||
// tx. The output address is only marked as used if the publish succeeds.
|
||||
func (s *UtxoSweeper) sweep(inputs inputSet,
|
||||
satPerKW lnwallet.SatPerKWeight, currentHeight int32) error {
|
||||
|
||||
var err error
|
||||
|
||||
// Generate output script if no unused script available.
|
||||
if s.currentOutputScript == nil {
|
||||
s.currentOutputScript, err = s.cfg.GenSweepScript()
|
||||
if err != nil {
|
||||
return fmt.Errorf("gen sweep script: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create sweep tx.
|
||||
tx, err := createSweepTx(
|
||||
inputs, s.currentOutputScript,
|
||||
uint32(currentHeight), satPerKW, s.cfg.Signer,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create sweep tx: %v", err)
|
||||
}
|
||||
|
||||
// Record the tx before publication, so that we will always know that a
// spend by this tx is ours. Otherwise, if the publish call doesn't
// return but the tx was broadcast anyway, we lose track of it. Even
// republication on startup doesn't prevent this, because that call
// would then return a double spend error and also not add the hash to
// the store.
|
||||
err = s.cfg.Store.NotifyPublishTx(tx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("notify publish tx: %v", err)
|
||||
}
|
||||
|
||||
// Publish sweep tx.
|
||||
log.Debugf("Publishing sweep tx %v, num_inputs=%v, height=%v",
|
||||
tx.TxHash(), len(tx.TxIn), currentHeight)
|
||||
|
||||
log.Tracef("Sweep tx at height=%v: %v", currentHeight,
|
||||
newLogClosure(func() string {
|
||||
return spew.Sdump(tx)
|
||||
}),
|
||||
)
|
||||
|
||||
err = s.cfg.PublishTransaction(tx)
|
||||
|
||||
// In case of an unexpected error, don't try to recover.
|
||||
if err != nil && err != lnwallet.ErrDoubleSpend {
|
||||
return fmt.Errorf("publish tx: %v", err)
|
||||
}
|
||||
|
||||
// Keep outputScript in case of an error, so that it can be reused for
|
||||
// the next tx without causing address inflation.
|
||||
if err == nil {
|
||||
s.currentOutputScript = nil
|
||||
}
|
||||
|
||||
// Reschedule sweep.
|
||||
for _, input := range tx.TxIn {
|
||||
pi, ok := s.pendingInputs[input.PreviousOutPoint]
|
||||
if !ok {
|
||||
// It can be that the input has been removed because it
|
||||
// exceeded the maximum number of attempts in a previous
|
||||
// input set.
|
||||
continue
|
||||
}
|
||||
|
||||
// Record another publish attempt.
|
||||
pi.publishAttempts++
|
||||
|
||||
// We don't care what the result of the publish call was. Even
|
||||
// if it is published successfully, it can still be that it
|
||||
// needs to be retried. Call NextAttemptDeltaFunc to calculate
|
||||
// when to resweep this input.
|
||||
nextAttemptDelta := s.cfg.NextAttemptDeltaFunc(
|
||||
pi.publishAttempts,
|
||||
)
|
||||
|
||||
pi.minPublishHeight = currentHeight + nextAttemptDelta
|
||||
|
||||
log.Debugf("Rescheduling input %v after %v attempts at "+
|
||||
"height %v (delta %v)", input.PreviousOutPoint,
|
||||
pi.publishAttempts, pi.minPublishHeight,
|
||||
nextAttemptDelta)
|
||||
|
||||
if pi.publishAttempts >= s.cfg.MaxSweepAttempts {
|
||||
// Signal result channels sweep result.
|
||||
s.signalAndRemove(&input.PreviousOutPoint, Result{
|
||||
Err: ErrTooManyAttempts,
|
||||
})
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForSpend registers a spend notification with the chain notifier. It
|
||||
// returns a cancel function that can be used to cancel the registration.
|
||||
func (s *UtxoSweeper) waitForSpend(outpoint wire.OutPoint,
|
||||
script []byte, heightHint uint32) (func(), error) {
|
||||
|
||||
log.Debugf("Wait for spend of %v", outpoint)
|
||||
|
||||
spendEvent, err := s.cfg.Notifier.RegisterSpendNtfn(
|
||||
&outpoint, script, heightHint,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("register spend ntfn: %v", err)
|
||||
}
|
||||
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
select {
|
||||
case spend, ok := <-spendEvent.Spend:
|
||||
if !ok {
|
||||
log.Debugf("Spend ntfn for %v canceled",
|
||||
outpoint)
|
||||
return
|
||||
}
|
||||
|
||||
log.Debugf("Delivering spend ntfn for %v",
|
||||
outpoint)
|
||||
select {
|
||||
case s.spendChan <- spend:
|
||||
log.Debugf("Delivered spend ntfn for %v",
|
||||
outpoint)
|
||||
|
||||
case <-s.quit:
|
||||
}
|
||||
case <-s.quit:
|
||||
}
|
||||
}()
|
||||
|
||||
return spendEvent.Cancel, nil
|
||||
}
|
||||
|
||||
// CreateSweepTx accepts a list of inputs and signs and generates a txn that
|
||||
// spends from them. This method also makes an accurate fee estimate before
|
||||
// generating the required witnesses.
|
||||
@ -56,179 +753,31 @@ func New(cfg *UtxoSweeperConfig) *UtxoSweeper {
|
||||
func (s *UtxoSweeper) CreateSweepTx(inputs []Input, confTarget uint32,
|
||||
currentBlockHeight uint32) (*wire.MsgTx, error) {
|
||||
|
||||
feePerKw, err := s.cfg.Estimator.EstimateFeePerKW(confTarget)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Generate the receiving script to which the funds will be swept.
|
||||
pkScript, err := s.cfg.GenSweepScript()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Using the txn weight estimate, compute the required txn fee.
|
||||
feePerKw, err := s.cfg.Estimator.EstimateFeePerKW(confTarget)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
inputs, txWeight, csvCount, cltvCount := s.getWeightEstimate(inputs)
|
||||
log.Infof("Creating sweep transaction for %v inputs (%v CSV, %v CLTV) "+
|
||||
"using %v sat/kw", len(inputs), csvCount, cltvCount,
|
||||
int64(feePerKw))
|
||||
|
||||
txFee := feePerKw.FeeForWeight(txWeight)
|
||||
|
||||
// Sum up the total value contained in the inputs.
|
||||
var totalSum btcutil.Amount
|
||||
for _, o := range inputs {
|
||||
totalSum += btcutil.Amount(o.SignDesc().Output.Value)
|
||||
}
|
||||
|
||||
// Sweep as much possible, after subtracting txn fees.
|
||||
sweepAmt := int64(totalSum - txFee)
|
||||
|
||||
// Create the sweep transaction that we will be building. We use
|
||||
// version 2 as it is required for CSV. The txn will sweep the amount
|
||||
// after fees to the pkscript generated above.
|
||||
sweepTx := wire.NewMsgTx(2)
|
||||
sweepTx.AddTxOut(&wire.TxOut{
|
||||
PkScript: pkScript,
|
||||
Value: sweepAmt,
|
||||
})
|
||||
|
||||
sweepTx.LockTime = currentBlockHeight
|
||||
|
||||
// Add all inputs to the sweep transaction. Ensure that for each
|
||||
// csvInput, we set the sequence number properly.
|
||||
for _, input := range inputs {
|
||||
sweepTx.AddTxIn(&wire.TxIn{
|
||||
PreviousOutPoint: *input.OutPoint(),
|
||||
Sequence: input.BlocksToMaturity(),
|
||||
})
|
||||
}
|
||||
|
||||
// Before signing the transaction, check to ensure that it meets some
|
||||
// basic validity requirements.
|
||||
//
|
||||
// TODO(conner): add more control to sanity checks, allowing us to
|
||||
// delay spending "problem" outputs, e.g. possibly batching with other
|
||||
// classes if fees are too low.
|
||||
btx := btcutil.NewTx(sweepTx)
|
||||
if err := blockchain.CheckTransactionSanity(btx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hashCache := txscript.NewTxSigHashes(sweepTx)
|
||||
|
||||
// With all the inputs in place, use each output's unique witness
|
||||
// function to generate the final witness required for spending.
|
||||
addWitness := func(idx int, tso Input) error {
|
||||
witness, err := tso.BuildWitness(
|
||||
s.cfg.Signer, sweepTx, hashCache, idx,
|
||||
return createSweepTx(
|
||||
inputs, pkScript, currentBlockHeight, feePerKw, s.cfg.Signer,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sweepTx.TxIn[idx].Witness = witness
|
||||
|
||||
return nil
|
||||
// DefaultNextAttemptDeltaFunc is the default calculation for next sweep attempt
|
||||
// scheduling. It implements exponential back-off with some randomness. This is
|
||||
// to prevent a stuck tx (for example because fee is too low and can't be bumped
|
||||
// in btcd) from blocking all other retried inputs in the same tx.
|
||||
func DefaultNextAttemptDeltaFunc(attempts int) int32 {
|
||||
return 1 + rand.Int31n(1<<uint(attempts-1))
|
||||
}
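Worked out for the default function: after the first failed attempt the delta is 1 + rand.Int31n(1), i.e. exactly 1 block; after the second it is uniform in [1, 2]; after the third in [1, 4]; and after the n-th in [1, 2^(n-1)]. Repeatedly failing inputs therefore back off roughly exponentially, while the randomness spreads retries of different inputs over different blocks.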
|
||||
|
||||
// Finally we'll attach a valid witness to each csv and cltv input
|
||||
// within the sweeping transaction.
|
||||
for i, input := range inputs {
|
||||
if err := addWitness(i, input); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return sweepTx, nil
|
||||
}
|
||||
|
||||
// getWeightEstimate returns a weight estimate for the given inputs.
|
||||
// Additionally, it returns counts for the number of csv and cltv inputs.
|
||||
func (s *UtxoSweeper) getWeightEstimate(inputs []Input) ([]Input, int64, int, int) {
|
||||
// We initialize a weight estimator so we can accurately assess the
|
||||
// amount of fees we need to pay for this sweep transaction.
|
||||
//
|
||||
// TODO(roasbeef): can be more intelligent about buffering outputs to
|
||||
// be more efficient on-chain.
|
||||
var weightEstimate lnwallet.TxWeightEstimator
|
||||
|
||||
// Our sweep transaction will pay to a single segwit p2wkh address,
|
||||
// ensure it contributes to our weight estimate.
|
||||
weightEstimate.AddP2WKHOutput()
|
||||
|
||||
// For each output, use its witness type to determine the estimate
|
||||
// weight of its witness, and add it to the proper set of spendable
|
||||
// outputs.
|
||||
var (
|
||||
sweepInputs []Input
|
||||
csvCount, cltvCount int
|
||||
)
|
||||
for i := range inputs {
|
||||
input := inputs[i]
|
||||
|
||||
switch input.WitnessType() {
|
||||
|
||||
// Outputs on a remote commitment transaction that pay directly
|
||||
// to us.
|
||||
case lnwallet.CommitmentNoDelay:
|
||||
weightEstimate.AddP2WKHInput()
|
||||
sweepInputs = append(sweepInputs, input)
|
||||
|
||||
// Outputs on a past commitment transaction that pay directly
|
||||
// to us.
|
||||
case lnwallet.CommitmentTimeLock:
|
||||
weightEstimate.AddWitnessInput(
|
||||
lnwallet.ToLocalTimeoutWitnessSize,
|
||||
)
|
||||
sweepInputs = append(sweepInputs, input)
|
||||
csvCount++
|
||||
|
||||
// Outgoing second layer HTLC's that have confirmed within the
|
||||
// chain, and the output they produced is now mature enough to
|
||||
// sweep.
|
||||
case lnwallet.HtlcOfferedTimeoutSecondLevel:
|
||||
weightEstimate.AddWitnessInput(
|
||||
lnwallet.ToLocalTimeoutWitnessSize,
|
||||
)
|
||||
sweepInputs = append(sweepInputs, input)
|
||||
csvCount++
|
||||
|
||||
// Incoming second layer HTLC's that have confirmed within the
|
||||
// chain, and the output they produced is now mature enough to
|
||||
// sweep.
|
||||
case lnwallet.HtlcAcceptedSuccessSecondLevel:
|
||||
weightEstimate.AddWitnessInput(
|
||||
lnwallet.ToLocalTimeoutWitnessSize,
|
||||
)
|
||||
sweepInputs = append(sweepInputs, input)
|
||||
csvCount++
|
||||
|
||||
// An HTLC on the commitment transaction of the remote party,
|
||||
// that has had its absolute timelock expire.
|
||||
case lnwallet.HtlcOfferedRemoteTimeout:
|
||||
weightEstimate.AddWitnessInput(
|
||||
lnwallet.AcceptedHtlcTimeoutWitnessSize,
|
||||
)
|
||||
sweepInputs = append(sweepInputs, input)
|
||||
cltvCount++
|
||||
|
||||
// An HTLC on the commitment transaction of the remote party,
|
||||
// that can be swept with the preimage.
|
||||
case lnwallet.HtlcAcceptedRemoteSuccess:
|
||||
weightEstimate.AddWitnessInput(
|
||||
lnwallet.OfferedHtlcSuccessWitnessSize,
|
||||
)
|
||||
sweepInputs = append(sweepInputs, input)
|
||||
|
||||
default:
|
||||
log.Warnf("kindergarten output in nursery store "+
|
||||
"contains unexpected witness type: %v",
|
||||
input.WitnessType())
|
||||
}
|
||||
}
|
||||
|
||||
txWeight := int64(weightEstimate.Weight())
|
||||
|
||||
return sweepInputs, txWeight, csvCount, cltvCount
|
||||
// init initializes the random generator for random input rescheduling.
|
||||
func init() {
|
||||
rand.Seed(time.Now().Unix())
|
||||
}
|
||||
|
902
sweep/sweeper_test.go
Normal file
@ -0,0 +1,902 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"os"
|
||||
"runtime/debug"
|
||||
"runtime/pprof"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/build"
|
||||
"github.com/lightningnetwork/lnd/keychain"
|
||||
"github.com/lightningnetwork/lnd/lnwallet"
|
||||
)
|
||||
|
||||
var (
|
||||
testLog = build.NewSubLogger("SWPR_TEST", nil)
|
||||
|
||||
testMaxSweepAttempts = 3
|
||||
|
||||
testMaxInputsPerTx = 3
|
||||
)
|
||||
|
||||
type sweeperTestContext struct {
|
||||
t *testing.T
|
||||
|
||||
sweeper *UtxoSweeper
|
||||
notifier *MockNotifier
|
||||
estimator *mockFeeEstimator
|
||||
backend *mockBackend
|
||||
store *MockSweeperStore
|
||||
|
||||
timeoutChan chan chan time.Time
|
||||
publishChan chan wire.MsgTx
|
||||
}
|
||||
|
||||
var (
|
||||
spendableInputs []*BaseInput
|
||||
testInputCount int
|
||||
|
||||
testPubKey, _ = btcec.ParsePubKey([]byte{
|
||||
0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
|
||||
0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
|
||||
0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
|
||||
0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
|
||||
0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
|
||||
0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
|
||||
0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
|
||||
0xb4, 0x12, 0xa3,
|
||||
}, btcec.S256())
|
||||
)
|
||||
|
||||
func createTestInput(value int64, witnessType lnwallet.WitnessType) BaseInput {
|
||||
hash := chainhash.Hash{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
byte(testInputCount)}
|
||||
|
||||
input := MakeBaseInput(
|
||||
&wire.OutPoint{
|
||||
Hash: hash,
|
||||
},
|
||||
witnessType,
|
||||
&lnwallet.SignDescriptor{
|
||||
Output: &wire.TxOut{
|
||||
Value: value,
|
||||
},
|
||||
KeyDesc: keychain.KeyDescriptor{
|
||||
PubKey: testPubKey,
|
||||
},
|
||||
},
|
||||
0,
|
||||
)
|
||||
|
||||
testInputCount++
|
||||
|
||||
return input
|
||||
}
|
||||
|
||||
func init() {
|
||||
// Create a set of test spendable inputs.
|
||||
for i := 0; i < 5; i++ {
|
||||
input := createTestInput(int64(10000+i*500),
|
||||
lnwallet.CommitmentTimeLock)
|
||||
|
||||
spendableInputs = append(spendableInputs, &input)
|
||||
}
|
||||
}
|
||||
|
||||
func createSweeperTestContext(t *testing.T) *sweeperTestContext {
|
||||
notifier := NewMockNotifier(t)
|
||||
|
||||
store := NewMockSweeperStore()
|
||||
|
||||
backend := newMockBackend(notifier)
|
||||
|
||||
estimator := newMockFeeEstimator(10000, 1000)
|
||||
|
||||
publishChan := make(chan wire.MsgTx, 2)
|
||||
ctx := &sweeperTestContext{
|
||||
notifier: notifier,
|
||||
publishChan: publishChan,
|
||||
t: t,
|
||||
estimator: estimator,
|
||||
backend: backend,
|
||||
store: store,
|
||||
timeoutChan: make(chan chan time.Time, 1),
|
||||
}
|
||||
|
||||
var outputScriptCount byte
|
||||
ctx.sweeper = New(&UtxoSweeperConfig{
|
||||
Notifier: notifier,
|
||||
PublishTransaction: func(tx *wire.MsgTx) error {
|
||||
log.Tracef("Publishing tx %v", tx.TxHash())
|
||||
err := backend.publishTransaction(tx)
|
||||
select {
|
||||
case publishChan <- *tx:
|
||||
case <-time.After(defaultTestTimeout):
|
||||
t.Fatalf("unexpected tx published")
|
||||
}
|
||||
return err
|
||||
},
|
||||
NewBatchTimer: func() <-chan time.Time {
|
||||
c := make(chan time.Time, 1)
|
||||
ctx.timeoutChan <- c
|
||||
return c
|
||||
},
|
||||
Store: store,
|
||||
Signer: &mockSigner{},
|
||||
SweepTxConfTarget: 1,
|
||||
ChainIO: &mockChainIO{},
|
||||
GenSweepScript: func() ([]byte, error) {
|
||||
script := []byte{outputScriptCount}
|
||||
outputScriptCount++
|
||||
return script, nil
|
||||
},
|
||||
Estimator: estimator,
|
||||
MaxInputsPerTx: testMaxInputsPerTx,
|
||||
MaxSweepAttempts: testMaxSweepAttempts,
|
||||
NextAttemptDeltaFunc: func(attempts int) int32 {
|
||||
// Use delta func without random factor.
|
||||
return 1 << uint(attempts-1)
|
||||
},
|
||||
})
|
||||
|
||||
ctx.sweeper.Start()
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (ctx *sweeperTestContext) tick() {
|
||||
testLog.Trace("Waiting for tick to be consumed")
|
||||
select {
|
||||
case c := <-ctx.timeoutChan:
|
||||
select {
|
||||
case c <- time.Time{}:
|
||||
testLog.Trace("Tick")
|
||||
case <-time.After(defaultTestTimeout):
|
||||
debug.PrintStack()
|
||||
ctx.t.Fatal("tick timeout - tick not consumed")
|
||||
}
|
||||
case <-time.After(defaultTestTimeout):
|
||||
debug.PrintStack()
|
||||
ctx.t.Fatal("tick timeout - no new timer created")
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *sweeperTestContext) assertNoNewTimer() {
|
||||
select {
|
||||
case <-ctx.timeoutChan:
|
||||
ctx.t.Fatal("no new timer expected")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *sweeperTestContext) finish(expectedGoroutineCount int) {
|
||||
// We assume that when finish is called, sweeper has finished all its
|
||||
// goroutines. This implies that the waitgroup is empty.
|
||||
signalChan := make(chan struct{})
|
||||
go func() {
|
||||
ctx.sweeper.wg.Wait()
|
||||
close(signalChan)
|
||||
}()
|
||||
|
||||
// Simulate exits of the expected number of running goroutines.
|
||||
for i := 0; i < expectedGoroutineCount; i++ {
|
||||
ctx.sweeper.wg.Done()
|
||||
}
|
||||
|
||||
// We now expect the Wait to succeed.
|
||||
select {
|
||||
case <-signalChan:
|
||||
case <-time.After(time.Second):
|
||||
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||
|
||||
ctx.t.Fatalf("lingering goroutines detected after test " +
|
||||
"is finished")
|
||||
}
|
||||
|
||||
// Restore waitgroup state to what it was before.
|
||||
ctx.sweeper.wg.Add(expectedGoroutineCount)
|
||||
|
||||
// Stop sweeper.
|
||||
ctx.sweeper.Stop()
|
||||
|
||||
// We should have consumed and asserted all published transactions in
|
||||
// our unit tests.
|
||||
ctx.assertNoTx()
|
||||
ctx.assertNoNewTimer()
|
||||
if !ctx.backend.isDone() {
|
||||
ctx.t.Fatal("unconfirmed txes remaining")
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *sweeperTestContext) assertNoTx() {
|
||||
ctx.t.Helper()
|
||||
select {
|
||||
case <-ctx.publishChan:
|
||||
ctx.t.Fatalf("unexpected transactions published")
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *sweeperTestContext) receiveTx() wire.MsgTx {
|
||||
ctx.t.Helper()
|
||||
var tx wire.MsgTx
|
||||
select {
|
||||
case tx = <-ctx.publishChan:
|
||||
return tx
|
||||
case <-time.After(5 * time.Second):
|
||||
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||
|
||||
ctx.t.Fatalf("tx not published")
|
||||
}
|
||||
return tx
|
||||
}
|
||||
|
||||
func (ctx *sweeperTestContext) expectResult(c chan Result, expected error) {
|
||||
ctx.t.Helper()
|
||||
select {
|
||||
case result := <-c:
|
||||
if result.Err != expected {
|
||||
ctx.t.Fatalf("expected %v result, but got %v",
|
||||
expected, result.Err,
|
||||
)
|
||||
}
|
||||
case <-time.After(defaultTestTimeout):
|
||||
ctx.t.Fatalf("no result received")
|
||||
}
|
||||
}
|
||||
|
||||
// TestSuccess tests the sweeper happy flow.
|
||||
func TestSuccess(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
resultChan, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
sweepTx := ctx.receiveTx()
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
select {
|
||||
case result := <-resultChan:
|
||||
if result.Err != nil {
|
||||
t.Fatalf("expected successful spend, but received "+
|
||||
"error %v instead", result.Err)
|
||||
}
|
||||
if result.Tx.TxHash() != sweepTx.TxHash() {
|
||||
t.Fatalf("expected sweep tx ")
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("no result received")
|
||||
}
|
||||
|
||||
ctx.finish(1)
|
||||
|
||||
// Assert that last tx is stored in the database so we can republish
|
||||
// on restart.
|
||||
lastTx, err := ctx.store.GetLastPublishedTx()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if lastTx == nil || sweepTx.TxHash() != lastTx.TxHash() {
|
||||
t.Fatalf("last tx not stored")
|
||||
}
|
||||
}
|
||||
|
||||
// TestDust asserts that inputs that are not big enough to rise above the dust
|
||||
// limit are held back until the total set does surpass the limit.
|
||||
func TestDust(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
// Sweeping a single output produces a tx of 486 weight units. With the
|
||||
// test fee rate, the sweep tx will pay 4860 sat in fees.
|
||||
//
|
||||
// Create an input so that the output after paying fees is still
|
||||
// positive (400 sat), but less than the dust limit (537 sat) for the
|
||||
// sweep tx output script (P2WPKH).
|
||||
dustInput := createTestInput(5260, lnwallet.CommitmentTimeLock)
|
||||
|
||||
_, err := ctx.sweeper.SweepInput(&dustInput)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// No sweep transaction is expected now. The sweeper should recognize
|
||||
// that the sweep output will not be relayed and not generate the tx.
|
||||
|
||||
// Sweep another input that brings the tx output above the dust limit.
|
||||
largeInput := createTestInput(100000, lnwallet.CommitmentTimeLock)
|
||||
|
||||
_, err = ctx.sweeper.SweepInput(&largeInput)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
// The second input brings the sweep output above the dust limit. We
|
||||
// expect a sweep tx now.
|
||||
|
||||
sweepTx := ctx.receiveTx()
|
||||
if len(sweepTx.TxIn) != 2 {
|
||||
t.Fatalf("Expected tx to sweep 2 inputs, but contains %v "+
|
||||
"inputs instead", len(sweepTx.TxIn))
|
||||
}
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
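As a cross-check of the dust arithmetic in the comment above, here is a small standalone sketch (not part of this change); the 486 weight units, the 10000 sat/kw rate of the mock estimator, and the 537 sat P2WPKH dust limit are the values the test assumes.

package main

import "fmt"

func main() {
	const (
		feePerKW    = 10000 // sat per kiloweight, the mock estimator's rate
		sweepWeight = 486   // weight units of a single-input sweep tx
		inputValue  = 5260  // sat, the dust test input
		dustLimit   = 537   // sat, P2WPKH dust threshold at the relay fee
	)

	fee := feePerKW * sweepWeight / 1000 // 4860 sat
	output := inputValue - fee           // 400 sat

	// The remaining 400 sat is positive but below the dust limit, so no
	// sweep tx is expected until more value is added to the set.
	fmt.Println(fee, output, output < dustLimit)
}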
||||
|
||||
// TestNegativeInput asserts that no inputs with a negative yield are swept.
|
||||
// Negative yield means that the value minus the added fee is negative.
|
||||
func TestNegativeInput(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
// Sweep an input large enough to cover fees, so in any case the tx
|
||||
// output will be above the dust limit.
|
||||
largeInput := createTestInput(100000, lnwallet.CommitmentNoDelay)
|
||||
largeInputResult, err := ctx.sweeper.SweepInput(&largeInput)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Sweep an additional input with a negative net yield. The weight of
|
||||
// the HtlcAcceptedRemoteSuccess input type adds more in fees than its
|
||||
// value at the current fee level.
|
||||
negInput := createTestInput(2900, lnwallet.HtlcOfferedRemoteTimeout)
|
||||
negInputResult, err := ctx.sweeper.SweepInput(&negInput)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Sweep a third input that has a smaller output than the previous one,
|
||||
// but yields positively because of its lower weight.
|
||||
positiveInput := createTestInput(2800, lnwallet.CommitmentNoDelay)
|
||||
positiveInputResult, err := ctx.sweeper.SweepInput(&positiveInput)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
// We expect that a sweep tx is published now, but it should only
|
||||
// contain the large and the positive yield inputs. The negative yield
|
||||
// input should stay out of sweeps until fees come down to give it a
|
||||
// positive net yield.
|
||||
sweepTx1 := ctx.receiveTx()
|
||||
|
||||
if !testTxIns(&sweepTx1, []*wire.OutPoint{
|
||||
largeInput.OutPoint(), positiveInput.OutPoint(),
|
||||
}) {
|
||||
t.Fatal("Tx does not contain expected inputs")
|
||||
}
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.expectResult(largeInputResult, nil)
|
||||
ctx.expectResult(positiveInputResult, nil)
|
||||
|
||||
// Lower fee rate so that the negative input is no longer negative.
|
||||
ctx.estimator.updateFees(1000, 1000)
|
||||
|
||||
// Create another large input
|
||||
secondLargeInput := createTestInput(100000, lnwallet.CommitmentNoDelay)
|
||||
secondLargeInputResult, err := ctx.sweeper.SweepInput(&secondLargeInput)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
sweepTx2 := ctx.receiveTx()
|
||||
if !testTxIns(&sweepTx2, []*wire.OutPoint{
|
||||
secondLargeInput.OutPoint(), negInput.OutPoint(),
|
||||
}) {
|
||||
t.Fatal("Tx does not contain expected inputs")
|
||||
}
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.expectResult(secondLargeInputResult, nil)
|
||||
ctx.expectResult(negInputResult, nil)
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
func testTxIns(tx *wire.MsgTx, inputs []*wire.OutPoint) bool {
|
||||
if len(tx.TxIn) != len(inputs) {
|
||||
return false
|
||||
}
|
||||
|
||||
ins := make(map[wire.OutPoint]struct{})
|
||||
for _, in := range tx.TxIn {
|
||||
ins[in.PreviousOutPoint] = struct{}{}
|
||||
}
|
||||
|
||||
for _, expectedIn := range inputs {
|
||||
if _, ok := ins[*expectedIn]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TestChunks asserts that large sets of inputs are split into multiple txes.
|
||||
func TestChunks(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
// Sweep five inputs.
|
||||
for _, input := range spendableInputs[:5] {
|
||||
_, err := ctx.sweeper.SweepInput(input)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
// We expect two txes to be published because of the max input count of
|
||||
// three.
|
||||
sweepTx1 := ctx.receiveTx()
|
||||
if len(sweepTx1.TxIn) != 3 {
|
||||
t.Fatalf("Expected first tx to sweep 3 inputs, but contains %v "+
|
||||
"inputs instead", len(sweepTx1.TxIn))
|
||||
}
|
||||
|
||||
sweepTx2 := ctx.receiveTx()
|
||||
if len(sweepTx2.TxIn) != 2 {
|
||||
t.Fatalf("Expected first tx to sweep 2 inputs, but contains %v "+
|
||||
"inputs instead", len(sweepTx1.TxIn))
|
||||
}
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
// TestRemoteSpend asserts that remote spends are properly detected and handled
|
||||
// both before the sweep is published as well as after.
|
||||
func TestRemoteSpend(t *testing.T) {
|
||||
t.Run("pre-sweep", func(t *testing.T) {
|
||||
testRemoteSpend(t, false)
|
||||
})
|
||||
t.Run("post-sweep", func(t *testing.T) {
|
||||
testRemoteSpend(t, true)
|
||||
})
|
||||
}
|
||||
|
||||
func testRemoteSpend(t *testing.T, postSweep bool) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
resultChan1, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resultChan2, err := ctx.sweeper.SweepInput(spendableInputs[1])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Spend the input with an unknown tx.
|
||||
remoteTx := &wire.MsgTx{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: *(spendableInputs[0].OutPoint()),
|
||||
},
|
||||
},
|
||||
}
|
||||
err = ctx.backend.publishTransaction(remoteTx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if postSweep {
|
||||
ctx.tick()
|
||||
|
||||
// Tx publication by the sweeper returns ErrDoubleSpend. The sweeper
|
||||
// will retry the inputs without reporting a result, since they could
|
||||
// have been spent by the remote party.
|
||||
ctx.receiveTx()
|
||||
}
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
select {
|
||||
case result := <-resultChan1:
|
||||
if result.Err != ErrRemoteSpend {
|
||||
t.Fatalf("expected remote spend")
|
||||
}
|
||||
if result.Tx.TxHash() != remoteTx.TxHash() {
|
||||
t.Fatalf("expected remote spend tx")
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Fatalf("no result received")
|
||||
}
|
||||
|
||||
if !postSweep {
|
||||
// Assert that the sweeper sweeps the remaining input.
|
||||
ctx.tick()
|
||||
sweepTx := ctx.receiveTx()
|
||||
|
||||
if len(sweepTx.TxIn) != 1 {
|
||||
t.Fatal("expected sweep to only sweep the one remaining output")
|
||||
}
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.expectResult(resultChan2, nil)
|
||||
|
||||
ctx.finish(1)
|
||||
} else {
|
||||
// Expected sweeper to be still listening for spend of the
|
||||
// error input.
|
||||
ctx.finish(2)
|
||||
|
||||
select {
|
||||
case <-resultChan2:
|
||||
t.Fatalf("no result expected for error input")
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestIdempotency asserts that offering the same input multiple times is
|
||||
// handled correctly.
|
||||
func TestIdempotency(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
resultChan1, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
resultChan2, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
ctx.receiveTx()
|
||||
|
||||
resultChan3, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Spend the input of the sweep tx.
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.expectResult(resultChan1, nil)
|
||||
ctx.expectResult(resultChan2, nil)
|
||||
ctx.expectResult(resultChan3, nil)
|
||||
|
||||
// Offer the same input again. The sweeper will register a spend ntfn
|
||||
// for this input. Because the input has already been spent, it will
|
||||
// immediately receive the spend notification with a spending tx hash.
|
||||
// Because the sweeper kept track of all of its sweep txes, it will
|
||||
// recognize the spend as its own.
|
||||
resultChan4, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ctx.expectResult(resultChan4, nil)
|
||||
|
||||
// Timer is still running, but spend notification was delivered before
|
||||
// it expired.
|
||||
ctx.tick()
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
// TestNoInputs asserts that the sweeper does nothing if no inputs are offered.
|
||||
func TestNoInputs(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
// No tx should appear. This is asserted in finish().
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
// TestRestart asserts that the sweeper picks up sweeping properly after
|
||||
// a restart.
|
||||
func TestRestart(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
// Sweep input and expect sweep tx.
|
||||
_, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ctx.tick()
|
||||
|
||||
ctx.receiveTx()
|
||||
|
||||
// Restart sweeper.
|
||||
ctx.sweeper.Stop()
|
||||
|
||||
ctx.sweeper = New(ctx.sweeper.cfg)
|
||||
ctx.sweeper.Start()
|
||||
|
||||
// Expect last tx to be republished.
|
||||
ctx.receiveTx()
|
||||
|
||||
// Simulate other subsystem (eg contract resolver) re-offering inputs.
|
||||
spendChan1, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
spendChan2, err := ctx.sweeper.SweepInput(spendableInputs[1])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Spend inputs of sweep txes and verify that spend channels signal
|
||||
// spends.
|
||||
ctx.backend.mine()
|
||||
|
||||
// Sweeper should recognize that its sweep tx of the previous run is
|
||||
// spending the input.
|
||||
select {
|
||||
case result := <-spendChan1:
|
||||
if result.Err != nil {
|
||||
t.Fatalf("expected successful sweep")
|
||||
}
|
||||
case <-time.After(defaultTestTimeout):
|
||||
t.Fatalf("no result received")
|
||||
}
|
||||
|
||||
// Timer tick should trigger republishing a sweep for the remaining
|
||||
// input.
|
||||
ctx.tick()
|
||||
|
||||
ctx.receiveTx()
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
select {
|
||||
case result := <-spendChan2:
|
||||
if result.Err != nil {
|
||||
t.Fatalf("expected successful sweep")
|
||||
}
|
||||
case <-time.After(defaultTestTimeout):
|
||||
t.Fatalf("no result received")
|
||||
}
|
||||
|
||||
// Restart sweeper again. No action is expected.
|
||||
ctx.sweeper.Stop()
|
||||
ctx.sweeper = New(ctx.sweeper.cfg)
|
||||
ctx.sweeper.Start()
|
||||
|
||||
// Expect last tx to be republished.
|
||||
ctx.receiveTx()
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
// TestRestartRemoteSpend asserts that the sweeper picks up sweeping properly after
|
||||
// a restart with remote spend.
|
||||
func TestRestartRemoteSpend(t *testing.T) {
|
||||
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
// Sweep input.
|
||||
_, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Sweep another input.
|
||||
_, err = ctx.sweeper.SweepInput(spendableInputs[1])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
sweepTx := ctx.receiveTx()
|
||||
|
||||
// Restart sweeper.
|
||||
ctx.sweeper.Stop()
|
||||
|
||||
ctx.sweeper = New(ctx.sweeper.cfg)
|
||||
ctx.sweeper.Start()
|
||||
|
||||
// Expect last tx to be republished.
|
||||
ctx.receiveTx()
|
||||
|
||||
// Replace the sweep tx with a remote tx spending input 1.
|
||||
ctx.backend.deleteUnconfirmed(sweepTx.TxHash())
|
||||
|
||||
remoteTx := &wire.MsgTx{
|
||||
TxIn: []*wire.TxIn{
|
||||
{
|
||||
PreviousOutPoint: *(spendableInputs[1].OutPoint()),
|
||||
},
|
||||
},
|
||||
}
|
||||
err = ctx.backend.publishTransaction(remoteTx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Mine remote spending tx.
|
||||
ctx.backend.mine()
|
||||
|
||||
// Simulate other subsystem (eg contract resolver) re-offering input 0.
|
||||
spendChan, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Expect sweeper to construct a new tx, because input 1 was spent
|
||||
// remotely.
|
||||
ctx.tick()
|
||||
|
||||
ctx.receiveTx()
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.expectResult(spendChan, nil)
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
// TestRestartConfirmed asserts that the sweeper picks up sweeping properly after
|
||||
// a restart with a confirmation of our own sweep tx.
|
||||
func TestRestartConfirmed(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
// Sweep input.
|
||||
_, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
ctx.receiveTx()
|
||||
|
||||
// Restart sweeper.
|
||||
ctx.sweeper.Stop()
|
||||
|
||||
ctx.sweeper = New(ctx.sweeper.cfg)
|
||||
ctx.sweeper.Start()
|
||||
|
||||
// Expect last tx to be republished.
|
||||
ctx.receiveTx()
|
||||
|
||||
// Mine the sweep tx.
|
||||
ctx.backend.mine()
|
||||
|
||||
// Simulate other subsystem (eg contract resolver) re-offering input 0.
|
||||
spendChan, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Here we expect again a successful sweep.
|
||||
ctx.expectResult(spendChan, nil)
|
||||
|
||||
// Timer started but not needed because spend ntfn was sent.
|
||||
ctx.tick()
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
// TestRestartRepublish asserts that sweeper republishes the last published
|
||||
// tx on restart.
|
||||
func TestRestartRepublish(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
_, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
sweepTx := ctx.receiveTx()
|
||||
|
||||
// Restart sweeper again. No action is expected.
|
||||
ctx.sweeper.Stop()
|
||||
ctx.sweeper = New(ctx.sweeper.cfg)
|
||||
ctx.sweeper.Start()
|
||||
|
||||
republishedTx := ctx.receiveTx()
|
||||
|
||||
if sweepTx.TxHash() != republishedTx.TxHash() {
|
||||
t.Fatalf("last tx not republished")
|
||||
}
|
||||
|
||||
// Mine the tx to conclude the test properly.
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
// TestRetry tests the sweeper retry flow.
|
||||
func TestRetry(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
resultChan0, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
// We expect a sweep to be published.
|
||||
ctx.receiveTx()
|
||||
|
||||
// New block arrives. This should trigger a new sweep attempt timer
|
||||
// start.
|
||||
ctx.notifier.NotifyEpoch(1000)
|
||||
|
||||
// Offer a fresh input.
|
||||
resultChan1, err := ctx.sweeper.SweepInput(spendableInputs[1])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
// Two txes are expected to be published, because new and retry inputs
|
||||
// are separated.
|
||||
ctx.receiveTx()
|
||||
ctx.receiveTx()
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.expectResult(resultChan0, nil)
|
||||
ctx.expectResult(resultChan1, nil)
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
||||
|
||||
// TestGiveUp asserts that the sweeper gives up on an input if it can't be swept
|
||||
// after a configured number of attempts.
|
||||
func TestGiveUp(t *testing.T) {
|
||||
ctx := createSweeperTestContext(t)
|
||||
|
||||
resultChan0, err := ctx.sweeper.SweepInput(spendableInputs[0])
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx.tick()
|
||||
|
||||
// We expect a sweep to be published at height 100 (mockChainIOHeight).
|
||||
ctx.receiveTx()
|
||||
|
||||
// Because of MaxSweepAttempts, two more sweeps will be attempted. We
|
||||
// configured exponential back-off without randomness for the test. We
|
||||
// expect the second attempt to happen at height 101 and the third at
|
||||
// height 103. At that point, the input is expected to be failed.
|
||||
|
||||
// Second attempt
|
||||
ctx.notifier.NotifyEpoch(101)
|
||||
ctx.tick()
|
||||
ctx.receiveTx()
|
||||
|
||||
// Third attempt
|
||||
ctx.notifier.NotifyEpoch(103)
|
||||
ctx.tick()
|
||||
ctx.receiveTx()
|
||||
|
||||
ctx.expectResult(resultChan0, ErrTooManyAttempts)
|
||||
|
||||
ctx.backend.mine()
|
||||
|
||||
ctx.finish(1)
|
||||
}
|
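The retry heights asserted in TestGiveUp follow from the deterministic NextAttemptDeltaFunc configured in createSweeperTestContext. A standalone sketch of that schedule, for illustration only:

package main

import "fmt"

func main() {
	// Deterministic back-off used by the test config: 1, 2, 4, ... blocks.
	nextAttemptDelta := func(attempts int) int32 {
		return 1 << uint(attempts-1)
	}

	height := int32(100) // mockChainIOHeight, height of the first attempt
	for attempts := 1; attempts <= 2; attempts++ {
		height += nextAttemptDelta(attempts)
		fmt.Printf("attempt %d retried at height %d\n", attempts+1, height)
	}
	// Output:
	// attempt 2 retried at height 101
	// attempt 3 retried at height 103
}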
258
sweep/test_utils.go
Normal file
@ -0,0 +1,258 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/lightningnetwork/lnd/chainntnfs"
|
||||
"github.com/lightningnetwork/lnd/lnwallet"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultTestTimeout = 5 * time.Second
|
||||
mockChainIOHeight = int32(100)
|
||||
)
|
||||
|
||||
type mockSigner struct {
|
||||
}
|
||||
|
||||
func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx,
|
||||
signDesc *lnwallet.SignDescriptor) ([]byte, error) {
|
||||
|
||||
return []byte{}, nil
|
||||
}
|
||||
|
||||
func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx,
|
||||
signDesc *lnwallet.SignDescriptor) (*lnwallet.InputScript, error) {
|
||||
|
||||
return &lnwallet.InputScript{}, nil
|
||||
}
|
||||
|
||||
// MockNotifier simulates the chain notifier for test purposes. This type is
|
||||
// exported because it is used in nursery tests.
|
||||
type MockNotifier struct {
|
||||
confChannel map[chainhash.Hash]chan *chainntnfs.TxConfirmation
|
||||
epochChan map[chan *chainntnfs.BlockEpoch]int32
|
||||
spendChan map[wire.OutPoint][]chan *chainntnfs.SpendDetail
|
||||
spends map[wire.OutPoint]*wire.MsgTx
|
||||
mutex sync.RWMutex
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
// NewMockNotifier instantiates a new mock notifier.
|
||||
func NewMockNotifier(t *testing.T) *MockNotifier {
|
||||
return &MockNotifier{
|
||||
confChannel: make(map[chainhash.Hash]chan *chainntnfs.TxConfirmation),
|
||||
epochChan: make(map[chan *chainntnfs.BlockEpoch]int32),
|
||||
spendChan: make(map[wire.OutPoint][]chan *chainntnfs.SpendDetail),
|
||||
spends: make(map[wire.OutPoint]*wire.MsgTx),
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
// NotifyEpoch simulates a new epoch arriving.
|
||||
func (m *MockNotifier) NotifyEpoch(height int32) {
|
||||
for epochChan, chanHeight := range m.epochChan {
|
||||
// Only send notifications if the height is greater than the
|
||||
// height the caller passed into the register call.
|
||||
if chanHeight >= height {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Debugf("Notifying height %v to listener", height)
|
||||
|
||||
select {
|
||||
case epochChan <- &chainntnfs.BlockEpoch{
|
||||
Height: height,
|
||||
}:
|
||||
case <-time.After(defaultTestTimeout):
|
||||
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||
|
||||
m.t.Fatal("epoch event not consumed")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ConfirmTx simulates a tx confirming.
|
||||
func (m *MockNotifier) ConfirmTx(txid *chainhash.Hash, height uint32) error {
|
||||
confirm := &chainntnfs.TxConfirmation{
|
||||
BlockHeight: height,
|
||||
}
|
||||
select {
|
||||
case m.getConfChannel(txid) <- confirm:
|
||||
case <-time.After(defaultTestTimeout):
|
||||
return fmt.Errorf("confirmation not consumed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SpendOutpoint simulates a utxo being spent.
|
||||
func (m *MockNotifier) SpendOutpoint(outpoint wire.OutPoint,
|
||||
spendingTx wire.MsgTx) {
|
||||
|
||||
log.Debugf("Spending outpoint %v", outpoint)
|
||||
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
channels, ok := m.spendChan[outpoint]
|
||||
if ok {
|
||||
for _, channel := range channels {
|
||||
m.sendSpend(channel, &outpoint, &spendingTx)
|
||||
}
|
||||
}
|
||||
|
||||
m.spends[outpoint] = &spendingTx
|
||||
}
|
||||
|
||||
func (m *MockNotifier) sendSpend(channel chan *chainntnfs.SpendDetail,
|
||||
outpoint *wire.OutPoint,
|
||||
spendingTx *wire.MsgTx) {
|
||||
|
||||
spenderTxHash := spendingTx.TxHash()
|
||||
channel <- &chainntnfs.SpendDetail{
|
||||
SpenderTxHash: &spenderTxHash,
|
||||
SpendingTx: spendingTx,
|
||||
SpentOutPoint: outpoint,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterConfirmationsNtfn registers for tx confirm notifications.
|
||||
func (m *MockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
|
||||
_ []byte, numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent,
|
||||
error) {
|
||||
|
||||
return &chainntnfs.ConfirmationEvent{
|
||||
Confirmed: m.getConfChannel(txid),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (m *MockNotifier) getConfChannel(
|
||||
txid *chainhash.Hash) chan *chainntnfs.TxConfirmation {
|
||||
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
channel, ok := m.confChannel[*txid]
|
||||
if ok {
|
||||
return channel
|
||||
}
|
||||
channel = make(chan *chainntnfs.TxConfirmation)
|
||||
m.confChannel[*txid] = channel
|
||||
|
||||
return channel
|
||||
}
|
||||
|
||||
// RegisterBlockEpochNtfn registers a block notification.
|
||||
func (m *MockNotifier) RegisterBlockEpochNtfn(
|
||||
bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {
|
||||
|
||||
log.Tracef("Mock block ntfn registered")
|
||||
|
||||
m.mutex.Lock()
|
||||
epochChan := make(chan *chainntnfs.BlockEpoch)
|
||||
bestHeight := int32(0)
|
||||
if bestBlock != nil {
|
||||
bestHeight = bestBlock.Height
|
||||
}
|
||||
m.epochChan[epochChan] = bestHeight
|
||||
m.mutex.Unlock()
|
||||
|
||||
return &chainntnfs.BlockEpochEvent{
|
||||
Epochs: epochChan,
|
||||
Cancel: func() {
|
||||
log.Tracef("Mock block ntfn cancelled")
|
||||
m.mutex.Lock()
|
||||
delete(m.epochChan, epochChan)
|
||||
m.mutex.Unlock()
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start the notifier.
|
||||
func (m *MockNotifier) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop the notifier.
|
||||
func (m *MockNotifier) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterSpendNtfn registers for spend notifications.
|
||||
func (m *MockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
|
||||
_ []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {
|
||||
|
||||
// Add channel to global spend ntfn map.
|
||||
m.mutex.Lock()
|
||||
|
||||
channels, ok := m.spendChan[*outpoint]
|
||||
if !ok {
|
||||
channels = make([]chan *chainntnfs.SpendDetail, 0)
|
||||
}
|
||||
|
||||
channel := make(chan *chainntnfs.SpendDetail, 1)
|
||||
channels = append(channels, channel)
|
||||
m.spendChan[*outpoint] = channels
|
||||
|
||||
// Check if this output has already been spent.
|
||||
spendingTx, spent := m.spends[*outpoint]
|
||||
|
||||
m.mutex.Unlock()
|
||||
|
||||
// If output has been spent already, signal now. Do this outside the
|
||||
// lock to prevent a deadlock.
|
||||
if spent {
|
||||
m.sendSpend(channel, outpoint, spendingTx)
|
||||
}
|
||||
|
||||
return &chainntnfs.SpendEvent{
|
||||
Spend: channel,
|
||||
Cancel: func() {
|
||||
log.Infof("Cancelling RegisterSpendNtfn for %v",
|
||||
outpoint)
|
||||
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
channels := m.spendChan[*outpoint]
|
||||
for i, c := range channels {
|
||||
if c == channel {
|
||||
channels[i] = channels[len(channels)-1]
|
||||
m.spendChan[*outpoint] =
|
||||
channels[:len(channels)-1]
|
||||
}
|
||||
}
|
||||
|
||||
close(channel)
|
||||
|
||||
log.Infof("Spend ntfn channel closed for %v",
|
||||
outpoint)
|
||||
},
|
||||
}, nil
|
||||
}
|
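The "signal outside the lock" comment above describes a general register-then-replay pattern. A stripped-down sketch of the same idea, not tied to the lnd types:

package main

import (
	"fmt"
	"sync"
)

type watcher struct {
	mu     sync.Mutex
	spent  bool          // whether the event already happened
	notify []chan string // registered listeners
}

func (w *watcher) register() <-chan string {
	// Buffered so an immediate replay below cannot block.
	c := make(chan string, 1)

	w.mu.Lock()
	w.notify = append(w.notify, c)
	alreadySpent := w.spent
	w.mu.Unlock()

	// Deliver the historical event outside the lock, mirroring
	// RegisterSpendNtfn, so a same-goroutine consumer cannot deadlock.
	if alreadySpent {
		c <- "spend"
	}
	return c
}

func main() {
	w := &watcher{spent: true}
	fmt.Println(<-w.register())
}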
||||
|
||||
type mockChainIO struct{}
|
||||
|
||||
func (m *mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) {
|
||||
return nil, mockChainIOHeight, nil
|
||||
}
|
||||
|
||||
func (m *mockChainIO) GetUtxo(op *wire.OutPoint, pkScript []byte,
|
||||
heightHint uint32) (*wire.TxOut, error) {
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
|
||||
return nil, nil
|
||||
}
|
335
sweep/txgenerator.go
Normal file
@ -0,0 +1,335 @@
|
||||
package sweep
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/btcsuite/btcd/blockchain"
|
||||
"github.com/btcsuite/btcd/txscript"
|
||||
"github.com/btcsuite/btcd/wire"
|
||||
"github.com/btcsuite/btcutil"
|
||||
"github.com/btcsuite/btcwallet/wallet/txrules"
|
||||
"github.com/lightningnetwork/lnd/lnwallet"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultMaxInputsPerTx specifies the default maximum number of inputs
|
||||
// allowed in a single sweep tx. If more need to be swept, multiple txes
|
||||
// are created and published.
|
||||
DefaultMaxInputsPerTx = 100
|
||||
)
|
||||
|
||||
// inputSet is a set of inputs that can be used as the basis to generate a tx
|
||||
// on.
|
||||
type inputSet []Input
|
||||
|
||||
// generateInputPartitionings goes through all given inputs and constructs sets
|
||||
// of inputs that can be used to generate a sensible transaction. Each set
|
||||
// contains up to the configured maximum number of inputs. Negative yield
|
||||
// inputs are skipped. No input sets with a total value after fees below the
|
||||
// dust limit are returned.
|
||||
func generateInputPartitionings(sweepableInputs []Input,
|
||||
relayFeePerKW, feePerKW lnwallet.SatPerKWeight,
|
||||
maxInputsPerTx int) ([]inputSet, error) {
|
||||
|
||||
// Calculate dust limit based on the P2WPKH output script of the sweep
|
||||
// txes.
|
||||
dustLimit := txrules.GetDustThreshold(
|
||||
lnwallet.P2WPKHSize,
|
||||
btcutil.Amount(relayFeePerKW.FeePerKVByte()),
|
||||
)
|
||||
|
||||
// Sort input by yield. We will start constructing input sets starting
|
||||
// with the highest yield inputs. This is to prevent the construction
|
||||
// of a set with an output below the dust limit, causing the sweep
|
||||
// process to stop, while there are still higher value inputs
|
||||
// available. It also allows us to stop evaluating more inputs when the
|
||||
// first input in this ordering is encountered with a negative yield.
|
||||
//
|
||||
// Yield is calculated as the difference between value and added fee
|
||||
// for this input. The fee calculation excludes fee components that are
|
||||
// common to all inputs, as those wouldn't influence the order. The
|
||||
// single component that is differentiating is witness size.
|
||||
//
|
||||
// For witness size, the upper limit is taken. The actual size depends
|
||||
// on the signature length, which is not known yet at this point.
|
||||
yields := make(map[wire.OutPoint]int64)
|
||||
for _, input := range sweepableInputs {
|
||||
size, err := getInputWitnessSizeUpperBound(input)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed adding input weight: %v", err)
|
||||
}
|
||||
|
||||
yields[*input.OutPoint()] = input.SignDesc().Output.Value -
|
||||
int64(feePerKW.FeeForWeight(int64(size)))
|
||||
}
|
||||
|
||||
sort.Slice(sweepableInputs, func(i, j int) bool {
|
||||
return yields[*sweepableInputs[i].OutPoint()] >
|
||||
yields[*sweepableInputs[j].OutPoint()]
|
||||
})
|
||||
|
||||
// Select blocks of inputs up to the configured maximum number.
|
||||
var sets []inputSet
|
||||
for len(sweepableInputs) > 0 {
|
||||
// Get the maximum number of inputs from sweepableInputs that
|
||||
// we can use to create a positive yielding set from.
|
||||
count, outputValue := getPositiveYieldInputs(
|
||||
sweepableInputs, maxInputsPerTx, feePerKW,
|
||||
)
|
||||
|
||||
// If there are no positive yield inputs left, we can stop
|
||||
// here.
|
||||
if count == 0 {
|
||||
return sets, nil
|
||||
}
|
||||
|
||||
// If the output value of this block of inputs does not reach
|
||||
// the dust limit, stop sweeping. Because of the sorting,
|
||||
// continuing with the remaining inputs will only lead to sets
|
||||
// with an even lower output value.
|
||||
if outputValue < dustLimit {
|
||||
log.Debugf("Set value %v below dust limit of %v",
|
||||
outputValue, dustLimit)
|
||||
return sets, nil
|
||||
}
|
||||
|
||||
log.Infof("Candidate sweep set of size=%v, has yield=%v",
|
||||
count, outputValue)
|
||||
|
||||
sets = append(sets, sweepableInputs[:count])
|
||||
sweepableInputs = sweepableInputs[count:]
|
||||
}
|
||||
|
||||
return sets, nil
|
||||
}
|
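A simplified, self-contained sketch of the yield ordering described above: yield is the input value minus the fee attributable to its witness weight, and inputs are sorted highest-yield first. The concrete values and witness sizes below are made up for illustration.

package main

import (
	"fmt"
	"sort"
)

type candidate struct {
	value       int64 // output value in sat
	witnessSize int64 // upper-bound witness weight contributed by the input
}

func main() {
	const feePerKW = 10000 // sat per kiloweight

	inputs := []candidate{
		{value: 3000, witnessSize: 449}, // HTLC-style input, heavy witness
		{value: 2800, witnessSize: 109}, // P2WKH-style input, light witness
		{value: 100000, witnessSize: 109},
	}

	yield := func(c candidate) int64 {
		return c.value - feePerKW*c.witnessSize/1000
	}

	// Highest-yield inputs first, mirroring generateInputPartitionings.
	sort.Slice(inputs, func(i, j int) bool {
		return yield(inputs[i]) > yield(inputs[j])
	})

	for _, c := range inputs {
		fmt.Printf("value=%d yield=%d\n", c.value, yield(c))
	}
}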
||||
|
||||
// getPositiveYieldInputs returns the largest number n for which the inputs
|
||||
// [0,n) of sweepableInputs all have a positive yield. Additionally, the
|
||||
// total value of these inputs minus the fee is returned.
|
||||
//
|
||||
// TODO(roasbeef): Consider including some negative yield inputs too to clean
|
||||
// up the utxo set even if it costs us some fees up front. In the spirit of
|
||||
// minimizing any negative externalities we cause for the Bitcoin system as a
|
||||
// whole.
|
||||
func getPositiveYieldInputs(sweepableInputs []Input, maxInputs int,
|
||||
feePerKW lnwallet.SatPerKWeight) (int, btcutil.Amount) {
|
||||
|
||||
var weightEstimate lnwallet.TxWeightEstimator
|
||||
|
||||
// Add the sweep tx output to the weight estimate.
|
||||
weightEstimate.AddP2WKHOutput()
|
||||
|
||||
var total, outputValue btcutil.Amount
|
||||
for idx, input := range sweepableInputs {
|
||||
// Can ignore error, because it has already been checked when
|
||||
// calculating the yields.
|
||||
size, _ := getInputWitnessSizeUpperBound(input)
|
||||
|
||||
// Keep a running weight estimate of the input set.
|
||||
weightEstimate.AddWitnessInput(size)
|
||||
|
||||
newTotal := total + btcutil.Amount(input.SignDesc().Output.Value)
|
||||
|
||||
weight := weightEstimate.Weight()
|
||||
fee := feePerKW.FeeForWeight(int64(weight))
|
||||
|
||||
// Calculate the output value if the current input would be
|
||||
// added to the set.
|
||||
newOutputValue := newTotal - fee
|
||||
|
||||
// If adding this input makes the total output value of the set
|
||||
// decrease, this is a negative yield input. It shouldn't be
|
||||
// added to the set. We return the current index as the number
|
||||
// of inputs, so the current input is being excluded.
|
||||
if newOutputValue <= outputValue {
|
||||
return idx, outputValue
|
||||
}
|
||||
|
||||
// Update running values.
|
||||
total = newTotal
|
||||
outputValue = newOutputValue
|
||||
|
||||
// Stop if max inputs is reached.
|
||||
if idx == maxInputs-1 {
|
||||
return maxInputs, outputValue
|
||||
}
|
||||
}
|
||||
|
||||
// We could add all inputs to the set, so return them all.
|
||||
return len(sweepableInputs), outputValue
|
||||
}
|
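The stopping rule of getPositiveYieldInputs can be illustrated with a small worked example (all numbers assumed): an input is only added while it increases the set's output value after fees.

package main

import "fmt"

func main() {
	const feePerKW = 10000 // sat per kiloweight

	// Already sorted by yield; the last entry adds more in fees than it
	// contributes in value.
	values := []int64{100000, 2800, 2900}
	weights := []int64{109, 109, 449} // added witness weight per input

	var total, outputValue, weight int64 = 0, 0, 300 // 300 WU assumed base tx weight
	for i := range values {
		weight += weights[i]
		total += values[i]

		// Output value of the set if this input were included.
		newOutputValue := total - feePerKW*weight/1000
		if newOutputValue <= outputValue {
			fmt.Printf("stop before input %d: output would drop to %d\n",
				i, newOutputValue)
			return
		}
		outputValue = newOutputValue
	}
}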
||||
|
||||
// createSweepTx builds a signed tx spending the inputs to the given output script.
|
||||
func createSweepTx(inputs []Input, outputPkScript []byte,
|
||||
currentBlockHeight uint32, feePerKw lnwallet.SatPerKWeight,
|
||||
signer lnwallet.Signer) (*wire.MsgTx, error) {
|
||||
|
||||
inputs, txWeight, csvCount, cltvCount := getWeightEstimate(inputs)
|
||||
|
||||
log.Infof("Creating sweep transaction for %v inputs (%v CSV, %v CLTV) "+
|
||||
"using %v sat/kw", len(inputs), csvCount, cltvCount,
|
||||
int64(feePerKw))
|
||||
|
||||
txFee := feePerKw.FeeForWeight(txWeight)
|
||||
|
||||
// Sum up the total value contained in the inputs.
|
||||
var totalSum btcutil.Amount
|
||||
for _, o := range inputs {
|
||||
totalSum += btcutil.Amount(o.SignDesc().Output.Value)
|
||||
}
|
||||
|
||||
// Sweep as much as possible, after subtracting txn fees.
|
||||
sweepAmt := int64(totalSum - txFee)
|
||||
|
||||
// Create the sweep transaction that we will be building. We use
|
||||
// version 2 as it is required for CSV. The txn will sweep the amount
|
||||
// after fees to the pkscript generated above.
|
||||
sweepTx := wire.NewMsgTx(2)
|
||||
sweepTx.AddTxOut(&wire.TxOut{
|
||||
PkScript: outputPkScript,
|
||||
Value: sweepAmt,
|
||||
})
|
||||
|
||||
sweepTx.LockTime = currentBlockHeight
|
||||
|
||||
// Add all inputs to the sweep transaction. Ensure that for each
|
||||
// csvInput, we set the sequence number properly.
|
||||
for _, input := range inputs {
|
||||
sweepTx.AddTxIn(&wire.TxIn{
|
||||
PreviousOutPoint: *input.OutPoint(),
|
||||
Sequence: input.BlocksToMaturity(),
|
||||
})
|
||||
}
|
||||
|
||||
// Before signing the transaction, check to ensure that it meets some
|
||||
// basic validity requirements.
|
||||
//
|
||||
// TODO(conner): add more control to sanity checks, allowing us to
|
||||
// delay spending "problem" outputs, e.g. possibly batching with other
|
||||
// classes if fees are too low.
|
||||
btx := btcutil.NewTx(sweepTx)
|
||||
if err := blockchain.CheckTransactionSanity(btx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
hashCache := txscript.NewTxSigHashes(sweepTx)
|
||||
|
||||
// With all the inputs in place, use each output's unique witness
|
||||
// function to generate the final witness required for spending.
|
||||
addWitness := func(idx int, tso Input) error {
|
||||
witness, err := tso.BuildWitness(
|
||||
signer, sweepTx, hashCache, idx,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sweepTx.TxIn[idx].Witness = witness
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Finally we'll attach a valid witness to each csv and cltv input
|
||||
// within the sweeping transaction.
|
||||
for i, input := range inputs {
|
||||
if err := addWitness(i, input); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return sweepTx, nil
|
||||
}
|
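For reference, a minimal sketch of the transaction skeleton that createSweepTx assembles, using the btcd wire package; witness generation, fee calculation, and the sanity check are omitted, and the height, value, and sequence below are assumed example numbers.

package main

import (
	"fmt"

	"github.com/btcsuite/btcd/wire"
)

func main() {
	const currentBlockHeight = 600000 // assumed height for the example

	// Version 2 is required for CSV (BIP 68) relative timelocks.
	sweepTx := wire.NewMsgTx(2)

	// Single sweep output paying to an assumed P2WPKH script.
	sweepTx.AddTxOut(&wire.TxOut{
		PkScript: make([]byte, 22),
		Value:    95000,
	})
	sweepTx.LockTime = currentBlockHeight

	// A CSV input sets its sequence to its remaining blocks to maturity.
	sweepTx.AddTxIn(&wire.TxIn{
		PreviousOutPoint: wire.OutPoint{Index: 0},
		Sequence:         144,
	})

	fmt.Println(sweepTx.TxHash())
}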
||||
|
||||
// getInputWitnessSizeUpperBound returns the maximum length of the witness for
|
||||
// the given input if it would be included in a tx.
|
||||
func getInputWitnessSizeUpperBound(input Input) (int, error) {
|
||||
switch input.WitnessType() {
|
||||
|
||||
// Outputs on a remote commitment transaction that pay directly
|
||||
// to us.
|
||||
case lnwallet.CommitmentNoDelay:
|
||||
return lnwallet.P2WKHWitnessSize, nil
|
||||
|
||||
// Outputs on a past commitment transaction that pay directly
|
||||
// to us.
|
||||
case lnwallet.CommitmentTimeLock:
|
||||
return lnwallet.ToLocalTimeoutWitnessSize, nil
|
||||
|
||||
// Outgoing second layer HTLC's that have confirmed within the
|
||||
// chain, and the output they produced is now mature enough to
|
||||
// sweep.
|
||||
case lnwallet.HtlcOfferedTimeoutSecondLevel:
|
||||
return lnwallet.ToLocalTimeoutWitnessSize, nil
|
||||
|
||||
// Incoming second layer HTLC's that have confirmed within the
|
||||
// chain, and the output they produced is now mature enough to
|
||||
// sweep.
|
||||
case lnwallet.HtlcAcceptedSuccessSecondLevel:
|
||||
return lnwallet.ToLocalTimeoutWitnessSize, nil
|
||||
|
||||
// An HTLC on the commitment transaction of the remote party,
|
||||
// that has had its absolute timelock expire.
|
||||
case lnwallet.HtlcOfferedRemoteTimeout:
|
||||
return lnwallet.AcceptedHtlcTimeoutWitnessSize, nil
|
||||
|
||||
// An HTLC on the commitment transaction of the remote party,
|
||||
// that can be swept with the preimage.
|
||||
case lnwallet.HtlcAcceptedRemoteSuccess:
|
||||
return lnwallet.OfferedHtlcSuccessWitnessSize, nil
|
||||
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("unexpected witness type: %v", input.WitnessType())
|
||||
}
|
||||
|
||||
// getWeightEstimate returns a weight estimate for the given inputs.
|
||||
// Additionally, it returns counts for the number of csv and cltv inputs.
|
||||
func getWeightEstimate(inputs []Input) ([]Input, int64, int, int) {
|
||||
// We initialize a weight estimator so we can accurately assess the
|
||||
// amount of fees we need to pay for this sweep transaction.
|
||||
//
|
||||
// TODO(roasbeef): can be more intelligent about buffering outputs to
|
||||
// be more efficient on-chain.
|
||||
var weightEstimate lnwallet.TxWeightEstimator
|
||||
|
||||
// Our sweep transaction will pay to a single segwit p2wkh address,
|
||||
// ensure it contributes to our weight estimate.
|
||||
weightEstimate.AddP2WKHOutput()
|
||||
|
||||
// For each output, use its witness type to determine the estimate
|
||||
// weight of its witness, and add it to the proper set of spendable
|
||||
// outputs.
|
||||
var (
|
||||
sweepInputs []Input
|
||||
csvCount, cltvCount int
|
||||
)
|
||||
for i := range inputs {
|
||||
input := inputs[i]
|
||||
|
||||
size, err := getInputWitnessSizeUpperBound(input)
|
||||
if err != nil {
|
||||
log.Warn(err)
|
||||
|
||||
// Skip inputs for which no weight estimate can be
|
||||
// given.
|
||||
continue
|
||||
}
|
||||
weightEstimate.AddWitnessInput(size)
|
||||
|
||||
switch input.WitnessType() {
|
||||
case lnwallet.CommitmentTimeLock,
|
||||
lnwallet.HtlcOfferedTimeoutSecondLevel,
|
||||
lnwallet.HtlcAcceptedSuccessSecondLevel:
|
||||
csvCount++
|
||||
case lnwallet.HtlcOfferedRemoteTimeout:
|
||||
cltvCount++
|
||||
}
|
||||
sweepInputs = append(sweepInputs, input)
|
||||
}
|
||||
|
||||
txWeight := int64(weightEstimate.Weight())
|
||||
|
||||
return sweepInputs, txWeight, csvCount, cltvCount
|
||||
}
|
@ -202,7 +202,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
estimator := &lnwallet.StaticFeeEstimator{FeePerKW: 12500}
|
||||
estimator := lnwallet.NewStaticFeeEstimator(12500, 0)
|
||||
feePerKw, err := estimator.EstimateFeePerKW(1)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
@ -316,7 +316,9 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
|
||||
}
|
||||
bobPool.Start()
|
||||
|
||||
chainIO := &mockChainIO{}
|
||||
chainIO := &mockChainIO{
|
||||
bestHeight: fundingBroadcastHeight,
|
||||
}
|
||||
wallet := &lnwallet.LightningWallet{
|
||||
WalletController: &mockWalletController{
|
||||
rootKey: aliceKeyPriv,
|
||||
|
361
utxonursery.go
@ -72,17 +72,6 @@ import (
|
||||
// the utxo nursery will sweep all KNDR outputs scheduled for that height
|
||||
// using a single txn.
|
||||
//
|
||||
// NOTE: Due to the fact that KNDR outputs can be dynamically aggregated and
|
||||
// swept, we make precautions to finalize the KNDR outputs at a particular
|
||||
// height on our first attempt to sweep it. Finalizing involves signing the
|
||||
// sweep transaction and persisting it in the nursery store, and recording
|
||||
// the last finalized height. Any attempts to replay an already finalized
|
||||
// height will result in broadcasting the already finalized txn, ensuring the
|
||||
// nursery does not broadcast different txids for the same batch of KNDR
|
||||
// outputs. The reason txids may change is due to the probabilistic nature of
|
||||
// generating the pkscript in the sweep txn's output, even if the set of
|
||||
// inputs remains static across attempts.
|
||||
//
|
||||
// - GRAD (kidOutput) outputs are KNDR outputs that have successfully been
|
||||
// swept into the user's wallet. A channel is considered mature once all of
|
||||
// its outputs, including two-stage htlcs, have entered the GRAD state,
|
||||
@ -183,10 +172,6 @@ type NurseryConfig struct {
|
||||
// determining outputs in the chain as confirmed.
|
||||
ConfDepth uint32
|
||||
|
||||
// SweepTxConfTarget assigns a confirmation target for sweep txes on
|
||||
// which the fee calculation will be based.
|
||||
SweepTxConfTarget uint32
|
||||
|
||||
// FetchClosedChannels provides access to a user's channels, such that
|
||||
// they can be marked fully closed after incubation has concluded.
|
||||
FetchClosedChannels func(pendingOnly bool) (
|
||||
@ -254,25 +239,22 @@ func (u *utxoNursery) Start() error {
|
||||
|
||||
utxnLog.Tracef("Starting UTXO nursery")
|
||||
|
||||
// 1. Start watching for new blocks, as this will drive the nursery
|
||||
// store's state machine.
|
||||
|
||||
// Register with the notifier to receive notifications for each newly
|
||||
// connected block. We register immediately on startup to ensure that
|
||||
// no blocks are missed while we are handling blocks that were missed
|
||||
// during the time the UTXO nursery was unavailable.
|
||||
newBlockChan, err := u.cfg.Notifier.RegisterBlockEpochNtfn(nil)
|
||||
// Retrieve the currently best known block. This is needed to have the
|
||||
// state machine catch up with the blocks we missed when we were down.
|
||||
bestHash, bestHeight, err := u.cfg.ChainIO.GetBestBlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set best known height to schedule late registrations properly.
|
||||
atomic.StoreUint32(&u.bestHeight, uint32(bestHeight))
|
||||
|
||||
// 2. Flush all fully-graduated channels from the pipeline.
|
||||
|
||||
// Load any pending close channels, which represents the super set of
|
||||
// all channels that may still be incubating.
|
||||
pendingCloseChans, err := u.cfg.FetchClosedChannels(true)
|
||||
if err != nil {
|
||||
newBlockChan.Cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
@ -281,7 +263,6 @@ func (u *utxoNursery) Start() error {
|
||||
for _, pendingClose := range pendingCloseChans {
|
||||
err := u.closeAndRemoveIfMature(&pendingClose.ChanPoint)
|
||||
if err != nil {
|
||||
newBlockChan.Cancel()
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -289,15 +270,6 @@ func (u *utxoNursery) Start() error {
|
||||
// TODO(conner): check if any fully closed channels can be removed from
|
||||
// utxn.
|
||||
|
||||
// Query the nursery store for the lowest block height we could be
|
||||
// incubating, which is taken to be the last height for which the
|
||||
// database was purged.
|
||||
lastGraduatedHeight, err := u.cfg.Store.LastGraduatedHeight()
|
||||
if err != nil {
|
||||
newBlockChan.Cancel()
|
||||
return err
|
||||
}
|
||||
|
||||
// 2. Restart spend ntfns for any preschool outputs, which are waiting
|
||||
// for the force closed commitment txn to confirm, or any second-layer
|
||||
// HTLC success transactions.
|
||||
@ -306,15 +278,24 @@ func (u *utxoNursery) Start() error {
|
||||
// point forward, we must close the nursery's quit channel if we detect
|
||||
// any failures during startup to ensure they terminate.
|
||||
if err := u.reloadPreschool(); err != nil {
|
||||
newBlockChan.Cancel()
|
||||
close(u.quit)
|
||||
return err
|
||||
}
|
||||
|
||||
// 3. Replay all crib and kindergarten outputs from last pruned to
|
||||
// current best height.
|
||||
if err := u.reloadClasses(lastGraduatedHeight); err != nil {
|
||||
newBlockChan.Cancel()
|
||||
// 3. Replay all crib and kindergarten outputs up to the current best
|
||||
// height.
|
||||
if err := u.reloadClasses(uint32(bestHeight)); err != nil {
|
||||
close(u.quit)
|
||||
return err
|
||||
}
|
||||
|
||||
// Start watching for new blocks, as this will drive the nursery store's
|
||||
// state machine.
|
||||
newBlockChan, err := u.cfg.Notifier.RegisterBlockEpochNtfn(&chainntnfs.BlockEpoch{
|
||||
Height: bestHeight,
|
||||
Hash: bestHash,
|
||||
})
|
||||
if err != nil {
|
||||
close(u.quit)
|
||||
return err
|
||||
}
|
||||
@ -672,123 +653,44 @@ func (u *utxoNursery) reloadPreschool() error {
|
||||
|
||||
// reloadClasses reinitializes any height-dependent state transitions for which
|
||||
// the utxonursery has not received confirmation, and replays the graduation of
|
||||
// all kindergarten and crib outputs for heights that have not been finalized.
|
||||
// all kindergarten and crib outputs for all heights up to the current block.
|
||||
// This allows the nursery to reinitialize all state to continue sweeping
|
||||
// outputs, even in the event that we missed blocks while offline.
|
||||
// reloadClasses is called during the startup of the UTXO Nursery.
|
||||
func (u *utxoNursery) reloadClasses(lastGradHeight uint32) error {
|
||||
// Begin by loading all of the still-active heights up to and including
|
||||
// the last height we successfully graduated.
|
||||
activeHeights, err := u.cfg.Store.HeightsBelowOrEqual(lastGradHeight)
|
||||
// outputs, even in the event that we missed blocks while offline. reloadClasses
|
||||
// is called during the startup of the UTXO Nursery.
|
||||
func (u *utxoNursery) reloadClasses(bestHeight uint32) error {
|
||||
// Load all active heights up to and including the current block.
|
||||
activeHeights, err := u.cfg.Store.HeightsBelowOrEqual(bestHeight)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(activeHeights) > 0 {
|
||||
utxnLog.Infof("Re-registering confirmations for %d already "+
|
||||
"graduated heights below height=%d", len(activeHeights),
|
||||
lastGradHeight)
|
||||
// Return early if nothing to sweep.
|
||||
if len(activeHeights) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
utxnLog.Infof("(Re)-sweeping %d heights below height=%d",
|
||||
len(activeHeights), bestHeight)
|
||||
|
||||
// Attempt to re-register notifications for any outputs still at these
// heights.
for _, classHeight := range activeHeights {
utxnLog.Debugf("Attempting to regraduate outputs at height=%v",
utxnLog.Debugf("Attempting to sweep outputs at height=%v",
classHeight)

if err = u.regraduateClass(classHeight); err != nil {
utxnLog.Errorf("Failed to regraduate outputs at "+
if err = u.graduateClass(classHeight); err != nil {
utxnLog.Errorf("Failed to sweep outputs at "+
"height=%v: %v", classHeight, err)
return err
}
}

// Get the most recently mined block.
_, bestHeight, err := u.cfg.ChainIO.GetBestBlock()
if err != nil {
return err
}

// If we haven't yet seen any registered force closes, or we're already
// caught up with the current best chain, then we can exit early.
if lastGradHeight == 0 || uint32(bestHeight) == lastGradHeight {
return nil
}

utxnLog.Infof("Processing outputs from missed blocks. Starting with "+
"blockHeight=%v, to current blockHeight=%v", lastGradHeight,
bestHeight)

// Loop through and check for graduating outputs at each of the missed
// block heights.
for curHeight := lastGradHeight + 1; curHeight <= uint32(bestHeight); curHeight++ {
utxnLog.Debugf("Attempting to graduate outputs at height=%v",
curHeight)

if err := u.graduateClass(curHeight); err != nil {
utxnLog.Errorf("Failed to graduate outputs at "+
"height=%v: %v", curHeight, err)
return err
}
}

utxnLog.Infof("UTXO Nursery is now fully synced")

return nil
}

// regraduateClass handles the steps involved in re-registering for
// confirmations for all still-active outputs at a particular height. This is
// used during restarts to ensure that any still-pending state transitions are
// properly registered, so they can be driven by the chain notifier. No
// transactions or signing are done as a result of this step.
func (u *utxoNursery) regraduateClass(classHeight uint32) error {
// Fetch all information about the crib and kindergarten outputs at
// this height. In addition to the outputs, we also retrieve the
// finalized kindergarten sweep txn, which will be nil if we have not
// attempted this height before, or if no kindergarten outputs exist at
// this height.
finalTx, kgtnOutputs, cribOutputs, err := u.cfg.Store.FetchClass(
classHeight)
if err != nil {
return err
}

if finalTx != nil {
utxnLog.Infof("Re-registering confirmation for kindergarten "+
"sweep transaction at height=%d ", classHeight)

err = u.sweepMatureOutputs(classHeight, finalTx, kgtnOutputs)
if err != nil {
utxnLog.Errorf("Failed to re-register for kindergarten "+
"sweep transaction at height=%d: %v",
classHeight, err)
return err
}
}

if len(cribOutputs) == 0 {
return nil
}

utxnLog.Infof("Re-registering confirmation for first-stage HTLC "+
"outputs at height=%d ", classHeight)

// Now, we broadcast all pre-signed htlc txns from the crib outputs at
// this height. There is no need to finalize these txns, since the txid
// is predetermined when signed in the wallet.
for i := range cribOutputs {
err := u.sweepCribOutput(classHeight, &cribOutputs[i])
if err != nil {
utxnLog.Errorf("Failed to re-register first-stage "+
"HTLC output %v", cribOutputs[i].OutPoint())
return err
}
}

return nil
}
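
For orientation: the nursery's outputs are handed to the central sweeper as generic inputs that report a confirmation height hint and a relative maturity delay, which is what the confHeight plumbing in this commit feeds. The snippet below is a reduced, illustrative sketch of that shape only; the type and method names are assumptions made for illustration, not the sweep package's actual interface.

package main

import "fmt"

// sweepableInput is an illustrative, reduced stand-in for the kind of input
// the nursery offers to the sweeper. The real interface lives in the sweep
// package; the names here are assumptions for illustration only.
type sweepableInput interface {
	// HeightHint is the earliest height at which a confirmed spend can
	// appear, used to bound chain rescans.
	HeightHint() uint32

	// BlocksToMaturity is the relative delay that must elapse after
	// confirmation before the input becomes spendable.
	BlocksToMaturity() uint32
}

// kidLikeOutput mimics how an output records its confirmation height and CSV
// delay and exposes them through the interface above.
type kidLikeOutput struct {
	confHeight       uint32
	blocksToMaturity uint32
}

func (k *kidLikeOutput) HeightHint() uint32       { return k.confHeight }
func (k *kidLikeOutput) BlocksToMaturity() uint32 { return k.blocksToMaturity }

// matureHeight computes the first height at which the input may be swept.
func matureHeight(in sweepableInput) uint32 {
	return in.HeightHint() + in.BlocksToMaturity()
}

func main() {
	out := &kidLikeOutput{confHeight: 1000, blocksToMaturity: 144}
	fmt.Println(matureHeight(out)) // 1144
}
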

// incubator is tasked with driving all state transitions that are dependent on
// the current height of the blockchain. As new blocks arrive, the incubator
// will attempt to spend outputs at the latest height. The asynchronous
@ -821,6 +723,11 @@ func (u *utxoNursery) incubator(newBlockChan *chainntnfs.BlockEpochEvent) {
// as signing and broadcasting a sweep txn that spends
// from all kindergarten outputs at this height.
height := uint32(epoch.Height)

// Update best known block height for late registrations
// to be scheduled properly.
atomic.StoreUint32(&u.bestHeight, height)

if err := u.graduateClass(height); err != nil {
utxnLog.Errorf("error while graduating "+
"class at height=%d: %v", height, err)
@ -843,14 +750,9 @@ func (u *utxoNursery) graduateClass(classHeight uint32) error {
u.mu.Lock()
defer u.mu.Unlock()

u.bestHeight = classHeight

// Fetch all information about the crib and kindergarten outputs at
// this height. In addition to the outputs, we also retrieve the
// finalized kindergarten sweep txn, which will be nil if we have not
// attempted this height before, or if no kindergarten outputs exist at
// this height.
finalTx, kgtnOutputs, cribOutputs, err := u.cfg.Store.FetchClass(
kgtnOutputs, cribOutputs, err := u.cfg.Store.FetchClass(
classHeight)
if err != nil {
return err
@ -859,64 +761,11 @@ func (u *utxoNursery) graduateClass(classHeight uint32) error {
utxnLog.Infof("Attempting to graduate height=%v: num_kids=%v, "+
"num_babies=%v", classHeight, len(kgtnOutputs), len(cribOutputs))

// Load the last finalized height, so we can determine if the
// kindergarten sweep txn should be crafted.
lastFinalizedHeight, err := u.cfg.Store.LastFinalizedHeight()
if err != nil {
return err
}

// If we haven't processed this height before, we finalize the
// graduating kindergarten outputs, by signing a sweep transaction that
// spends from them. This txn is persisted such that we never broadcast
// a different txn for the same height. This allows us to recover from
// failures, and watch for the correct txid.
if classHeight > lastFinalizedHeight {
// If this height has never been finalized, we have never
// generated a sweep txn for this height. Generate one if there
// are kindergarten outputs or cltv crib outputs to be spent.
// Offer the outputs to the sweeper and set up notifications that will
// transition the swept kindergarten outputs and cltvCrib into graduated
// outputs.
if len(kgtnOutputs) > 0 {
sweepInputs := make([]sweep.Input, len(kgtnOutputs))
for i := range kgtnOutputs {
sweepInputs[i] = &kgtnOutputs[i]
}

finalTx, err = u.cfg.Sweeper.CreateSweepTx(
sweepInputs, u.cfg.SweepTxConfTarget,
classHeight,
)
if err != nil {
utxnLog.Errorf("Failed to create sweep txn at "+
"height=%d", classHeight)
return err
}
}

// Persist the kindergarten sweep txn to the nursery store. It
// is safe to store a nil finalTx, which happens if there are
// no graduating kindergarten outputs.
err = u.cfg.Store.FinalizeKinder(classHeight, finalTx)
if err != nil {
utxnLog.Errorf("Failed to finalize kindergarten at "+
"height=%d", classHeight)

return err
}

// Log if the finalized transaction is non-trivial.
if finalTx != nil {
utxnLog.Infof("Finalized kindergarten at height=%d ",
classHeight)
}
}

// Now that the kindergarten sweep txn has either been finalized or
// restored, broadcast the txn, and set up notifications that will
// transition the swept kindergarten outputs and cltvCrib into
// graduated outputs.
if finalTx != nil {
err := u.sweepMatureOutputs(classHeight, finalTx, kgtnOutputs)
if err != nil {
if err := u.sweepMatureOutputs(classHeight, kgtnOutputs); err != nil {
utxnLog.Errorf("Failed to sweep %d kindergarten "+
"outputs at height=%d: %v",
len(kgtnOutputs), classHeight, err)
@ -925,8 +774,7 @@ func (u *utxoNursery) graduateClass(classHeight uint32) error {
}

// Now, we broadcast all pre-signed htlc txns from the csv crib outputs
// at this height. There is no need to finalize these txns, since the
// txid is predetermined when signed in the wallet.
// at this height.
for i := range cribOutputs {
err := u.sweepCribOutput(classHeight, &cribOutputs[i])
if err != nil {
@ -937,61 +785,31 @@ func (u *utxoNursery) graduateClass(classHeight uint32) error {
}
}

return u.cfg.Store.GraduateHeight(classHeight)
return nil
}
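
The graduateClass hunk above drops the per-height finalized sweep transaction in favour of offering each mature output to the central sweeper and waiting on a per-input result channel. The sketch below isolates that offer-and-wait pattern with a stubbed sweeper; every name here is a hypothetical stand-in rather than lnd's API.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// sweepResult stands in for the per-input result a sweeper reports back on
// its channel; the name and shape are illustrative, not the sweep package's.
type sweepResult struct {
	Err error
}

// offerAll hands each input to sweepFn and spawns one waiter per returned
// result channel, mirroring how the nursery now defers transaction
// construction and broadcast to a central sweeper.
func offerAll(inputs []string,
	sweepFn func(string) (chan sweepResult, error)) {

	var wg sync.WaitGroup
	for _, input := range inputs {
		// Copy the loop variable before capturing it below.
		input := input

		resultChan, err := sweepFn(input)
		if err != nil {
			fmt.Printf("unable to offer %v: %v\n", input, err)
			continue
		}

		wg.Add(1)
		go func() {
			defer wg.Done()

			result, ok := <-resultChan
			switch {
			case !ok:
				fmt.Printf("result channel closed for %v\n", input)
			case result.Err != nil:
				fmt.Printf("sweep of %v failed: %v\n", input, result.Err)
			default:
				fmt.Printf("%v swept\n", input)
			}
		}()
	}
	wg.Wait()
}

func main() {
	// A stub sweeper: success for the first input, an error for the second.
	results := []error{nil, errors.New("remote spend")}
	idx := 0
	sweepFn := func(in string) (chan sweepResult, error) {
		c := make(chan sweepResult, 1)
		c <- sweepResult{Err: results[idx]}
		idx++
		return c, nil
	}

	offerAll([]string{"outpoint-a", "outpoint-b"}, sweepFn)
}
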

// sweepMatureOutputs generates and broadcasts the transaction that transfers
// control of funds from a prior channel commitment transaction to the user's
// wallet. The outputs swept were previously time locked (either absolute or
// relative), but are not mature enough to sweep into the wallet.
func (u *utxoNursery) sweepMatureOutputs(classHeight uint32, finalTx *wire.MsgTx,
func (u *utxoNursery) sweepMatureOutputs(classHeight uint32,
kgtnOutputs []kidOutput) error {

utxnLog.Infof("Sweeping %v CSV-delayed outputs with sweep tx "+
"(txid=%v): %v", len(kgtnOutputs),
finalTx.TxHash(), newLogClosure(func() string {
return spew.Sdump(finalTx)
}),
)
utxnLog.Infof("Sweeping %v CSV-delayed outputs with sweep tx for "+
"height %v", len(kgtnOutputs), classHeight)

// With the sweep transaction fully signed, broadcast the transaction
// to the network. Additionally, we can stop tracking these outputs as
// they've just been swept.
err := u.cfg.PublishTransaction(finalTx)
if err != nil && err != lnwallet.ErrDoubleSpend {
utxnLog.Errorf("unable to broadcast sweep tx: %v, %v",
err, spew.Sdump(finalTx))
return err
}
for _, output := range kgtnOutputs {
// Create a local copy to prevent a pointer to the loop variable
// from being passed in with disastrous consequences.
local := output

return u.registerSweepConf(finalTx, kgtnOutputs, classHeight)
}

// registerSweepConf is responsible for registering a finalized kindergarten
// sweep transaction for confirmation notifications. If the confirmation was
// successfully registered, a goroutine will be spawned that waits for the
// confirmation, and graduates the provided kindergarten class within the
// nursery store.
func (u *utxoNursery) registerSweepConf(finalTx *wire.MsgTx,
kgtnOutputs []kidOutput, heightHint uint32) error {

finalTxID := finalTx.TxHash()

confChan, err := u.cfg.Notifier.RegisterConfirmationsNtfn(
&finalTxID, finalTx.TxOut[0].PkScript, u.cfg.ConfDepth,
heightHint,
)
resultChan, err := u.cfg.Sweeper.SweepInput(&local)
if err != nil {
utxnLog.Errorf("unable to register notification for "+
"sweep confirmation: %v", finalTxID)
return err
}

utxnLog.Infof("Registering sweep tx %v for confs at height=%d",
finalTxID, heightHint)

u.wg.Add(1)
go u.waitForSweepConf(heightHint, kgtnOutputs, confChan)
go u.waitForSweepConf(classHeight, &output, resultChan)
}

return nil
}
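
The local := output copy before SweepInput(&local) above guards against a classic Go pitfall: before Go 1.22, a range loop reuses one loop variable, so taking its address inside the loop hands every iteration the same pointer. A minimal, self-contained illustration with hypothetical data:

package main

import "fmt"

func main() {
	outputs := []int{1, 2, 3}

	// Buggy: prior to Go 1.22, &output aliases a single loop variable, so
	// every stored pointer ends up referring to the last element.
	var bad []*int
	for _, output := range outputs {
		bad = append(bad, &output)
	}

	// Fixed: copy the loop variable first, as sweepMatureOutputs does with
	// "local := output" before handing out a pointer.
	var good []*int
	for _, output := range outputs {
		local := output
		good = append(good, &local)
	}

	fmt.Println(*bad[0], *bad[1], *bad[2])    // 3 3 3 on pre-1.22 toolchains
	fmt.Println(*good[0], *good[1], *good[2]) // 1 2 3 always
}
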
@ -1002,16 +820,30 @@ func (u *utxoNursery) registerSweepConf(finalTx *wire.MsgTx,
// to mark any mature channels as fully closed in channeldb.
// NOTE(conner): this method MUST be called as a go routine.
func (u *utxoNursery) waitForSweepConf(classHeight uint32,
kgtnOutputs []kidOutput, confChan *chainntnfs.ConfirmationEvent) {
output *kidOutput, resultChan chan sweep.Result) {

defer u.wg.Done()

select {
case _, ok := <-confChan.Confirmed:
case result, ok := <-resultChan:
if !ok {
utxnLog.Errorf("Notification chan closed, can't" +
" advance %v graduating outputs",
len(kgtnOutputs))
" advance graduating output")
return
}

// In case of a remote spend, still graduate the output. There
// is no way to sweep it anymore.
if result.Err == sweep.ErrRemoteSpend {
utxnLog.Infof("Output %v was spent by remote party",
output.OutPoint())
break
}

if result.Err != nil {
utxnLog.Errorf("Failed to sweep %v at "+
"height=%d", output.OutPoint(),
classHeight)
return
}

@ -1024,34 +856,25 @@ func (u *utxoNursery) waitForSweepConf(classHeight uint32,

// TODO(conner): add retry logic?

// Mark the confirmed kindergarten outputs as graduated.
if err := u.cfg.Store.GraduateKinder(classHeight); err != nil {
utxnLog.Errorf("Unable to graduate %v kindergarten outputs: "+
"%v", len(kgtnOutputs), err)
// Mark the confirmed kindergarten output as graduated.
if err := u.cfg.Store.GraduateKinder(classHeight, output); err != nil {
utxnLog.Errorf("Unable to graduate kindergarten output %v: %v",
output.OutPoint(), err)
return
}

utxnLog.Infof("Graduated %d kindergarten outputs from height=%d",
len(kgtnOutputs), classHeight)
utxnLog.Infof("Graduated kindergarten output from height=%d",
classHeight)

// Iterate over the kid outputs and construct a set of all channel
// points to which they belong.
var possibleCloses = make(map[wire.OutPoint]struct{})
for _, kid := range kgtnOutputs {
possibleCloses[*kid.OriginChanPoint()] = struct{}{}

}

// Attempt to close each channel, only doing so if all of the channel's
// Attempt to close the channel, only doing so if all of the channel's
// outputs have been graduated.
for chanPoint := range possibleCloses {
if err := u.closeAndRemoveIfMature(&chanPoint); err != nil {
chanPoint := output.OriginChanPoint()
if err := u.closeAndRemoveIfMature(chanPoint); err != nil {
utxnLog.Errorf("Failed to close and remove channel %v",
chanPoint)
*chanPoint)
return
}
}
}
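
The incubator records the best known height with atomic.StoreUint32, and waitForPreschoolConf below reads it back with atomic.LoadUint32, so late registrations can pick up the current height without contending on the nursery's mutex. A small, self-contained illustration of that pattern, with hypothetical names:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// heightTracker publishes the best known block height from the goroutine that
// consumes block epochs to readers that must not race or block on a mutex.
type heightTracker struct {
	bestHeight uint32 // accessed atomically
}

func (h *heightTracker) onBlock(height uint32) {
	atomic.StoreUint32(&h.bestHeight, height)
}

func (h *heightTracker) current() uint32 {
	return atomic.LoadUint32(&h.bestHeight)
}

func main() {
	var (
		tracker heightTracker
		wg      sync.WaitGroup
	)

	// Writer: simulate arriving block epochs.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for height := uint32(1); height <= 100; height++ {
			tracker.onBlock(height)
		}
	}()

	// Reader: a late registration can sample the height at any time
	// without taking a lock.
	fmt.Println("height sampled concurrently:", tracker.current())

	wg.Wait()
	fmt.Println("final height:", tracker.current())
}
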

// sweepCribOutput broadcasts the crib output's htlc timeout txn, and sets up a
// notification that will advance it to the kindergarten bucket upon
@ -1216,7 +1039,8 @@ func (u *utxoNursery) waitForPreschoolConf(kid *kidOutput,
outputType = "Commitment"
}

err := u.cfg.Store.PreschoolToKinder(kid)
bestHeight := atomic.LoadUint32(&u.bestHeight)
err := u.cfg.Store.PreschoolToKinder(kid, bestHeight)
if err != nil {
utxnLog.Errorf("Unable to move %v output "+
"from preschool to kindergarten bucket: %v",
@ -1554,8 +1378,6 @@ type kidOutput struct {
// NOTE: This will only be set for: outgoing HTLC's on the commitment
// transaction of the remote party.
absoluteMaturity uint32

confHeight uint32
}

func makeKidOutput(outpoint, originChanPoint *wire.OutPoint,
@ -1569,9 +1391,14 @@ func makeKidOutput(outpoint, originChanPoint *wire.OutPoint,
isHtlc := (witnessType == lnwallet.HtlcAcceptedSuccessSecondLevel ||
witnessType == lnwallet.HtlcOfferedRemoteTimeout)

// heightHint can be safely set to zero here, because after this
// function returns, nursery will set a proper confirmation height in
// waitForTimeoutConf or waitForPreschoolConf.
heightHint := uint32(0)

return kidOutput{
breachedOutput: makeBreachedOutput(
outpoint, witnessType, nil, signDescriptor,
outpoint, witnessType, nil, signDescriptor, heightHint,
),
isHtlc: isHtlc,
originChanPoint: *originChanPoint,
@ -9,8 +9,9 @@ import (
"github.com/lightningnetwork/lnd/sweep"
"io/ioutil"
"math"
"os"
"reflect"
"sync"
"runtime/pprof"
"testing"
"time"

@ -19,7 +20,6 @@ import (
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/lnwallet"
)

@ -206,10 +206,10 @@ var (
amt: btcutil.Amount(13e7),
outpoint: outPoints[1],
witnessType: lnwallet.CommitmentTimeLock,
confHeight: uint32(1000),
},
originChanPoint: outPoints[0],
blocksToMaturity: uint32(42),
confHeight: uint32(1000),
},

{
@ -217,10 +217,10 @@ var (
amt: btcutil.Amount(24e7),
outpoint: outPoints[2],
witnessType: lnwallet.CommitmentTimeLock,
confHeight: uint32(1000),
},
originChanPoint: outPoints[0],
blocksToMaturity: uint32(42),
confHeight: uint32(1000),
},

{
@ -228,10 +228,10 @@ var (
amt: btcutil.Amount(2e5),
outpoint: outPoints[3],
witnessType: lnwallet.CommitmentTimeLock,
confHeight: uint32(500),
},
originChanPoint: outPoints[0],
blocksToMaturity: uint32(28),
confHeight: uint32(500),
},

{
@ -239,10 +239,10 @@ var (
amt: btcutil.Amount(10e6),
outpoint: outPoints[4],
witnessType: lnwallet.CommitmentTimeLock,
confHeight: uint32(500),
},
originChanPoint: outPoints[0],
blocksToMaturity: uint32(28),
confHeight: uint32(500),
},
}

@ -396,11 +396,14 @@ func TestBabyOutputSerialization(t *testing.T) {

type nurseryTestContext struct {
nursery *utxoNursery
notifier *nurseryMockNotifier
notifier *sweep.MockNotifier
chainIO *mockChainIO
publishChan chan wire.MsgTx
store *nurseryStoreInterceptor
restart func() bool
receiveTx func() wire.MsgTx
sweeper *sweep.UtxoSweeper
timeoutChan chan chan time.Time
t *testing.T
}

@ -430,17 +433,50 @@ func createNurseryTestContext(t *testing.T,
// test.
storeIntercepter := newNurseryStoreInterceptor(store)

notifier := newNurseryMockNotifier(t)
notifier := sweep.NewMockNotifier(t)

sweeper := sweep.New(&sweep.UtxoSweeperConfig{
publishChan := make(chan wire.MsgTx, 1)
publishFunc := func(tx *wire.MsgTx, source string) error {
utxnLog.Tracef("Publishing tx %v by %v", tx.TxHash(), source)
publishChan <- *tx
return nil
}

timeoutChan := make(chan chan time.Time)

chainIO := &mockChainIO{
bestHeight: 0,
}

sweeperStore := sweep.NewMockSweeperStore()

sweeperCfg := &sweep.UtxoSweeperConfig{
GenSweepScript: func() ([]byte, error) {
return []byte{}, nil
},
Estimator: &lnwallet.StaticFeeEstimator{},
Signer: &nurseryMockSigner{},
})
Notifier: notifier,
PublishTransaction: func(tx *wire.MsgTx) error {
return publishFunc(tx, "sweeper")
},
NewBatchTimer: func() <-chan time.Time {
c := make(chan time.Time, 1)
timeoutChan <- c
return c
},
ChainIO: chainIO,
Store: sweeperStore,
MaxInputsPerTx: 10,
MaxSweepAttempts: 5,
NextAttemptDeltaFunc: func(int) int32 { return 1 },
}

cfg := NurseryConfig{
sweeper := sweep.New(sweeperCfg)

sweeper.Start()

nurseryCfg := NurseryConfig{
Notifier: notifier,
FetchClosedChannels: func(pendingOnly bool) (
[]*channeldb.ChannelCloseSummary, error) {
@ -453,54 +489,91 @@ func createNurseryTestContext(t *testing.T,
}, nil
},
Store: storeIntercepter,
ChainIO: &mockChainIO{},
ChainIO: chainIO,
Sweeper: sweeper,
PublishTransaction: func(tx *wire.MsgTx) error {
return publishFunc(tx, "nursery")
},
}

publishChan := make(chan wire.MsgTx, 1)
cfg.PublishTransaction = func(tx *wire.MsgTx) error {
t.Logf("Publishing tx %v", tx.TxHash())
publishChan <- *tx
return nil
}

nursery := newUtxoNursery(&cfg)
nursery := newUtxoNursery(&nurseryCfg)
nursery.Start()

ctx := &nurseryTestContext{
nursery: nursery,
notifier: notifier,
chainIO: chainIO,
store: storeIntercepter,
publishChan: publishChan,
sweeper: sweeper,
timeoutChan: timeoutChan,
t: t,
}

ctx.restart = func() bool {
return checkStartStop(func() {
ctx.nursery.Stop()
// Simulate lnd restart.
ctx.nursery = newUtxoNursery(ctx.nursery.cfg)
ctx.nursery.Start()
})
}

ctx.receiveTx = func() wire.MsgTx {
var tx wire.MsgTx
select {
case tx = <-ctx.publishChan:
utxnLog.Debugf("Published tx %v", tx.TxHash())
return tx
case <-time.After(5 * time.Second):
case <-time.After(defaultTestTimeout):
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)

t.Fatalf("tx not published")
}
return tx
}

ctx.restart = func() bool {
return checkStartStop(func() {
utxnLog.Tracef("Restart sweeper and nursery")
// Simulate lnd restart.
ctx.nursery.Stop()

// Also restart sweeper to test behaviour as one unit.
//
// TODO(joostjager): Mock sweeper to test nursery in
// isolation.
ctx.sweeper.Stop()

// Find out if there is a last tx stored. If so, we
// expect it to be republished on startup.
hasLastTx, err := sweeperCfg.Store.GetLastPublishedTx()
if err != nil {
t.Fatal(err)
}

// Restart sweeper.
ctx.sweeper = sweep.New(sweeperCfg)
ctx.sweeper.Start()

// Receive last tx if expected.
if hasLastTx != nil {
utxnLog.Debugf("Expecting republish")
ctx.receiveTx()
} else {
utxnLog.Debugf("Expecting no republish")
}

// Restart nursery.
nurseryCfg.Sweeper = ctx.sweeper
ctx.nursery = newUtxoNursery(&nurseryCfg)
ctx.nursery.Start()

})
}

// Start with testing an immediate restart.
ctx.restart()

return ctx
}
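
In the test context above, NewBatchTimer pushes each newly armed timer channel onto timeoutChan, which is what lets the tick() helper further down fire the sweeper's batch deterministically instead of waiting on a real clock. A self-contained sketch of that timer-injection pattern, with hypothetical names and a single-goroutine batcher standing in for the sweeper:

package main

import (
	"fmt"
	"time"
)

// runBatcher collects items until the injected batch timer fires, then sends
// the whole batch on out. A single goroutine owns all state, so no locking is
// needed.
func runBatcher(in <-chan string, newBatchTimer func() <-chan time.Time,
	out chan<- []string) {

	var (
		pending []string
		timer   <-chan time.Time // nil until a batch is started
	)
	for {
		select {
		case item := <-in:
			if len(pending) == 0 {
				// First item of a batch: arm the (injected) timer.
				timer = newBatchTimer()
			}
			pending = append(pending, item)

		case <-timer:
			out <- pending
			return
		}
	}
}

func main() {
	// The test side holds a channel of timer channels, mirroring the
	// timeoutChan / NewBatchTimer wiring in the nursery test context.
	timeoutChan := make(chan chan time.Time)

	in := make(chan string)
	out := make(chan []string, 1)

	go runBatcher(in, func() <-chan time.Time {
		c := make(chan time.Time, 1)
		timeoutChan <- c
		return c
	}, out)

	in <- "sweep-input-1"

	// tick: hand the armed timer a value to trigger the flush right now.
	timer := <-timeoutChan
	timer <- time.Time{}

	fmt.Println(<-out) // [sweep-input-1]
}
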

func (ctx *nurseryTestContext) notifyEpoch(height int32) {
ctx.chainIO.bestHeight = height
ctx.notifier.NotifyEpoch(height)
}

func (ctx *nurseryTestContext) finish() {
// Add a final restart point in this state
ctx.restart()
@ -556,6 +629,8 @@ func (ctx *nurseryTestContext) finish() {
if len(activeHeights) > 0 {
ctx.t.Fatalf("Expected height index to be empty")
}

ctx.sweeper.Stop()
}

func createOutgoingRes(onLocalCommitment bool) *lnwallet.OutgoingHtlcResolution {
@ -703,6 +778,8 @@ func testRestartLoop(t *testing.T, test func(*testing.T,

return true
}
utxnLog.Debugf("Skipping restart point %v",
currentStartStopIdx)
return false
}

@ -739,7 +816,7 @@ func testNurseryOutgoingHtlcSuccessOnLocal(t *testing.T,
ctx.restart()

// Notify arrival of block where HTLC CLTV expires.
ctx.notifier.notifyEpoch(125)
ctx.notifyEpoch(125)

// This should trigger nursery to publish the timeout tx.
ctx.receiveTx()
@ -751,7 +828,7 @@ func testNurseryOutgoingHtlcSuccessOnLocal(t *testing.T,

// Confirm the timeout tx. This should promote the HTLC to KNDR state.
timeoutTxHash := outgoingRes.SignedTimeoutTx.TxHash()
if err := ctx.notifier.confirmTx(&timeoutTxHash, 126); err != nil {
if err := ctx.notifier.ConfirmTx(&timeoutTxHash, 126); err != nil {
t.Fatal(err)
}

@ -765,7 +842,7 @@ func testNurseryOutgoingHtlcSuccessOnLocal(t *testing.T,
ctx.restart()

// Notify arrival of block where second level HTLC unlocks.
ctx.notifier.notifyEpoch(128)
ctx.notifyEpoch(128)

// Check final sweep into wallet.
testSweepHtlc(t, ctx)
@ -790,7 +867,7 @@ func testNurseryOutgoingHtlcSuccessOnRemote(t *testing.T,
// resolving remote commitment tx.
//
// TODO(joostjager): This is probably not correct?
err := ctx.notifier.confirmTx(&outgoingRes.ClaimOutpoint.Hash, 124)
err := ctx.notifier.ConfirmTx(&outgoingRes.ClaimOutpoint.Hash, 124)
if err != nil {
t.Fatal(err)
}
@ -805,7 +882,7 @@ func testNurseryOutgoingHtlcSuccessOnRemote(t *testing.T,
ctx.restart()

// Notify arrival of block where HTLC CLTV expires.
ctx.notifier.notifyEpoch(125)
ctx.notifyEpoch(125)

// Check final sweep into wallet.
testSweepHtlc(t, ctx)
@ -840,7 +917,7 @@ func testNurseryCommitSuccessOnLocal(t *testing.T,
ctx.restart()

// Notify confirmation of the commitment tx.
err = ctx.notifier.confirmTx(&commitRes.SelfOutPoint.Hash, 124)
err = ctx.notifier.ConfirmTx(&commitRes.SelfOutPoint.Hash, 124)
if err != nil {
t.Fatal(err)
}
@ -855,7 +932,7 @@ func testNurseryCommitSuccessOnLocal(t *testing.T,
ctx.restart()

// Notify arrival of block where commit output CSV expires.
ctx.notifier.notifyEpoch(126)
ctx.notifyEpoch(126)

// Check final sweep into wallet.
testSweep(t, ctx, func() {
@ -876,27 +953,28 @@ func testSweepHtlc(t *testing.T, ctx *nurseryTestContext) {

func testSweep(t *testing.T, ctx *nurseryTestContext,
afterPublishAssert func()) {

// Wait for nursery to publish the sweep tx.
ctx.tick()
sweepTx := ctx.receiveTx()

if ctx.restart() {
// Restart will trigger rebroadcast of sweep tx.
sweepTx = ctx.receiveTx()
// Nursery reoffers its input. Sweeper needs a tick to create the sweep
// tx.
ctx.tick()
ctx.receiveTx()
}

afterPublishAssert()

// Confirm the sweep tx.
sweepTxHash := sweepTx.TxHash()
err := ctx.notifier.confirmTx(&sweepTxHash, 129)
if err != nil {
t.Fatal(err)
}
ctx.notifier.SpendOutpoint(sweepTx.TxIn[0].PreviousOutPoint, sweepTx)

// Wait for output to be promoted in store to GRAD.
select {
case <-ctx.store.graduateKinderChan:
case <-time.After(defaultTestTimeout):
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
t.Fatalf("output not graduated")
}

@ -907,6 +985,19 @@ func testSweep(t *testing.T, ctx *nurseryTestContext,
assertNurseryReportUnavailable(t, ctx.nursery)
}

func (ctx *nurseryTestContext) tick() {
select {
case c := <-ctx.timeoutChan:
select {
case c <- time.Time{}:
case <-time.After(defaultTestTimeout):
ctx.t.Fatal("tick timeout - tick not consumed")
}
case <-time.After(defaultTestTimeout):
ctx.t.Fatal("tick timeout - no new timer created")
}
}

type nurseryStoreInterceptor struct {
ns NurseryStore

@ -941,16 +1032,18 @@ func (i *nurseryStoreInterceptor) CribToKinder(babyOutput *babyOutput) error {
return err
}

func (i *nurseryStoreInterceptor) PreschoolToKinder(kidOutput *kidOutput) error {
err := i.ns.PreschoolToKinder(kidOutput)
func (i *nurseryStoreInterceptor) PreschoolToKinder(kidOutput *kidOutput,
lastGradHeight uint32) error {

err := i.ns.PreschoolToKinder(kidOutput, lastGradHeight)

i.preschoolToKinderChan <- struct{}{}

return err
}

func (i *nurseryStoreInterceptor) GraduateKinder(height uint32) error {
err := i.ns.GraduateKinder(height)
func (i *nurseryStoreInterceptor) GraduateKinder(height uint32, kid *kidOutput) error {
err := i.ns.GraduateKinder(height, kid)

i.graduateKinderChan <- struct{}{}

@ -961,30 +1054,12 @@ func (i *nurseryStoreInterceptor) FetchPreschools() ([]kidOutput, error) {
return i.ns.FetchPreschools()
}

func (i *nurseryStoreInterceptor) FetchClass(height uint32) (*wire.MsgTx,
func (i *nurseryStoreInterceptor) FetchClass(height uint32) (
[]kidOutput, []babyOutput, error) {

return i.ns.FetchClass(height)
}

func (i *nurseryStoreInterceptor) FinalizeKinder(height uint32,
tx *wire.MsgTx) error {

return i.ns.FinalizeKinder(height, tx)
}

func (i *nurseryStoreInterceptor) LastFinalizedHeight() (uint32, error) {
return i.ns.LastFinalizedHeight()
}

func (i *nurseryStoreInterceptor) GraduateHeight(height uint32) error {
return i.ns.GraduateHeight(height)
}

func (i *nurseryStoreInterceptor) LastGraduatedHeight() (uint32, error) {
return i.ns.LastGraduatedHeight()
}

func (i *nurseryStoreInterceptor) HeightsBelowOrEqual(height uint32) (
[]uint32, error) {

@ -1025,92 +1100,3 @@ func (m *nurseryMockSigner) ComputeInputScript(tx *wire.MsgTx,

return &lnwallet.InputScript{}, nil
}

type nurseryMockNotifier struct {
confChannel map[chainhash.Hash]chan *chainntnfs.TxConfirmation
epochChan chan *chainntnfs.BlockEpoch
spendChan chan *chainntnfs.SpendDetail
mutex sync.RWMutex
t *testing.T
}

func newNurseryMockNotifier(t *testing.T) *nurseryMockNotifier {
return &nurseryMockNotifier{
confChannel: make(map[chainhash.Hash]chan *chainntnfs.TxConfirmation),
epochChan: make(chan *chainntnfs.BlockEpoch),
spendChan: make(chan *chainntnfs.SpendDetail),
t: t,
}
}

func (m *nurseryMockNotifier) notifyEpoch(height int32) {
select {
case m.epochChan <- &chainntnfs.BlockEpoch{
Height: height,
}:
case <-time.After(defaultTestTimeout):
m.t.Fatal("epoch event not consumed")
}
}

func (m *nurseryMockNotifier) confirmTx(txid *chainhash.Hash, height uint32) error {
confirm := &chainntnfs.TxConfirmation{
BlockHeight: height,
}
select {
case m.getConfChannel(txid) <- confirm:
case <-time.After(defaultTestTimeout):
return fmt.Errorf("confirmation not consumed")
}
return nil
}

func (m *nurseryMockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash,
_ []byte, numConfs, heightHint uint32) (*chainntnfs.ConfirmationEvent,
error) {

return &chainntnfs.ConfirmationEvent{
Confirmed: m.getConfChannel(txid),
}, nil
}

func (m *nurseryMockNotifier) getConfChannel(
txid *chainhash.Hash) chan *chainntnfs.TxConfirmation {

m.mutex.Lock()
defer m.mutex.Unlock()

channel, ok := m.confChannel[*txid]
if ok {
return channel
}
channel = make(chan *chainntnfs.TxConfirmation)
m.confChannel[*txid] = channel

return channel
}

func (m *nurseryMockNotifier) RegisterBlockEpochNtfn(
bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) {
return &chainntnfs.BlockEpochEvent{
Epochs: m.epochChan,
Cancel: func() {},
}, nil
}

func (m *nurseryMockNotifier) Start() error {
return nil
}

func (m *nurseryMockNotifier) Stop() error {
return nil
}

func (m *nurseryMockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
_ []byte, heightHint uint32) (*chainntnfs.SpendEvent, error) {

return &chainntnfs.SpendEvent{
Spend: m.spendChan,
Cancel: func() {},
}, nil
}