From 6886a0117faca6688ed4361f7c423483daf6a3f6 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 15 Apr 2019 14:20:25 +0200 Subject: [PATCH 01/14] cnct: always create incoming contest resolver One of the first things the incoming contest resolver does is check whether the preimage is available and, if it is, convert itself into a success resolver. This behaviour makes it unnecessary to determine earlier in the process whether an incoming contest or a success resolver is needed. By having all incoming htlcs go through the incoming contest resolver, the number of execution paths is reduced and it becomes easier to ascertain that the implemented logic is correct. The only functional change in this commit is that a forwarded htlc for which the preimage is known is no longer settled when the htlc has already expired. Previously a success resolver would be instantiated directly, skipping the expiry height check. This created a risk that the success resolver would never finish, because an expired htlc could already have been swept by the remote party and the success resolver currently has no detection of this remote spend. With this change, the general direction that an expired htlc shouldn't be settled but instead given up on is implemented more consistently. This commit prepares for fixing edge cases related to hodl invoice on-chain resolution. --- contractcourt/channel_arbitrator.go | 16 ---------------- contractcourt/htlc_incoming_contest_resolver.go | 7 +++++-- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go index 80088d73..7aeb2e35 100644 --- a/contractcourt/channel_arbitrator.go +++ b/contractcourt/channel_arbitrator.go @@ -1214,26 +1214,10 @@ func (c *ChannelArbitrator) checkChainActions(height uint32, // either learn of it eventually from the outgoing HTLC, or the sender // will timeout the HTLC. for _, htlc := range c.activeHTLCs.incomingHTLCs { - // If we have the pre-image, then we should go on-chain to - // redeem the HTLC immediately. - if _, ok := c.cfg.PreimageDB.LookupPreimage(htlc.RHash); ok { - log.Tracef("ChannelArbitrator(%v): preimage for "+ "htlc=%x is known!", c.cfg.ChanPoint, htlc.RHash[:]) - actionMap[HtlcClaimAction] = append( actionMap[HtlcClaimAction], htlc, ) continue - } - log.Tracef("ChannelArbitrator(%v): watching chain to decide "+ "action for incoming htlc=%x", c.cfg.ChanPoint, htlc.RHash[:]) - // Otherwise, we don't yet have the pre-image, but should watch - // on-chain to see if either: the remote party times out the - // HTLC, or we learn of the pre-image. actionMap[HtlcIncomingWatchAction] = append( actionMap[HtlcIncomingWatchAction], htlc, ) diff --git a/contractcourt/htlc_incoming_contest_resolver.go b/contractcourt/htlc_incoming_contest_resolver.go index 4f2d16ba..8889484e 100644 --- a/contractcourt/htlc_incoming_contest_resolver.go +++ b/contractcourt/htlc_incoming_contest_resolver.go @@ -57,8 +57,11 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { return nil, err } - // If we're past the point of expiry of the HTLC, then at this point - // the sender can sweep it, so we'll end our lifetime. + // If we're past the point of expiry of the HTLC, then at this point the + // sender can sweep it, so we'll end our lifetime. Here we deliberately + // forego the chance that the sender doesn't sweep and we already have + // or will learn the preimage. 
Otherwise the resolver could potentially + // stay active indefinitely and the channel will never close properly. if uint32(currentHeight) >= h.htlcExpiry { // TODO(roasbeef): should also somehow check if outgoing is // resolved or not From d55a8b7b294fa5512e0b7bee068c6c645682a303 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 15 Apr 2019 14:24:43 +0200 Subject: [PATCH 02/14] channel+cnct: remove preimage from channel and resolution Now that the success resolver preimage field is always populated by the incoming contest resolver, preimage lookups earlier in the process (channel and channel arbitrator) can mostly be removed. --- breacharbiter_test.go | 6 ++-- contractcourt/chain_arbitrator.go | 4 +-- contractcourt/chain_watcher.go | 9 ++---- fundingmanager.go | 4 +-- htlcswitch/test_utils.go | 10 +++--- lnwallet/channel.go | 52 +++++++++++-------------------- lnwallet/channel_test.go | 46 ++++++++++----------------- lnwallet/interface.go | 16 ---------- lnwallet/test_utils.go | 6 ++-- lnwallet/transactions_test.go | 4 +-- peer.go | 6 ++-- rpcserver.go | 2 +- test_utils.go | 4 +-- witness_beacon.go | 2 -- 14 files changed, 53 insertions(+), 118 deletions(-) diff --git a/breacharbiter_test.go b/breacharbiter_test.go index 8be6e992..3d4ceb73 100644 --- a/breacharbiter_test.go +++ b/breacharbiter_test.go @@ -1844,14 +1844,12 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa Packager: channeldb.NewChannelPackager(shortChanID), } - pCache := newMockPreimageCache() - aliceSigner := &mockSigner{aliceKeyPriv} bobSigner := &mockSigner{bobKeyPriv} alicePool := lnwallet.NewSigPool(1, aliceSigner) channelAlice, err := lnwallet.NewLightningChannel( - aliceSigner, pCache, aliceChannelState, alicePool, + aliceSigner, aliceChannelState, alicePool, ) if err != nil { return nil, nil, nil, err @@ -1860,7 +1858,7 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa bobPool := lnwallet.NewSigPool(1, bobSigner) channelBob, err := lnwallet.NewLightningChannel( - bobSigner, pCache, bobChannelState, bobPool, + bobSigner, bobChannelState, bobPool, ) if err != nil { return nil, nil, nil, err diff --git a/contractcourt/chain_arbitrator.go b/contractcourt/chain_arbitrator.go index e21ba7ca..b7c5eb0d 100644 --- a/contractcourt/chain_arbitrator.go +++ b/contractcourt/chain_arbitrator.go @@ -259,7 +259,7 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel, // Finally, we'll force close the channel completing // the force close workflow. 
chanMachine, err := lnwallet.NewLightningChannel( - c.cfg.Signer, c.cfg.PreimageDB, channel, nil, + c.cfg.Signer, channel, nil, ) if err != nil { return nil, err @@ -375,7 +375,6 @@ func (c *ChainArbitrator) Start() error { chainWatcherConfig{ chanState: channel, notifier: c.cfg.Notifier, - pCache: c.cfg.PreimageDB, signer: c.cfg.Signer, isOurAddr: c.cfg.IsOurAddress, contractBreach: func(retInfo *lnwallet.BreachRetribution) error { @@ -709,7 +708,6 @@ func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) error chainWatcherConfig{ chanState: newChan, notifier: c.cfg.Notifier, - pCache: c.cfg.PreimageDB, signer: c.cfg.Signer, isOurAddr: c.cfg.IsOurAddress, contractBreach: func(retInfo *lnwallet.BreachRetribution) error { diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go index 34bbd6c7..746d8011 100644 --- a/contractcourt/chain_watcher.go +++ b/contractcourt/chain_watcher.go @@ -88,11 +88,6 @@ type chainWatcherConfig struct { // notified of output spends and when transactions are confirmed. notifier chainntnfs.ChainNotifier - // pCache is a reference to the shared preimage cache. We'll use this - // to see if we can settle any incoming HTLC's during a remote - // commitment close event. - pCache WitnessBeacon - // signer is the main signer instances that will be responsible for // signing any HTLC and commitment transaction generated by the state // machine. @@ -702,7 +697,7 @@ func (c *chainWatcher) dispatchLocalForceClose( "detected", c.cfg.chanState.FundingOutpoint) forceClose, err := lnwallet.NewLocalForceCloseSummary( - c.cfg.chanState, c.cfg.signer, c.cfg.pCache, + c.cfg.chanState, c.cfg.signer, commitSpend.SpendingTx, localCommit, ) if err != nil { @@ -795,7 +790,7 @@ func (c *chainWatcher) dispatchRemoteForceClose( // materials required to let each subscriber sweep the funds in the // channel on-chain. uniClose, err := lnwallet.NewUnilateralCloseSummary( - c.cfg.chanState, c.cfg.signer, c.cfg.pCache, commitSpend, + c.cfg.chanState, c.cfg.signer, commitSpend, remoteCommit, commitPoint, ) if err != nil { diff --git a/fundingmanager.go b/fundingmanager.go index e68f2473..240c4e9b 100644 --- a/fundingmanager.go +++ b/fundingmanager.go @@ -1712,7 +1712,7 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) { // Go on adding the channel to the channel graph, and crafting // channel announcements. lnChannel, err := lnwallet.NewLightningChannel( - nil, nil, completeChan, nil, + nil, completeChan, nil, ) if err != nil { fndgLog.Errorf("failed creating lnChannel: %v", err) @@ -2005,7 +2005,7 @@ func (f *fundingManager) handleFundingConfirmation(peer lnpeer.Peer, // We create the state-machine object which wraps the database state. 
lnChannel, err := lnwallet.NewLightningChannel( - nil, nil, completeChan, nil, + nil, completeChan, nil, ) if err != nil { return err diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go index 407c243b..bc1035b3 100644 --- a/htlcswitch/test_utils.go +++ b/htlcswitch/test_utils.go @@ -367,11 +367,9 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, aliceSigner := &mockSigner{aliceKeyPriv} bobSigner := &mockSigner{bobKeyPriv} - pCache := newMockPreimageCache() - alicePool := lnwallet.NewSigPool(runtime.NumCPU(), aliceSigner) channelAlice, err := lnwallet.NewLightningChannel( - aliceSigner, pCache, aliceChannelState, alicePool, + aliceSigner, aliceChannelState, alicePool, ) if err != nil { return nil, nil, nil, nil, err @@ -380,7 +378,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, bobPool := lnwallet.NewSigPool(runtime.NumCPU(), bobSigner) channelBob, err := lnwallet.NewLightningChannel( - bobSigner, pCache, bobChannelState, bobPool, + bobSigner, bobChannelState, bobPool, ) if err != nil { return nil, nil, nil, nil, err @@ -441,7 +439,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, } newAliceChannel, err := lnwallet.NewLightningChannel( - aliceSigner, nil, aliceStoredChannel, alicePool, + aliceSigner, aliceStoredChannel, alicePool, ) if err != nil { return nil, nil, errors.Errorf("unable to create new channel: %v", @@ -481,7 +479,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, } newBobChannel, err := lnwallet.NewLightningChannel( - bobSigner, nil, bobStoredChannel, bobPool, + bobSigner, bobStoredChannel, bobPool, ) if err != nil { return nil, nil, errors.Errorf("unable to create new channel: %v", diff --git a/lnwallet/channel.go b/lnwallet/channel.go index 02ca4160..e82293bd 100644 --- a/lnwallet/channel.go +++ b/lnwallet/channel.go @@ -1309,13 +1309,6 @@ type LightningChannel struct { // signatures, of which there may be hundreds. sigPool *SigPool - // pCache is the global preimage cache shared across all other - // LightningChannel instance. We'll use this cache either when we force - // close, or we detect that the remote party has force closed. If the - // preimage for an incoming HTLC is found in the cache, then we'll try - // to claim it on chain. - pCache PreimageCache - // Capacity is the total capacity of this channel. Capacity btcutil.Amount @@ -1368,7 +1361,7 @@ type LightningChannel struct { // settled channel state. Throughout state transitions, the channel will // automatically persist pertinent state to the database in an efficient // manner. -func NewLightningChannel(signer input.Signer, pCache PreimageCache, +func NewLightningChannel(signer input.Signer, state *channeldb.OpenChannel, sigPool *SigPool) (*LightningChannel, error) { @@ -1387,7 +1380,6 @@ func NewLightningChannel(signer input.Signer, pCache PreimageCache, lc := &LightningChannel{ Signer: signer, sigPool: sigPool, - pCache: pCache, currentHeight: localCommit.CommitHeight, remoteCommitChain: newCommitmentChain(), localCommitChain: newCommitmentChain(), @@ -5096,7 +5088,7 @@ type UnilateralCloseSummary struct { // which case we will attempt to sweep the non-HTLC output using the passed // commitPoint. 
func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Signer, - pCache PreimageCache, commitSpend *chainntnfs.SpendDetail, + commitSpend *chainntnfs.SpendDetail, remoteCommit channeldb.ChannelCommitment, commitPoint *btcec.PublicKey) (*UnilateralCloseSummary, error) { @@ -5113,7 +5105,6 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si SatPerKWeight(remoteCommit.FeePerKw), false, signer, remoteCommit.Htlcs, keyRing, &chanState.LocalChanCfg, &chanState.RemoteChanCfg, *commitSpend.SpenderTxHash, - pCache, ) if err != nil { return nil, fmt.Errorf("unable to create htlc "+ @@ -5211,11 +5202,11 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si // using this struct if we need to go on-chain for any reason, or if we detect // that the remote party broadcasts their commitment transaction. type IncomingHtlcResolution struct { - // Preimage is the preimage that will be used to satisfy the contract - // of the HTLC. + // Preimage is the preimage that will be used to satisfy the contract of + // the HTLC. // - // NOTE: This field will only be populated if we know the preimage at - // the time a unilateral or force close occurs. + // NOTE: This field will only be populated in the incoming contest + // resolver. Preimage [32]byte // SignedSuccessTx is the fully signed HTLC success transaction. This @@ -5448,7 +5439,7 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.ChannelConfig, commitHash chainhash.Hash, htlc *channeldb.HTLC, keyRing *CommitmentKeyRing, feePerKw SatPerKWeight, dustLimit btcutil.Amount, csvDelay uint32, - localCommit bool, preimage [32]byte) (*IncomingHtlcResolution, error) { + localCommit bool) (*IncomingHtlcResolution, error) { op := wire.OutPoint{ Hash: commitHash, @@ -5475,7 +5466,6 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan // With the script generated, we can completely populate the // SignDescriptor needed to sweep the output. return &IncomingHtlcResolution{ - Preimage: preimage, ClaimOutpoint: op, CsvDelay: csvDelay, SweepSignDesc: input.SignDescriptor{ @@ -5526,10 +5516,12 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan InputIndex: 0, } - // Next, we'll construct the full witness needed to satisfy the input - // of the success transaction. + // Next, we'll construct the full witness needed to satisfy the input of + // the success transaction. Don't specify the preimage yet. The preimage + // will be supplied by the contract resolver, either directly or when it + // becomes known. 
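// As an aside, a minimal sketch of how the contract resolver is expected to
// fill this witness slot in later; patch 04 below adds essentially this
// assignment in applyPreimage, with witness index 3 being the preimage
// position in the stack built by input.ReceiverHtlcSpendRedeem:
//
//	if h.htlcResolution.SignedSuccessTx != nil {
//		h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[3] = preimage[:]
//	}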
successWitness, err := input.ReceiverHtlcSpendRedeem( - htlc.Signature, preimage[:], signer, &successSignDesc, successTx, + htlc.Signature, nil, signer, &successSignDesc, successTx, ) if err != nil { return nil, err @@ -5554,7 +5546,6 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan keyRing.CommitPoint, localChanCfg.DelayBasePoint.PubKey, ) return &IncomingHtlcResolution{ - Preimage: preimage, SignedSuccessTx: successTx, CsvDelay: csvDelay, ClaimOutpoint: wire.OutPoint{ @@ -5604,7 +5595,7 @@ func (r *OutgoingHtlcResolution) HtlcPoint() wire.OutPoint { func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool, signer input.Signer, htlcs []channeldb.HTLC, keyRing *CommitmentKeyRing, localChanCfg, remoteChanCfg *channeldb.ChannelConfig, - commitHash chainhash.Hash, pCache PreimageCache) (*HtlcResolutions, error) { + commitHash chainhash.Hash) (*HtlcResolutions, error) { // TODO(roasbeef): don't need to swap csv delay? dustLimit := remoteChanCfg.DustLimit @@ -5628,19 +5619,11 @@ func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool, // If the HTLC is incoming, then we'll attempt to see if we // know the pre-image to the HTLC. if htlc.Incoming { - // We'll now query the preimage cache for the preimage - // for this HTLC. If it's present then we can fully - // populate this resolution. - preimage, _ := pCache.LookupPreimage(htlc.RHash) - // Otherwise, we'll create an incoming HTLC resolution // as we can satisfy the contract. - var pre [32]byte - copy(pre[:], preimage[:]) ihr, err := newIncomingHtlcResolution( signer, localChanCfg, commitHash, &htlc, keyRing, feePerKw, dustLimit, uint32(csvDelay), ourCommit, - pre, ) if err != nil { return nil, err @@ -5730,7 +5713,7 @@ func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) { localCommitment := lc.channelState.LocalCommitment summary, err := NewLocalForceCloseSummary( - lc.channelState, lc.Signer, lc.pCache, commitTx, + lc.channelState, lc.Signer, commitTx, localCommitment, ) if err != nil { @@ -5748,8 +5731,8 @@ func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) { // channel state. The passed commitTx must be a fully signed commitment // transaction corresponding to localCommit. func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Signer, - pCache PreimageCache, commitTx *wire.MsgTx, - localCommit channeldb.ChannelCommitment) (*LocalForceCloseSummary, error) { + commitTx *wire.MsgTx, localCommit channeldb.ChannelCommitment) ( + *LocalForceCloseSummary, error) { // Re-derive the original pkScript for to-self output within the // commitment transaction. 
We'll need this to find the corresponding @@ -5830,7 +5813,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si htlcResolutions, err := extractHtlcResolutions( SatPerKWeight(localCommit.FeePerKw), true, signer, localCommit.Htlcs, keyRing, &chanState.LocalChanCfg, - &chanState.RemoteChanCfg, txHash, pCache) + &chanState.RemoteChanCfg, txHash, + ) if err != nil { return nil, err } diff --git a/lnwallet/channel_test.go b/lnwallet/channel_test.go index 067e6258..3132fc56 100644 --- a/lnwallet/channel_test.go +++ b/lnwallet/channel_test.go @@ -19,7 +19,6 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" - "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" ) @@ -544,10 +543,6 @@ func TestForceClose(t *testing.T) { t.Fatalf("Can't update the channel state: %v", err) } - // Before we force close Alice's channel, we'll add the pre-image of - // Bob's HTLC to her preimage cache. - aliceChannel.pCache.AddPreimages(lntypes.Preimage(preimageBob)) - // With the cache populated, we'll now attempt the force close // initiated by Alice. closeSummary, err := aliceChannel.ForceClose() @@ -681,8 +676,11 @@ func TestForceClose(t *testing.T) { receiverHtlcScript := closeSummary.CloseTx.TxOut[inHtlcIndex].PkScript // With the original pkscript located, we'll now verify that the second - // level transaction can spend from the multi-sig out. + // level transaction can spend from the multi-sig out. Supply the + // preimage manually. This is usually done by the contract resolver + // before publication. successTx := inHtlcResolution.SignedSuccessTx + successTx.TxIn[0].Witness[3] = preimageBob[:] vm, err = txscript.NewEngine(receiverHtlcScript, successTx, 0, txscript.StandardVerifyFlags, nil, nil, int64(htlcAmount.ToSatoshis())) @@ -778,10 +776,6 @@ func TestForceClose(t *testing.T) { "htlcs, got %v htlcs", 1, len(closeSummary.HtlcResolutions.IncomingHTLCs)) } - var zeroHash [32]byte - if closeSummary.HtlcResolutions.IncomingHTLCs[0].Preimage != zeroHash { - t.Fatalf("bob shouldn't know preimage but does") - } } // TestForceCloseDustOutput tests that if either side force closes with an @@ -1459,16 +1453,14 @@ func TestStateUpdatePersistence(t *testing.T) { } aliceChannelNew, err := NewLightningChannel( - aliceChannel.Signer, nil, aliceChannels[0], - aliceChannel.sigPool, + aliceChannel.Signer, aliceChannels[0], aliceChannel.sigPool, ) if err != nil { t.Fatalf("unable to create new channel: %v", err) } bobChannelNew, err := NewLightningChannel( - bobChannel.Signer, nil, bobChannels[0], - bobChannel.sigPool, + bobChannel.Signer, bobChannels[0], bobChannel.sigPool, ) if err != nil { t.Fatalf("unable to create new channel: %v", err) @@ -2645,13 +2637,13 @@ func TestChanSyncFullySynced(t *testing.T) { } aliceChannelNew, err := NewLightningChannel( - aliceChannel.Signer, nil, aliceChannels[0], aliceChannel.sigPool, + aliceChannel.Signer, aliceChannels[0], aliceChannel.sigPool, ) if err != nil { t.Fatalf("unable to create new channel: %v", err) } bobChannelNew, err := NewLightningChannel( - bobChannel.Signer, nil, bobChannels[0], bobChannel.sigPool, + bobChannel.Signer, bobChannels[0], bobChannel.sigPool, ) if err != nil { t.Fatalf("unable to create new channel: %v", err) @@ -2673,7 +2665,7 @@ func restartChannel(channelOld *LightningChannel) (*LightningChannel, error) { } channelNew, err := NewLightningChannel( - channelOld.Signer, channelOld.pCache, nodeChannels[0], + 
channelOld.Signer, nodeChannels[0], channelOld.sigPool, ) if err != nil { @@ -4943,11 +4935,6 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { t.Fatalf("unable to close: %v", err) } - // Now that Bob has force closed, we'll modify Alice's pre image cache - // such that she now gains the ability to also settle the incoming HTLC - // from Bob. - aliceChannel.pCache.AddPreimages(lntypes.Preimage(preimageBob)) - // We'll then use Bob's transaction to trigger a spend notification for // Alice. closeTx := bobForceClose.CloseTx @@ -4958,7 +4945,7 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) { } aliceCloseSummary, err := NewUnilateralCloseSummary( aliceChannel.channelState, aliceChannel.Signer, - aliceChannel.pCache, spendDetail, + spendDetail, aliceChannel.channelState.RemoteCommitment, aliceChannel.channelState.RemoteCurrentRevocation, ) @@ -5108,7 +5095,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) { // our output wasn't picked up. aliceWrongCloseSummary, err := NewUnilateralCloseSummary( aliceChannel.channelState, aliceChannel.Signer, - aliceChannel.pCache, spendDetail, + spendDetail, aliceChannel.channelState.RemoteCommitment, aliceChannel.channelState.RemoteCurrentRevocation, ) @@ -5129,7 +5116,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) { } aliceCloseSummary, err := NewUnilateralCloseSummary( aliceChannel.channelState, aliceChannel.Signer, - aliceChannel.pCache, spendDetail, + spendDetail, aliceRemoteChainTip.Commitment, aliceChannel.channelState.RemoteNextRevocation, ) @@ -5926,7 +5913,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) { // We now re-create the channels, mimicking a restart. This should sync // the update logs up to the correct state set up above. newAliceChannel, err := NewLightningChannel( - aliceChannel.Signer, nil, aliceChannel.channelState, + aliceChannel.Signer, aliceChannel.channelState, aliceChannel.sigPool, ) if err != nil { @@ -5934,7 +5921,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) { } newBobChannel, err := NewLightningChannel( - bobChannel.Signer, nil, bobChannel.channelState, + bobChannel.Signer, bobChannel.channelState, bobChannel.sigPool, ) if err != nil { @@ -6008,7 +5995,7 @@ func restoreAndAssert(t *testing.T, channel *LightningChannel, numAddsLocal, numFailsLocal, numAddsRemote, numFailsRemote int) { newChannel, err := NewLightningChannel( - channel.Signer, nil, channel.channelState, + channel.Signer, channel.channelState, channel.sigPool, ) if err != nil { @@ -6318,8 +6305,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) { expLocal, expRemote uint64) *LightningChannel { newChannel, err := NewLightningChannel( - channel.Signer, nil, channel.channelState, - channel.sigPool, + channel.Signer, channel.channelState, channel.sigPool, ) if err != nil { t.Fatalf("unable to create new channel: %v", err) diff --git a/lnwallet/interface.go b/lnwallet/interface.go index 395da6d1..01c5ceeb 100644 --- a/lnwallet/interface.go +++ b/lnwallet/interface.go @@ -10,7 +10,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/btcsuite/btcwallet/wallet/txauthor" - "github.com/lightningnetwork/lnd/lntypes" ) // AddressType is an enum-like type which denotes the possible address types @@ -294,21 +293,6 @@ type MessageSigner interface { SignMessage(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error) } -// PreimageCache is an interface that represents a global cache for preimages. 
-// We'll utilize this cache to communicate the discovery of new preimages -// across sub-systems. -type PreimageCache interface { - // LookupPreimage attempts to look up a preimage according to its hash. - // If found, the preimage is returned along with true for the second - // argument. Otherwise, it'll return false. - LookupPreimage(hash lntypes.Hash) (lntypes.Preimage, bool) - - // AddPreimages adds a batch of newly discovered preimages to the global - // cache, and also signals any subscribers of the newly discovered - // witness. - AddPreimages(preimages ...lntypes.Preimage) error -} - // WalletDriver represents a "driver" for a particular concrete // WalletController implementation. A driver is identified by a globally unique // string identifier along with a 'New()' method which is responsible for diff --git a/lnwallet/test_utils.go b/lnwallet/test_utils.go index 392f4492..7d2abff4 100644 --- a/lnwallet/test_utils.go +++ b/lnwallet/test_utils.go @@ -301,13 +301,11 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error) aliceSigner := &input.MockSigner{Privkeys: aliceKeys} bobSigner := &input.MockSigner{Privkeys: bobKeys} - pCache := newMockPreimageCache() - // TODO(roasbeef): make mock version of pre-image store alicePool := NewSigPool(1, aliceSigner) channelAlice, err := NewLightningChannel( - aliceSigner, pCache, aliceChannelState, alicePool, + aliceSigner, aliceChannelState, alicePool, ) if err != nil { return nil, nil, nil, err @@ -316,7 +314,7 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error) bobPool := NewSigPool(1, bobSigner) channelBob, err := NewLightningChannel( - bobSigner, pCache, bobChannelState, bobPool, + bobSigner, bobChannelState, bobPool, ) if err != nil { return nil, nil, nil, err diff --git a/lnwallet/transactions_test.go b/lnwallet/transactions_test.go index b9eb3c47..27d6c2f2 100644 --- a/lnwallet/transactions_test.go +++ b/lnwallet/transactions_test.go @@ -780,8 +780,6 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) { }, } - pCache := newMockPreimageCache() - for i, test := range testCases { expectedCommitmentTx, err := txFromHex(test.expectedCommitmentTxHex) if err != nil { @@ -848,7 +846,7 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) { htlcResolutions, err := extractHtlcResolutions( SatPerKWeight(test.commitment.FeePerKw), true, signer, htlcs, keys, channel.localChanCfg, channel.remoteChanCfg, - commitTx.TxHash(), pCache, + commitTx.TxHash(), ) if err != nil { t.Errorf("Case %d: Failed to extract HTLC resolutions: %v", i, err) diff --git a/peer.go b/peer.go index 08b05f15..4c65290f 100644 --- a/peer.go +++ b/peer.go @@ -429,8 +429,7 @@ func (p *peer) QuitSignal() <-chan struct{} { func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error { for _, dbChan := range chans { lnChan, err := lnwallet.NewLightningChannel( - p.server.cc.signer, p.server.witnessBeacon, dbChan, - p.server.sigPool, + p.server.cc.signer, dbChan, p.server.sigPool, ) if err != nil { return err @@ -1789,8 +1788,7 @@ out: // set of active channels, so we can look it up later // easily according to its channel ID. 
lnChan, err := lnwallet.NewLightningChannel( - p.server.cc.signer, p.server.witnessBeacon, - newChan, p.server.sigPool, + p.server.cc.signer, newChan, p.server.sigPool, ) if err != nil { p.activeChanMtx.Unlock() diff --git a/rpcserver.go b/rpcserver.go index 858d6867..544393a2 100644 --- a/rpcserver.go +++ b/rpcserver.go @@ -1917,7 +1917,7 @@ func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) ( // we create a fully populated channel state machine which // uses the db channel as backing storage. return lnwallet.NewLightningChannel( - r.server.cc.wallet.Cfg.Signer, nil, dbChan, nil, + r.server.cc.wallet.Cfg.Signer, dbChan, nil, ) } diff --git a/test_utils.go b/test_utils.go index 8d1fc3d1..044fee17 100644 --- a/test_utils.go +++ b/test_utils.go @@ -303,7 +303,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, alicePool := lnwallet.NewSigPool(1, aliceSigner) channelAlice, err := lnwallet.NewLightningChannel( - aliceSigner, nil, aliceChannelState, alicePool, + aliceSigner, aliceChannelState, alicePool, ) if err != nil { return nil, nil, nil, nil, err @@ -312,7 +312,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier, bobPool := lnwallet.NewSigPool(1, bobSigner) channelBob, err := lnwallet.NewLightningChannel( - bobSigner, nil, bobChannelState, bobPool, + bobSigner, bobChannelState, bobPool, ) if err != nil { return nil, nil, nil, nil, err diff --git a/witness_beacon.go b/witness_beacon.go index 82835226..2971d93d 100644 --- a/witness_beacon.go +++ b/witness_beacon.go @@ -7,7 +7,6 @@ import ( "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/invoices" "github.com/lightningnetwork/lnd/lntypes" - "github.com/lightningnetwork/lnd/lnwallet" ) // preimageSubscriber represents an active subscription to be notified once the @@ -144,4 +143,3 @@ func (p *preimageBeacon) AddPreimages(preimages ...lntypes.Preimage) error { } var _ contractcourt.WitnessBeacon = (*preimageBeacon)(nil) -var _ lnwallet.PreimageCache = (*preimageBeacon)(nil) From ec6a35d6e85083305329a63798cdb7a98fcf5164 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 15 Apr 2019 19:05:10 +0200 Subject: [PATCH 03/14] cnct: do not depend on ChainIO in incoming contest resolver The chain notifier's new behaviour of always sending the current block immediately after registration removes the need for a separate GetBestBlock call on ChainIO. --- .../htlc_incoming_contest_resolver.go | 39 +++++++++++-------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/contractcourt/htlc_incoming_contest_resolver.go b/contractcourt/htlc_incoming_contest_resolver.go index 8889484e..87b2cba8 100644 --- a/contractcourt/htlc_incoming_contest_resolver.go +++ b/contractcourt/htlc_incoming_contest_resolver.go @@ -50,18 +50,32 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { return nil, nil } - // We'll first check if this HTLC has been timed out, if so, we can - // return now and mark ourselves as resolved. _, currentHeight, err := h.ChainIO.GetBestBlock() + // Register for block epochs. After registration, the current height + // will be sent on the channel immediately. + blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil) if err != nil { return nil, err } + defer blockEpochs.Cancel() + var currentHeight int32 + select { + case newBlock, ok := <-blockEpochs.Epochs: + if !ok { + return nil, fmt.Errorf("quitting") + } + currentHeight = newBlock.Height + case <-h.Quit: + return nil, fmt.Errorf("resolver stopped") + } + + // We'll first check if this HTLC has been timed out, if so, we can + // return now and mark ourselves as resolved. If we're past the point of + // expiry of the HTLC, then at this point the sender can sweep it, so + // we'll end our lifetime. Here we deliberately forego the chance that + // the sender doesn't sweep and we already have or will learn the + // preimage. 
Otherwise the resolver could potentially + stay active indefinitely and the channel will never close properly. if uint32(currentHeight) >= h.htlcExpiry { // TODO(roasbeef): should also somehow check if outgoing is // resolved or not @@ -117,14 +131,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { // ensure the preimage can't be delivered between querying and // registering for the preimage subscription. preimageSubscription := h.PreimageDB.SubscribeUpdates() - blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil) - if err != nil { - return nil, err - } - defer func() { - preimageSubscription.CancelSubscription() - blockEpochs.Cancel() - }() + defer preimageSubscription.CancelSubscription() // Create a buffered hodl chan to prevent deadlock. hodlChan := make(chan interface{}, 1) From 99e42ddde651d574dc0670df124776a9c5fe0df1 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 16 Apr 2019 11:52:42 +0200 Subject: [PATCH 04/14] cnct: be stricter about matching preimages The former tryApplyPreimage function silently ignored invalid preimages. This could mask potential bugs. This commit makes the logic stricter and generates an error in case an unexpected mismatch occurs. --- .../htlc_incoming_contest_resolver.go | 35 +++++++++++++------ 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/contractcourt/htlc_incoming_contest_resolver.go b/contractcourt/htlc_incoming_contest_resolver.go index 87b2cba8..c1933e9e 100644 --- a/contractcourt/htlc_incoming_contest_resolver.go +++ b/contractcourt/htlc_incoming_contest_resolver.go @@ -2,6 +2,7 @@ package contractcourt import ( "encoding/binary" + "errors" "fmt" "io" @@ -94,10 +95,11 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { // the preimage is revealed so the inner resolver can properly complete // its duties. - // The boolean return value indicates whether the preimage - // was properly applied. - tryApplyPreimage := func(preimage lntypes.Preimage) bool { - // Check to see if this preimage matches our htlc. + // An error is returned in case of an unexpected preimage mismatch. + applyPreimage := func(preimage lntypes.Preimage) error { + // Sanity check to see if this preimage matches our htlc. At + // this point it should never happen that it does not match. if !preimage.Matches(h.payHash) { - return false + return errors.New("preimage does not match hash") } // Update htlcResolution with the matching preimage. 
@@ -120,7 +122,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[3] = preimage[:] } - return true + return nil } // If the HTLC hasn't expired yet, then we may still be able to claim @@ -148,9 +150,11 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { // If the htlc can be settled directly, we can progress to the inner // resolver immediately. if event != nil && event.Preimage != nil { - if tryApplyPreimage(*event.Preimage) { - return &h.htlcSuccessResolver, nil + if err := applyPreimage(*event.Preimage); err != nil { + return nil, err } + + return &h.htlcSuccessResolver, nil } // With the epochs and preimage subscriptions initialized, we'll query @@ -160,19 +164,27 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { // If we do, then this means we can claim the HTLC! However, // we don't know how to ourselves, so we'll return our inner // resolver which has the knowledge to do so. - if tryApplyPreimage(preimage) { - return &h.htlcSuccessResolver, nil + if err := applyPreimage(preimage); err != nil { + return nil, err } + + return &h.htlcSuccessResolver, nil } for { select { case preimage := <-preimageSubscription.WitnessUpdates: - if !tryApplyPreimage(preimage) { + // We receive all new preimages, so we need to ignore + // all except the preimage we are waiting for. + if !preimage.Matches(h.payHash) { continue } + if err := applyPreimage(preimage); err != nil { + return nil, err + } + // We've learned of the preimage and this information // has been added to our inner resolver. We return it so // it can continue contract resolution. @@ -186,9 +198,10 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { continue } - if !tryApplyPreimage(*hodlEvent.Preimage) { - continue + if err := applyPreimage(*hodlEvent.Preimage); err != nil { + return nil, err } + return &h.htlcSuccessResolver, nil case newBlock, ok := <-blockEpochs.Epochs: From 1a80a1e5406bc27f8bb0974645b0c0b0bd0233bb Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 16 Apr 2019 12:32:51 +0200 Subject: [PATCH 05/14] witness_beacon: do not look up invoice preimages This commit isolates preimages of forwarded htlcs from invoice preimages. The reason to do this is to prevent the incoming contest resolver from settling exit hop htlcs for which the invoice isn't marked as settled. --- contractcourt/channel_arbitrator.go | 53 ++++++++++++++++++++++++++--- server.go | 1 - witness_beacon.go | 22 ------------ 3 files changed, 48 insertions(+), 28 deletions(-) diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go index 7aeb2e35..bb16a3af 100644 --- a/contractcourt/channel_arbitrator.go +++ b/contractcourt/channel_arbitrator.go @@ -620,7 +620,10 @@ func (c *ChannelArbitrator) stateStep(triggerHeight uint32, // chain, we'll check to see if we need to make any on-chain // claims on behalf of the channel contract that we're // arbitrating for. - chainActions := c.checkChainActions(triggerHeight, trigger) + chainActions, err := c.checkChainActions(triggerHeight, trigger) + if err != nil { + return StateError, closeTx, err + } // If there are no actions to be made, then we'll remain in the // default state. If this isn't a self initiated event (we're @@ -1082,7 +1085,7 @@ func (c *ChannelArbitrator) shouldGoOnChain(htlcExpiry, broadcastDelta, // been sufficiently confirmed, the HTLC's should be canceled backwards. 
For // redeemed HTLC's, we should send the pre-image back to the incoming link. func (c *ChannelArbitrator) checkChainActions(height uint32, - trigger transitionTrigger) ChainActionMap { + trigger transitionTrigger) (ChainActionMap, error) { // TODO(roasbeef): would need to lock channel? channel totem? // * race condition if adding and we broadcast, etc @@ -1122,7 +1125,12 @@ func (c *ChannelArbitrator) checkChainActions(height uint32, // know the pre-image and it's close to timing out. We need to // ensure that we claim the funds that our rightfully ours // on-chain. - if _, ok := c.cfg.PreimageDB.LookupPreimage(htlc.RHash); !ok { + preimageAvailable, err := c.isPreimageAvailable(htlc.RHash) + if err != nil { + return nil, err + } + + if !preimageAvailable { continue } @@ -1151,7 +1159,7 @@ func (c *ChannelArbitrator) checkChainActions(height uint32, if !haveChainActions && trigger == chainTrigger { log.Tracef("ChannelArbitrator(%v): no actions to take at "+ "height=%v", c.cfg.ChanPoint, height) - return actionMap + return actionMap, nil } // Now that we know we'll need to go on-chain, we'll examine all of our @@ -1223,7 +1231,42 @@ func (c *ChannelArbitrator) checkChainActions(height uint32, ) } - return actionMap + return actionMap, nil +} + +// isPreimageAvailable returns whether the hash preimage is available in either +// the preimage cache or the invoice database. +func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool, + error) { + + // Start by checking the preimage cache for preimages of + // forwarded HTLCs. + _, preimageAvailable := c.cfg.PreimageDB.LookupPreimage( + hash, + ) + if preimageAvailable { + return true, nil + } + + // Then check if we have an invoice that can be settled by this HTLC. + // + // TODO(joostjager): Check that there are still more blocks remaining + // than the invoice cltv delta. We don't want to go to chain only to + // have the incoming contest resolver decide that we don't want to + // settle this invoice. + invoice, _, err := c.cfg.Registry.LookupInvoice(hash) + switch err { + case nil: + case channeldb.ErrInvoiceNotFound, channeldb.ErrNoInvoicesCreated: + return false, nil + default: + return false, err + } + + preimageAvailable = invoice.Terms.PaymentPreimage != + channeldb.UnknownPreimage + + return preimageAvailable, nil } // prepContractResolutions is called either int he case that we decide we need diff --git a/server.go b/server.go index c71910e1..29a6b541 100644 --- a/server.go +++ b/server.go @@ -374,7 +374,6 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl, } s.witnessBeacon = &preimageBeacon{ - invoices: s.invoices, wCache: chanDB.NewWitnessCache(), subscribers: make(map[uint64]*preimageSubscriber), } diff --git a/witness_beacon.go b/witness_beacon.go index 2971d93d..1593bee7 100644 --- a/witness_beacon.go +++ b/witness_beacon.go @@ -5,7 +5,6 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/contractcourt" - "github.com/lightningnetwork/lnd/invoices" "github.com/lightningnetwork/lnd/lntypes" ) @@ -23,8 +22,6 @@ type preimageSubscriber struct { type preimageBeacon struct { sync.RWMutex - invoices *invoices.InvoiceRegistry - wCache *channeldb.WitnessCache clientCounter uint64 @@ -71,25 +68,6 @@ func (p *preimageBeacon) LookupPreimage( p.RLock() defer p.RUnlock() - // First, we'll check the invoice registry to see if we already know of - // the preimage as it's on that we created ourselves. 
- invoice, _, err := p.invoices.LookupInvoice(payHash) - switch { - case err == channeldb.ErrInvoiceNotFound: - // If we get this error, then it simply means that this invoice - // wasn't found, so we don't treat it as a critical error. - case err != nil: - return lntypes.Preimage{}, false - } - - // If we've found the invoice, then we can return the preimage - // directly. - if err != channeldb.ErrInvoiceNotFound && - invoice.Terms.PaymentPreimage != channeldb.UnknownPreimage { - - return invoice.Terms.PaymentPreimage, true - } - // Otherwise, we'll perform a final check using the witness cache. preimage, err := p.wCache.LookupSha256Witness(payHash) if err != nil { From d8dac49112ca9f1ea8bab615a3c6a2ee16403408 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 16 Apr 2019 10:21:02 +0200 Subject: [PATCH 06/14] cnct: add invoice registry interface Create an interface type to be able to mock the registry in unit tests. --- contractcourt/chain_arbitrator.go | 3 +-- contractcourt/interfaces.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 contractcourt/interfaces.go diff --git a/contractcourt/chain_arbitrator.go b/contractcourt/chain_arbitrator.go index b7c5eb0d..712dcc71 100644 --- a/contractcourt/chain_arbitrator.go +++ b/contractcourt/chain_arbitrator.go @@ -12,7 +12,6 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" - "github.com/lightningnetwork/lnd/invoices" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/sweep" @@ -146,7 +145,7 @@ type ChainArbitratorConfig struct { // Registry is the invoice database that is used by resolvers to lookup // preimages and settle invoices. - Registry *invoices.InvoiceRegistry + Registry Registry // NotifyClosedChannel is a function closure that the ChainArbitrator // will use to notify the ChannelNotifier about a newly closed channel. diff --git a/contractcourt/interfaces.go b/contractcourt/interfaces.go new file mode 100644 index 00000000..fa702e9f --- /dev/null +++ b/contractcourt/interfaces.go @@ -0,0 +1,28 @@ +package contractcourt + +import ( + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/invoices" + "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/lnwire" +) + +// Registry is an interface which represents the invoice registry. +type Registry interface { + // LookupInvoice attempts to look up an invoice according to its 32 + // byte payment hash. This method should also return the min final CLTV + // delta for this invoice. We'll use this to ensure that the HTLC + // extended to us gives us enough time to settle as we prescribe. + LookupInvoice(lntypes.Hash) (channeldb.Invoice, uint32, error) + + // NotifyExitHopHtlc attempts to mark an invoice as settled. If the + // invoice is a debug invoice, then this method is a noop as debug + // invoices are never fully settled. The return value describes how the + // htlc should be resolved. If the htlc cannot be resolved immediately, + // the resolution is sent on the passed in hodlChan later. + NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi, + hodlChan chan<- interface{}) (*invoices.HodlEvent, error) + + // HodlUnsubscribeAll unsubscribes from all hodl events. + HodlUnsubscribeAll(subscriber chan<- interface{}) +} From b917820c5bc922a4ab1a4c50f3c60a1dae632f98 Mon Sep 17 00:00:00 2001 From: "Johan T. 
Halseth" Date: Mon, 6 May 2019 12:19:36 +0200 Subject: [PATCH 07/14] lntest/node: set up InvoicesClient for HarnessNode --- lntest/node.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lntest/node.go b/lntest/node.go index 91bb6238..3afbd5d0 100644 --- a/lntest/node.go +++ b/lntest/node.go @@ -27,6 +27,7 @@ import ( "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/chanbackup" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/macaroons" ) @@ -240,11 +241,14 @@ type HarnessNode struct { lnrpc.LightningClient lnrpc.WalletUnlockerClient + + invoicesrpc.InvoicesClient } // Assert *HarnessNode implements the lnrpc.LightningClient interface. var _ lnrpc.LightningClient = (*HarnessNode)(nil) var _ lnrpc.WalletUnlockerClient = (*HarnessNode)(nil) +var _ invoicesrpc.InvoicesClient = (*HarnessNode)(nil) // newNode creates a new test lightning node instance from the passed config. func newNode(cfg nodeConfig) (*HarnessNode, error) { @@ -487,6 +491,7 @@ func (hn *HarnessNode) initLightningClient(conn *grpc.ClientConn) error { // Construct the LightningClient that will allow us to use the // HarnessNode directly for normal rpc operations. hn.LightningClient = lnrpc.NewLightningClient(conn) + hn.InvoicesClient = invoicesrpc.NewInvoicesClient(conn) // Set the harness node's pubkey to what the node claims in GetInfo. err := hn.FetchNodeInfo() From bf0af12faec707ab6e427a7b79ce56ed6b7b3d70 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 8 May 2019 09:31:14 +0200 Subject: [PATCH 08/14] lnd_test: move chain claim tests to separate files This commit splits out several integration tests that will be modified in a follow up commit to separate files. The current lnd_test.go file is 10k+ loc which makes it harder to handle by tools and it isn't good for overview either. --- lnd_multi-hop_htlc_local_chain_claim_test.go | 352 +++++++ ...ulti-hop_htlc_receiver_chain_claim_test.go | 265 ++++++ lnd_multi-hop_htlc_remote_chain_claim_test.go | 293 ++++++ lnd_test.go | 867 ------------------ 4 files changed, 910 insertions(+), 867 deletions(-) create mode 100644 lnd_multi-hop_htlc_local_chain_claim_test.go create mode 100644 lnd_multi-hop_htlc_receiver_chain_claim_test.go create mode 100644 lnd_multi-hop_htlc_remote_chain_claim_test.go diff --git a/lnd_multi-hop_htlc_local_chain_claim_test.go b/lnd_multi-hop_htlc_local_chain_claim_test.go new file mode 100644 index 00000000..bdcabfcc --- /dev/null +++ b/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -0,0 +1,352 @@ +// +build rpctest + +package lnd + +import ( + "context" + "fmt" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/davecgh/go-spew/spew" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" +) + +// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if +// we're forced to go to chain with an incoming HTLC, then when we find out the +// preimage via the witness beacon, we properly settle the HTLC on-chain in +// order to ensure we don't lose any funds. +func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + // First, we'll create a three hop network: Alice -> Bob -> Carol, with + // Carol refusing to actually settle or directly cancel any HTLC's + // self. + aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) + + // Clean up carol's node when the test finishes. 
+ defer shutdownAndAssert(net, t, carol) + + // With the network active, we'll now add a new invoice at Carol's end. + invoiceReq := &lnrpc.Invoice{ + Value: 100000, + CltvExpiry: 40, + } + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq) + if err != nil { + t.Fatalf("unable to generate carol invoice: %v", err) + } + + // Now that we've created the invoice, we'll send a single payment from + // Alice to Carol. We won't wait for the response however, as Carol + // will not immediately settle the payment. + ctx, cancel := context.WithCancel(ctxb) + defer cancel() + + alicePayStream, err := net.Alice.SendPayment(ctx) + if err != nil { + t.Fatalf("unable to create payment stream for alice: %v", err) + } + err = alicePayStream.Send(&lnrpc.SendRequest{ + PaymentRequest: carolInvoice.PaymentRequest, + }) + if err != nil { + t.Fatalf("unable to send payment: %v", err) + } + + // We'll now wait until all 3 nodes have fully locked in the HTLC we + // just sent. + var predErr error + nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + err = lntest.WaitPredicate(func() bool { + predErr = assertActiveHtlcs(nodes, carolInvoice.RHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", err) + } + + // At this point, Bob decides that he wants to exit the channel + // immediately, so he force closes his commitment transaction. + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + bobForceClose := closeChannelAndAssert(ctxt, t, net, net.Bob, + aliceChanPoint, true) + + // Alice will sweep her output immediately. + _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find alice's sweep tx in miner mempool: %v", + err) + } + + // We'll now mine enough blocks so Carol decides that she needs to go + // on-chain to claim the HTLC as Bob has been inactive. + numBlocks := uint32(invoiceReq.CltvExpiry - + defaultIncomingBroadcastDelta) + + if _, err := net.Miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks") + } + + // Carol's commitment transaction should now be in the mempool. + txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout) + if err != nil { + t.Fatalf("transactions not found in mempool: %v", err) + } + bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint) + if err != nil { + t.Fatalf("unable to get txid: %v", err) + } + carolFundingPoint := wire.OutPoint{ + Hash: *bobFundingTxid, + Index: bobChanPoint.OutputIndex, + } + + // The tx should be spending from the funding transaction. + commitHash := txids[0] + tx1, err := net.Miner.Node.GetRawTransaction(commitHash) + if err != nil { + t.Fatalf("unable to get txn: %v", err) + } + if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint { + t.Fatalf("commit transaction not spending fundingtx: %v", + spew.Sdump(tx1)) + } + + // Mine a block that should confirm the commit tx. + block := mineBlocks(t, net, 1, 1)[0] + if len(block.Transactions) != 2 { + t.Fatalf("expected 2 transactions in block, got %v", + len(block.Transactions)) + } + assertTxInBlock(t, block, commitHash) + + // After the force close transaction is mined, Carol should broadcast + // her second level HTLC transaction. Bob will broadcast a sweep tx to + // sweep his output in the channel with Carol. He can do this + // immediately, as the output is not timelocked since Carol was the one + // force closing. 
+ commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2, + minerMempoolTimeout) + if err != nil { + t.Fatalf("transactions not found in mempool: %v", err) + } + + // Both Carol's second level transaction and Bob's sweep should be + // spending from the commitment transaction. + for _, txid := range commitSpends { + tx, err := net.Miner.Node.GetRawTransaction(txid) + if err != nil { + t.Fatalf("unable to get txn: %v", err) + } + + if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash { + t.Fatalf("tx did not spend from commitment tx") + } + } + + // Mine a block to confirm the two transactions (+ the coinbase). + block = mineBlocks(t, net, 1, 2)[0] + if len(block.Transactions) != 3 { + t.Fatalf("expected 3 transactions in block, got %v", + len(block.Transactions)) + } + for _, txid := range commitSpends { + assertTxInBlock(t, block, txid) + } + + // Keep track of the second level tx maturity. + carolSecondLevelCSV := uint32(defaultCSV) + + // When Bob notices Carol's second level transaction in the block, he + // will extract the preimage and broadcast a second level tx to claim + // the HTLC in his (already closed) channel with Alice. + bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node, + minerMempoolTimeout) + if err != nil { + t.Fatalf("transactions not found in mempool: %v", err) + } + + // It should spend from the commitment in the channel with Alice. + tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx) + if err != nil { + t.Fatalf("unable to get txn: %v", err) + } + + if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose { + t.Fatalf("tx did not spend from bob's force close tx") + } + + // At this point, Bob should have broadcast his second layer success + // transaction, and should have sent it to the nursery for incubation. + pendingChansRequest := &lnrpc.PendingChannelsRequest{} + err = lntest.WaitPredicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + + if len(pendingChanResp.PendingForceClosingChannels) == 0 { + predErr = fmt.Errorf("bob should have pending for " + + "close chan but doesn't") + return false + } + + for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels { + if forceCloseChan.Channel.LocalBalance != 0 { + continue + } + + if len(forceCloseChan.PendingHtlcs) != 1 { + predErr = fmt.Errorf("bob should have pending htlc " + + "but doesn't") + return false + } + stage := forceCloseChan.PendingHtlcs[0].Stage + if stage != 1 { + predErr = fmt.Errorf("bob's htlc should have "+ + "advanced to the first stage but was "+ + "stage: %v", stage) + return false + } + } + return true + }, time.Second*15) + if err != nil { + t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) + } + + // We'll now mine a block which should confirm Bob's second layer + // transaction. + block = mineBlocks(t, net, 1, 1)[0] + if len(block.Transactions) != 2 { + t.Fatalf("expected 2 transactions in block, got %v", + len(block.Transactions)) + } + assertTxInBlock(t, block, bobSecondLvlTx) + + // Keep track of Bob's second level maturity, and decrement our track + // of Carol's. + bobSecondLevelCSV := uint32(defaultCSV) + carolSecondLevelCSV-- + + // If we then mine 3 additional blocks, Carol's second level tx should + // mature, and she can pull the funds from it with a sweep tx. 
+ if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil { + t.Fatalf("unable to generate block: %v", err) + } + bobSecondLevelCSV -= carolSecondLevelCSV + + carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find Carol's sweeping transaction: %v", err) + } + + // Mining one additional block, Bob's second level tx is mature, and he + // can sweep the output. + block = mineBlocks(t, net, bobSecondLevelCSV, 1)[0] + assertTxInBlock(t, block, carolSweep) + + bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find bob's sweeping transaction") + } + + // Make sure it spends from the second level tx. + tx, err = net.Miner.Node.GetRawTransaction(bobSweep) + if err != nil { + t.Fatalf("unable to get txn: %v", err) + } + if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx { + t.Fatalf("tx did not spend from bob's second level tx") + } + + // When we mine one additional block, that will confirm Bob's sweep. + // Now Bob should have no pending channels anymore, as this just + // resolved it by the confirmation of the sweep transaction. + block = mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, bobSweep) + + err = lntest.WaitPredicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("bob still has pending channels "+ + "but shouldn't: %v", spew.Sdump(pendingChanResp)) + return false + } + req := &lnrpc.ListChannelsRequest{} + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + chanInfo, err := net.Bob.ListChannels(ctxt, req) + if err != nil { + predErr = fmt.Errorf("unable to query for open "+ + "channels: %v", err) + return false + } + if len(chanInfo.Channels) != 0 { + predErr = fmt.Errorf("Bob should have no open "+ + "channels, instead he has %v", + len(chanInfo.Channels)) + return false + } + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // Also Carol should have no channels left (open nor pending). 
+	err = lntest.WaitPredicate(func() bool {
+		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+		pendingChanResp, err := carol.PendingChannels(
+			ctxt, pendingChansRequest,
+		)
+		if err != nil {
+			predErr = fmt.Errorf("unable to query for pending "+
+				"channels: %v", err)
+			return false
+		}
+		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
+			predErr = fmt.Errorf("carol still has pending channels "+
+				"but shouldn't: %v", spew.Sdump(pendingChanResp))
+			return false
+		}
+
+		req := &lnrpc.ListChannelsRequest{}
+		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+		chanInfo, err := carol.ListChannels(ctxt, req)
+		if err != nil {
+			predErr = fmt.Errorf("unable to query for open "+
+				"channels: %v", err)
+			return false
+		}
+		if len(chanInfo.Channels) != 0 {
+			predErr = fmt.Errorf("carol should have no open "+
+				"channels, instead she has %v",
+				len(chanInfo.Channels))
+			return false
+		}
+		return true
+	}, time.Second*15)
+	if err != nil {
+		t.Fatalf(predErr.Error())
+	}
+}
diff --git a/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lnd_multi-hop_htlc_receiver_chain_claim_test.go
new file mode 100644
index 00000000..437f8b50
--- /dev/null
+++ b/lnd_multi-hop_htlc_receiver_chain_claim_test.go
@@ -0,0 +1,265 @@
+// +build rpctest
+
+package lnd
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/btcsuite/btcd/chaincfg/chainhash"
+	"github.com/btcsuite/btcd/wire"
+	"github.com/davecgh/go-spew/spew"
+	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lntest"
+)
+
+// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
+// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC
+// off-chain, then it goes on chain to claim the HTLC. In this scenario, the
+// node that sent the outgoing HTLC should extract the preimage from the sweep
+// transaction, and finish settling the HTLC backwards into the route.
+func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
+	ctxb := context.Background()
+
+	// First, we'll create a three hop network: Alice -> Bob -> Carol, with
+	// Carol refusing to actually settle or directly cancel any HTLC's
+	// self.
+	aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
+
+	// Clean up carol's node when the test finishes.
+	defer shutdownAndAssert(net, t, carol)
+
+	// With the network active, we'll now add a new invoice at Carol's end.
+	// Make sure the cltv expiry delta is large enough, otherwise Bob won't
+	// send out the outgoing htlc.
+	const invoiceAmt = 100000
+	invoiceReq := &lnrpc.Invoice{
+		Value:      invoiceAmt,
+		CltvExpiry: 40,
+	}
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
+	carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq)
+	if err != nil {
+		t.Fatalf("unable to generate carol invoice: %v", err)
+	}
+
+	// Now that we've created the invoice, we'll send a single payment from
+	// Alice to Carol. We won't wait for the response however, as Carol
+	// will not immediately settle the payment.
+	ctx, cancel := context.WithCancel(ctxb)
+	defer cancel()
+
+	alicePayStream, err := net.Alice.SendPayment(ctx)
+	if err != nil {
+		t.Fatalf("unable to create payment stream for alice: %v", err)
+	}
+	err = alicePayStream.Send(&lnrpc.SendRequest{
+		PaymentRequest: carolInvoice.PaymentRequest,
+	})
+	if err != nil {
+		t.Fatalf("unable to send payment: %v", err)
+	}
+
+	// At this point, all 3 nodes should now have an active channel with
+	// the created HTLC pending on all of them.
+ var predErr error + nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} + err = lntest.WaitPredicate(func() bool { + predErr = assertActiveHtlcs(nodes, carolInvoice.RHash) + if predErr != nil { + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf("htlc mismatch: %v", predErr) + } + + // Now we'll mine enough blocks to prompt carol to actually go to the + // chain in order to sweep her HTLC since the value is high enough. + // TODO(roasbeef): modify once go to chain policy changes + numBlocks := uint32( + invoiceReq.CltvExpiry - defaultIncomingBroadcastDelta, + ) + if _, err := net.Miner.Node.Generate(numBlocks); err != nil { + t.Fatalf("unable to generate blocks") + } + + // At this point, Carol should broadcast her active commitment + // transaction in order to go to the chain and sweep her HTLC. + txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout) + if err != nil { + t.Fatalf("expected transaction not found in mempool: %v", err) + } + + bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint) + if err != nil { + t.Fatalf("unable to get txid: %v", err) + } + + carolFundingPoint := wire.OutPoint{ + Hash: *bobFundingTxid, + Index: bobChanPoint.OutputIndex, + } + + // The commitment transaction should be spending from the funding + // transaction. + commitHash := txids[0] + tx, err := net.Miner.Node.GetRawTransaction(commitHash) + if err != nil { + t.Fatalf("unable to get txn: %v", err) + } + commitTx := tx.MsgTx() + + if commitTx.TxIn[0].PreviousOutPoint != carolFundingPoint { + t.Fatalf("commit transaction not spending from expected "+ + "outpoint: %v", spew.Sdump(commitTx)) + } + + // Confirm the commitment. + mineBlocks(t, net, 1, 1) + + // After the force close transaction is mined, Carol should broadcast + // her second level HTLC transaction. Bob will broadcast a sweep tx to + // sweep his output in the channel with Carol. When Bob notices Carol's + // second level transaction in the mempool, he will extract the + // preimage and settle the HTLC back off-chain. + secondLevelHashes, err := waitForNTxsInMempool(net.Miner.Node, 2, + minerMempoolTimeout) + if err != nil { + t.Fatalf("transactions not found in mempool: %v", err) + } + + // Carol's second level transaction should be spending from + // the commitment transaction. + var secondLevelHash *chainhash.Hash + for _, txid := range secondLevelHashes { + tx, err := net.Miner.Node.GetRawTransaction(txid) + if err != nil { + t.Fatalf("unable to get txn: %v", err) + } + + if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash == *commitHash { + secondLevelHash = txid + } + } + if secondLevelHash == nil { + t.Fatalf("Carol's second level tx not found") + } + + // We'll now mine an additional block which should confirm both the + // second layer transactions. + if _, err := net.Miner.Node.Generate(1); err != nil { + t.Fatalf("unable to generate block: %v", err) + } + + time.Sleep(time.Second * 4) + + // TODO(roasbeef): assert bob pending state as well + + // Carol's pending channel report should now show two outputs under + // limbo: her commitment output, as well as the second-layer claim + // output. 
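The numBlocks calculation above encodes Carol's go-to-chain policy: she force closes once the HTLC is within the incoming broadcast delta of its absolute expiry. An illustrative restatement of that arithmetic; the value 10 for the broadcast delta is an assumption made for the example, the real constant is defaultIncomingBroadcastDelta in the lnd source:

// Illustrative values only: exampleCltvExpiry matches the invoice above,
// and the broadcast delta of 10 is assumed for the example.
const (
	exampleCltvExpiry     = 40
	exampleBroadcastDelta = 10
)

// blocksUntilForceClose is the number of blocks the test mines before
// Carol is expected to broadcast her commitment transaction.
func blocksUntilForceClose() uint32 {
	return uint32(exampleCltvExpiry - exampleBroadcastDelta)
}

With that, the test turns to Carol's pending channel report.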
+	pendingChansRequest := &lnrpc.PendingChannelsRequest{}
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest)
+	if err != nil {
+		t.Fatalf("unable to query for pending channels: %v", err)
+	}
+
+	if len(pendingChanResp.PendingForceClosingChannels) == 0 {
+		t.Fatalf("carol should have pending for close chan but doesn't")
+	}
+	forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
+	if forceCloseChan.LimboBalance == 0 {
+		t.Fatalf("carol should have nonzero limbo balance instead "+
+			"has: %v", forceCloseChan.LimboBalance)
+	}
+
+	// The pending HTLC carol has should also now be in stage 2.
+	if len(forceCloseChan.PendingHtlcs) != 1 {
+		t.Fatalf("carol should have pending htlc but doesn't")
+	}
+	if forceCloseChan.PendingHtlcs[0].Stage != 2 {
+		t.Fatalf("carol's htlc should have advanced to the second "+
+			"stage, but is in stage: %v",
+			forceCloseChan.PendingHtlcs[0].Stage)
+	}
+
+	// Once the second-level transaction has confirmed, Bob should have
+	// extracted the preimage from the chain, and sent it back to Alice,
+	// clearing the HTLC off-chain.
+	nodes = []*lntest.HarnessNode{net.Alice}
+	err = lntest.WaitPredicate(func() bool {
+		predErr = assertNumActiveHtlcs(nodes, 0)
+		if predErr != nil {
+			return false
+		}
+		return true
+	}, time.Second*15)
+	if err != nil {
+		t.Fatalf("htlc mismatch: %v", predErr)
+	}
+
+	// If we mine 4 additional blocks, then both outputs should now be
+	// mature.
+	if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
+		t.Fatalf("unable to generate blocks: %v", err)
+	}
+
+	// We should have a new transaction in the mempool.
+	_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
+	if err != nil {
+		t.Fatalf("unable to find bob's sweeping transaction: %v", err)
+	}
+
+	// Finally, if we mine an additional block to confirm these two sweep
+	// transactions, Carol should not show a pending channel in her report
+	// afterwards.
+	if _, err := net.Miner.Node.Generate(1); err != nil {
+		t.Fatalf("unable to mine block: %v", err)
+	}
+	err = lntest.WaitPredicate(func() bool {
+		ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+		pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest)
+		if err != nil {
+			predErr = fmt.Errorf("unable to query for pending channels: %v", err)
+			return false
+		}
+		if len(pendingChanResp.PendingForceClosingChannels) != 0 {
+			predErr = fmt.Errorf("carol still has pending channels: %v",
+				spew.Sdump(pendingChanResp))
+			return false
+		}
+
+		return true
+	}, time.Second*15)
+	if err != nil {
+		t.Fatalf(predErr.Error())
+	}
+
+	// The invoice should show as settled for Carol, indicating that it was
+	// swept on-chain.
+	invoicesReq := &lnrpc.ListInvoiceRequest{}
+	invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
+	if err != nil {
+		t.Fatalf("unable to retrieve invoices: %v", err)
+	}
+	if len(invoicesResp.Invoices) != 1 {
+		t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
+	}
+	invoice := invoicesResp.Invoices[0]
+	if invoice.State != lnrpc.Invoice_SETTLED {
+		t.Fatalf("expected invoice to be settled on chain")
+	}
+	if invoice.AmtPaidSat != invoiceAmt {
+		t.Fatalf("expected invoice to be settled with %d sat, got "+
+			"%d sat", invoiceAmt, invoice.AmtPaidSat)
+	}
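The invoice assertions above (exactly one invoice, settled, paid the full amount) recur verbatim in the other chain-claim tests in this series. A hypothetical helper capturing them, assuming only the lnrpc types already in scope:

// assertInvoiceSettled is a hypothetical helper for the invoice checks
// above: exactly one invoice exists, it is settled, and it was paid the
// expected amount.
func assertInvoiceSettled(t *harnessTest, node *lntest.HarnessNode,
	amtSat int64) {

	resp, err := node.ListInvoices(
		context.Background(), &lnrpc.ListInvoiceRequest{},
	)
	if err != nil {
		t.Fatalf("unable to retrieve invoices: %v", err)
	}
	if len(resp.Invoices) != 1 {
		t.Fatalf("expected 1 invoice, got %d", len(resp.Invoices))
	}

	invoice := resp.Invoices[0]
	if invoice.State != lnrpc.Invoice_SETTLED {
		t.Fatalf("expected invoice to be settled on chain")
	}
	if invoice.AmtPaidSat != amtSat {
		t.Fatalf("expected invoice paid with %d sat, got %d sat",
			amtSat, invoice.AmtPaidSat)
	}
}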
+	// We'll close out the channel between Alice and Bob, then shutdown
+	// carol to conclude the test.
+	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
+	closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
+}
diff --git a/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lnd_multi-hop_htlc_remote_chain_claim_test.go
new file mode 100644
index 00000000..195c8315
--- /dev/null
+++ b/lnd_multi-hop_htlc_remote_chain_claim_test.go
@@ -0,0 +1,293 @@
+// +build rpctest
+
+package lnd
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/btcsuite/btcd/wire"
+	"github.com/davecgh/go-spew/spew"
+	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lntest"
+)
+
+// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
+// if the remote party goes to chain while we have an incoming HTLC, then when
+// we find out the preimage via the witness beacon, we properly settle the
+// HTLC on-chain in order to ensure that we don't lose any funds.
+func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
+	ctxb := context.Background()
+
+	// First, we'll create a three hop network: Alice -> Bob -> Carol, with
+	// Carol refusing to actually settle or directly cancel any HTLC's
+	// self.
+	aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
+
+	// Clean up carol's node when the test finishes.
+	defer shutdownAndAssert(net, t, carol)
+
+	// With the network active, we'll now add a new invoice at Carol's end.
+	const invoiceAmt = 100000
+	invoiceReq := &lnrpc.Invoice{
+		Value:      invoiceAmt,
+		CltvExpiry: 40,
+	}
+	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
+	carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq)
+	if err != nil {
+		t.Fatalf("unable to generate carol invoice: %v", err)
+	}
+
+	// Now that we've created the invoice, we'll send a single payment from
+	// Alice to Carol. We won't wait for the response however, as Carol
+	// will not immediately settle the payment.
+	ctx, cancel := context.WithCancel(ctxb)
+	defer cancel()
+
+	alicePayStream, err := net.Alice.SendPayment(ctx)
+	if err != nil {
+		t.Fatalf("unable to create payment stream for alice: %v", err)
+	}
+	err = alicePayStream.Send(&lnrpc.SendRequest{
+		PaymentRequest: carolInvoice.PaymentRequest,
+	})
+	if err != nil {
+		t.Fatalf("unable to send payment: %v", err)
+	}
+
+	// We'll now wait until all 3 nodes have the HTLC as just sent fully
+	// locked in.
+	var predErr error
+	nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
+	err = lntest.WaitPredicate(func() bool {
+		predErr = assertActiveHtlcs(nodes, carolInvoice.RHash)
+		if predErr != nil {
+			return false
+		}
+
+		return true
+	}, time.Second*15)
+	if err != nil {
+		t.Fatalf("htlc mismatch: %v", err)
+	}
+
+	// Next, Alice decides that she wants to exit the channel, so she'll
+	// immediately force close the channel by broadcasting her commitment
+	// transaction.
+	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
+	aliceForceClose := closeChannelAndAssert(ctxt, t, net, net.Alice,
+		aliceChanPoint, true)
+
+	// Wait for the channel to be marked pending force close.
+	ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
+	err = waitForChannelPendingForceClose(ctxt, net.Alice, aliceChanPoint)
+	if err != nil {
+		t.Fatalf("channel not pending force close: %v", err)
+	}
+
+	// Mine enough blocks for Alice to sweep her funds from the force
+	// closed channel.
+	_, err = net.Miner.Node.Generate(defaultCSV)
+	if err != nil {
+		t.Fatalf("unable to generate blocks: %v", err)
+	}
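Before Carol's deadline arithmetic below, note that the defaultCSV blocks just mined for Alice's sweep already count toward the HTLC deadline and are therefore subtracted from the blocks still to be mined. A sketch of that calculation, with 10 again standing in for defaultIncomingBroadcastDelta (an assumed value):

// blocksToDeadline is illustrative only: the blocks left to mine before
// Carol must go on-chain, after alreadyMined blocks have passed since the
// HTLC was locked in. For this test: blocksToDeadline(40, 10, defaultCSV).
func blocksToDeadline(cltvExpiry, broadcastDelta, alreadyMined uint32) uint32 {
	return cltvExpiry - broadcastDelta - alreadyMined
}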
+	// Alice should now sweep her funds.
+	_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
+	if err != nil {
+		t.Fatalf("unable to find sweeping tx in mempool: %v", err)
+	}
+
+	// We'll now mine enough blocks so Carol decides that she needs to go
+	// on-chain to claim the HTLC as Bob has been inactive.
+	numBlocks := uint32(invoiceReq.CltvExpiry-
+		defaultIncomingBroadcastDelta) - defaultCSV
+
+	if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
+		t.Fatalf("unable to generate blocks")
+	}
+
+	// Carol's commitment transaction should now be in the mempool.
+	txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout)
+	if err != nil {
+		t.Fatalf("transactions not found in mempool: %v", err)
+	}
+	bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
+	if err != nil {
+		t.Fatalf("unable to get txid: %v", err)
+	}
+	carolFundingPoint := wire.OutPoint{
+		Hash:  *bobFundingTxid,
+		Index: bobChanPoint.OutputIndex,
+	}
+
+	// The transaction should be spending from the funding transaction.
+	commitHash := txids[0]
+	tx1, err := net.Miner.Node.GetRawTransaction(commitHash)
+	if err != nil {
+		t.Fatalf("unable to get txn: %v", err)
+	}
+	if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint {
+		t.Fatalf("commit transaction not spending fundingtx: %v",
+			spew.Sdump(tx1))
+	}
+
+	// Mine a block, which should contain the commitment.
+	block := mineBlocks(t, net, 1, 1)[0]
+	if len(block.Transactions) != 2 {
+		t.Fatalf("expected 2 transactions in block, got %v",
+			len(block.Transactions))
+	}
+	assertTxInBlock(t, block, commitHash)
+
+	// After the force close transaction is mined, Carol should broadcast
+	// her second level HTLC transaction. Bob will broadcast a sweep tx to
+	// sweep his output in the channel with Carol. He can do this
+	// immediately, as the output is not timelocked since Carol was the one
+	// force closing.
+	commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2,
+		minerMempoolTimeout)
+	if err != nil {
+		t.Fatalf("transactions not found in mempool: %v", err)
+	}
+
+	// Both Carol's second level transaction and Bob's sweep should be
+	// spending from the commitment transaction.
+	for _, txid := range commitSpends {
+		tx, err := net.Miner.Node.GetRawTransaction(txid)
+		if err != nil {
+			t.Fatalf("unable to get txn: %v", err)
+		}
+
+		if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
+			t.Fatalf("tx did not spend from commitment tx")
+		}
+	}
+
+	// Mine a block to confirm the two transactions (+ coinbase).
+	block = mineBlocks(t, net, 1, 2)[0]
+	if len(block.Transactions) != 3 {
+		t.Fatalf("expected 3 transactions in block, got %v",
+			len(block.Transactions))
+	}
+	for _, txid := range commitSpends {
+		assertTxInBlock(t, block, txid)
+	}
+
+	// Keep track of the second level tx maturity.
+	carolSecondLevelCSV := uint32(defaultCSV)
+
+	// When Bob notices Carol's second level transaction in the block, he
+	// will extract the preimage and broadcast a sweep tx to directly claim
+	// the HTLC in his (already closed) channel with Alice.
+	bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node,
+		minerMempoolTimeout)
+	if err != nil {
+		t.Fatalf("transactions not found in mempool: %v", err)
+	}
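The first-input spend check that follows appears many times in these tests. A hypothetical helper that names the pattern, using only the btcd RPC client API the harness already relies on:

// assertSpendsFrom is a hypothetical helper for the recurring assertion
// that a transaction's first input spends from the expected transaction.
func assertSpendsFrom(t *harnessTest, miner *rpcclient.Client,
	txid, expectedSpend *chainhash.Hash) {

	tx, err := miner.GetRawTransaction(txid)
	if err != nil {
		t.Fatalf("unable to get txn: %v", err)
	}
	if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *expectedSpend {
		t.Fatalf("tx %v did not spend from %v", txid, expectedSpend)
	}
}

+	// It should spend from the commitment in the channel with Alice.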
+ tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep) + if err != nil { + t.Fatalf("unable to get txn: %v", err) + } + if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose { + t.Fatalf("tx did not spend from alice's force close tx") + } + + // We'll now mine a block which should confirm Bob's HTLC sweep + // transaction. + block = mineBlocks(t, net, 1, 1)[0] + if len(block.Transactions) != 2 { + t.Fatalf("expected 2 transactions in block, got %v", + len(block.Transactions)) + } + assertTxInBlock(t, block, bobHtlcSweep) + carolSecondLevelCSV-- + + // Now that the sweeping transaction has been confirmed, Bob should now + // recognize that all contracts have been fully resolved, and show no + // pending close channels. + pendingChansRequest := &lnrpc.PendingChannelsRequest{} + err = lntest.WaitPredicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := net.Bob.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("bob still has pending channels "+ + "but shouldn't: %v", spew.Sdump(pendingChanResp)) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // If we then mine 3 additional blocks, Carol's second level tx will + // mature, and she should pull the funds. + if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil { + t.Fatalf("unable to generate block: %v", err) + } + + carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) + if err != nil { + t.Fatalf("unable to find Carol's sweeping transaction: %v", err) + } + + // When Carol's sweep gets confirmed, she should have no more pending + // channels. + block = mineBlocks(t, net, 1, 1)[0] + assertTxInBlock(t, block, carolSweep) + + pendingChansRequest = &lnrpc.PendingChannelsRequest{} + err = lntest.WaitPredicate(func() bool { + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + pendingChanResp, err := carol.PendingChannels( + ctxt, pendingChansRequest, + ) + if err != nil { + predErr = fmt.Errorf("unable to query for pending "+ + "channels: %v", err) + return false + } + if len(pendingChanResp.PendingForceClosingChannels) != 0 { + predErr = fmt.Errorf("carol still has pending channels "+ + "but shouldn't: %v", spew.Sdump(pendingChanResp)) + return false + } + + return true + }, time.Second*15) + if err != nil { + t.Fatalf(predErr.Error()) + } + + // The invoice should show as settled for Carol, indicating that it was + // swept on-chain. 
+ invoicesReq := &lnrpc.ListInvoiceRequest{} + invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq) + if err != nil { + t.Fatalf("unable to retrieve invoices: %v", err) + } + if len(invoicesResp.Invoices) != 1 { + t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices)) + } + invoice := invoicesResp.Invoices[0] + if invoice.State != lnrpc.Invoice_SETTLED { + t.Fatalf("expected invoice to be settled on chain") + } + if invoice.AmtPaidSat != invoiceAmt { + t.Fatalf("expected invoice to be settled with %d sat, got "+ + "%d sat", invoiceAmt, invoice.AmtPaidSat) + } +} diff --git a/lnd_test.go b/lnd_test.go index eab9bc04..7af216b8 100644 --- a/lnd_test.go +++ b/lnd_test.go @@ -9614,256 +9614,6 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) } -// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the -// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC -// off-chain, then it goes on chain to claim the HTLC. In this scenario, the -// node that sent the outgoing HTLC should extract the preimage from the sweep -// transaction, and finish settling the HTLC backwards into the route. -func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With the network active, we'll now add a new invoice at Carol's end. - // Make sure the cltv expiry delta is large enough, otherwise Bob won't - // send out the outgoing htlc. - const invoiceAmt = 100000 - invoiceReq := &lnrpc.Invoice{ - Value: invoiceAmt, - CltvExpiry: 40, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq) - if err != nil { - t.Fatalf("unable to generate carol invoice: %v", err) - } - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } - err = alicePayStream.Send(&lnrpc.SendRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - }) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // At this point, all 3 nodes should now have an active channel with - // the created HTLC pending on all of them. - var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { - predErr = assertActiveHtlcs(nodes, carolInvoice.RHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // Now we'll mine enough blocks to prompt carol to actually go to the - // chain in order to sweep her HTLC since the value is high enough. 
- // TODO(roasbeef): modify once go to chain policy changes - numBlocks := uint32( - invoiceReq.CltvExpiry - defaultIncomingBroadcastDelta, - ) - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks") - } - - // At this point, Carol should broadcast her active commitment - // transaction in order to go to the chain and sweep her HTLC. - txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout) - if err != nil { - t.Fatalf("expected transaction not found in mempool: %v", err) - } - - bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - - carolFundingPoint := wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - } - - // The commitment transaction should be spending from the funding - // transaction. - commitHash := txids[0] - tx, err := net.Miner.Node.GetRawTransaction(commitHash) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - commitTx := tx.MsgTx() - - if commitTx.TxIn[0].PreviousOutPoint != carolFundingPoint { - t.Fatalf("commit transaction not spending from expected "+ - "outpoint: %v", spew.Sdump(commitTx)) - } - - // Confirm the commitment. - mineBlocks(t, net, 1, 1) - - // After the force close transaction is mined, Carol should broadcast - // her second level HTLC transaction. Bob will broadcast a sweep tx to - // sweep his output in the channel with Carol. When Bob notices Carol's - // second level transaction in the mempool, he will extract the - // preimage and settle the HTLC back off-chain. - secondLevelHashes, err := waitForNTxsInMempool(net.Miner.Node, 2, - minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } - - // Carol's second level transaction should be spending from - // the commitment transaction. - var secondLevelHash *chainhash.Hash - for _, txid := range secondLevelHashes { - tx, err := net.Miner.Node.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash == *commitHash { - secondLevelHash = txid - } - } - if secondLevelHash == nil { - t.Fatalf("Carol's second level tx not found") - } - - // We'll now mine an additional block which should confirm both the - // second layer transactions. - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - time.Sleep(time.Second * 4) - - // TODO(roasbeef): assert bob pending state as well - - // Carol's pending channel report should now show two outputs under - // limbo: her commitment output, as well as the second-layer claim - // output. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - t.Fatalf("unable to query for pending channels: %v", err) - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - t.Fatalf("carol should have pending for close chan but doesn't") - } - forceCloseChan := pendingChanResp.PendingForceClosingChannels[0] - if forceCloseChan.LimboBalance == 0 { - t.Fatalf("carol should have nonzero limbo balance instead "+ - "has: %v", forceCloseChan.LimboBalance) - } - - // The pending HTLC carol has should also now be in stage 2. 
- if len(forceCloseChan.PendingHtlcs) != 1 { - t.Fatalf("carol should have pending htlc but doesn't") - } - if forceCloseChan.PendingHtlcs[0].Stage != 2 { - t.Fatalf("carol's htlc should have advanced to the second "+ - "stage: %v", err) - } - - // Once the second-level transaction confirmed, Bob should have - // extracted the preimage from the chain, and sent it back to Alice, - // clearing the HTLC off-chain. - nodes = []*lntest.HarnessNode{net.Alice} - err = lntest.WaitPredicate(func() bool { - predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", predErr) - } - - // If we mine 4 additional blocks, then both outputs should now be - // mature. - if _, err := net.Miner.Node.Generate(defaultCSV); err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // We should have a new transaction in the mempool. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's sweeping transaction: %v", err) - } - - // Finally, if we mine an additional block to confirm these two sweep - // transactions, Carol should not show a pending channel in her report - // afterwards. - if _, err := net.Miner.Node.Generate(1); err != nil { - t.Fatalf("unable to mine block: %v", err) - } - err = lntest.WaitPredicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest) - if err != nil { - predErr = fmt.Errorf("unable to query for pending channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("carol still has pending channels: %v", - spew.Sdump(pendingChanResp)) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - // The invoice should show as settled for Carol, indicating that it was - // swept on-chain. - invoicesReq := &lnrpc.ListInvoiceRequest{} - invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq) - if err != nil { - t.Fatalf("unable to retrieve invoices: %v", err) - } - if len(invoicesResp.Invoices) != 1 { - t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices)) - } - invoice := invoicesResp.Invoices[0] - if invoice.State != lnrpc.Invoice_SETTLED { - t.Fatalf("expected invoice to be settled on chain") - } - if invoice.AmtPaidSat != invoiceAmt { - t.Fatalf("expected invoice to be settled with %d sat, got "+ - "%d sat", invoiceAmt, invoice.AmtPaidSat) - } - - // We'll close out the channel between Alice and Bob, then shutdown - // carol to conclude the test. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) -} - // testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC // scenario, if the node that extended the HTLC to the final node closes their // commitment on-chain early, then it eventually recognizes this HTLC as one @@ -10344,623 +10094,6 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false) } -// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if -// we're forced to go to chain with an incoming HTLC, then when we find out the -// preimage via the witness beacon, we properly settle the HTLC on-chain in -// order to ensure we don't lose any funds. 
-func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With the network active, we'll now add a new invoice at Carol's end. - invoiceReq := &lnrpc.Invoice{ - Value: 100000, - CltvExpiry: 40, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq) - if err != nil { - t.Fatalf("unable to generate carol invoice: %v", err) - } - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } - err = alicePayStream.Send(&lnrpc.SendRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - }) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // We'll now wait until all 3 nodes have the HTLC as just sent fully - // locked in. - var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { - predErr = assertActiveHtlcs(nodes, carolInvoice.RHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", err) - } - - // At this point, Bob decides that he wants to exit the channel - // immediately, so he force closes his commitment transaction. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - bobForceClose := closeChannelAndAssert(ctxt, t, net, net.Bob, - aliceChanPoint, true) - - // Alice will sweep her output immediately. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find alice's sweep tx in miner mempool: %v", - err) - } - - // We'll now mine enough blocks so Carol decides that she needs to go - // on-chain to claim the HTLC as Bob has been inactive. - numBlocks := uint32(invoiceReq.CltvExpiry - - defaultIncomingBroadcastDelta) - - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks") - } - - // Carol's commitment transaction should now be in the mempool. - txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } - bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundingPoint := wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - } - - // The tx should be spending from the funding transaction, - commitHash := txids[0] - tx1, err := net.Miner.Node.GetRawTransaction(commitHash) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint { - t.Fatalf("commit transaction not spending fundingtx: %v", - spew.Sdump(tx1)) - } - - // Mine a block that should confirm the commit tx. 
- block := mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("expected 2 transactions in block, got %v", - len(block.Transactions)) - } - assertTxInBlock(t, block, commitHash) - - // After the force close transacion is mined, Carol should broadcast - // her second level HTLC transacion. Bob will broadcast a sweep tx to - // sweep his output in the channel with Carol. He can do this - // immediately, as the output is not timelocked since Carol was the one - // force closing. - commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2, - minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } - - // Both Carol's second level transaction and Bob's sweep should be - // spending from the commitment transaction. - for _, txid := range commitSpends { - tx, err := net.Miner.Node.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash { - t.Fatalf("tx did not spend from commitment tx") - } - } - - // Mine a block to confirm the two transactions (+ the coinbase). - block = mineBlocks(t, net, 1, 2)[0] - if len(block.Transactions) != 3 { - t.Fatalf("expected 3 transactions in block, got %v", - len(block.Transactions)) - } - for _, txid := range commitSpends { - assertTxInBlock(t, block, txid) - } - - // Keep track of the second level tx maturity. - carolSecondLevelCSV := uint32(defaultCSV) - - // When Bob notices Carol's second level transaction in the block, he - // will extract the preimage and broadcast a second level tx to claim - // the HTLC in his (already closed) channel with Alice. - bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node, - minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } - - // It should spend from the commitment in the channel with Alice. - tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose { - t.Fatalf("tx did not spend from bob's force close tx") - } - - // At this point, Bob should have broadcast his second layer success - // transaction, and should have sent it to the nursery for incubation. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - - if len(pendingChanResp.PendingForceClosingChannels) == 0 { - predErr = fmt.Errorf("bob should have pending for " + - "close chan but doesn't") - return false - } - - for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels { - if forceCloseChan.Channel.LocalBalance != 0 { - continue - } - - if len(forceCloseChan.PendingHtlcs) != 1 { - predErr = fmt.Errorf("bob should have pending htlc " + - "but doesn't") - return false - } - stage := forceCloseChan.PendingHtlcs[0].Stage - if stage != 1 { - predErr = fmt.Errorf("bob's htlc should have "+ - "advanced to the first stage but was "+ - "stage: %v", stage) - return false - } - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr) - } - - // We'll now mine a block which should confirm Bob's second layer - // transaction. 
- block = mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("expected 2 transactions in block, got %v", - len(block.Transactions)) - } - assertTxInBlock(t, block, bobSecondLvlTx) - - // Keep track of Bob's second level maturity, and decrement our track - // of Carol's. - bobSecondLevelCSV := uint32(defaultCSV) - carolSecondLevelCSV-- - - // If we then mine 3 additional blocks, Carol's second level tx should - // mature, and she can pull the funds from it with a sweep tx. - if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - bobSecondLevelCSV -= carolSecondLevelCSV - - carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's sweeping transaction: %v", err) - } - - // Mining one additional block, Bob's second level tx is mature, and he - // can sweep the output. - block = mineBlocks(t, net, bobSecondLevelCSV, 1)[0] - assertTxInBlock(t, block, carolSweep) - - bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find bob's sweeping transaction") - } - - // Make sure it spends from the second level tx. - tx, err = net.Miner.Node.GetRawTransaction(bobSweep) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx { - t.Fatalf("tx did not spend from bob's second level tx") - } - - // When we mine one additional block, that will confirm Bob's sweep. - // Now Bob should have no pending channels anymore, as this just - // resolved it by the confirmation of the sweep transaction. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, bobSweep) - - err = lntest.WaitPredicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - req := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := net.Bob.ListChannels(ctxt, req) - if err != nil { - predErr = fmt.Errorf("unable to query for open "+ - "channels: %v", err) - return false - } - if len(chanInfo.Channels) != 0 { - predErr = fmt.Errorf("Bob should have no open "+ - "channels, instead he has %v", - len(chanInfo.Channels)) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - // Also Carol should have no channels left (open nor pending). 
- err = lntest.WaitPredicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := carol.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob carol has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - - req := &lnrpc.ListChannelsRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - chanInfo, err := carol.ListChannels(ctxt, req) - if err != nil { - predErr = fmt.Errorf("unable to query for open "+ - "channels: %v", err) - return false - } - if len(chanInfo.Channels) != 0 { - predErr = fmt.Errorf("carol should have no open "+ - "channels, instead she has %v", - len(chanInfo.Channels)) - return false - } - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } -} - -// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario, -// if the remote party goes to chain while we have an incoming HTLC, then when -// we found out the preimage via the witness beacon, we properly settle the -// HTLC on-chain in order to ensure that we don't lose any funds. -func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll create a three hop network: Alice -> Bob -> Carol, with - // Carol refusing to actually settle or directly cancel any HTLC's - // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) - - // Clean up carol's node when the test finishes. - defer shutdownAndAssert(net, t, carol) - - // With the network active, we'll now add a new invoice at Carol's end. - const invoiceAmt = 100000 - invoiceReq := &lnrpc.Invoice{ - Value: invoiceAmt, - CltvExpiry: 40, - } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq) - if err != nil { - t.Fatalf("unable to generate carol invoice: %v", err) - } - - // Now that we've created the invoice, we'll send a single payment from - // Alice to Carol. We won't wait for the response however, as Carol - // will not immediately settle the payment. - ctx, cancel := context.WithCancel(ctxb) - defer cancel() - - alicePayStream, err := net.Alice.SendPayment(ctx) - if err != nil { - t.Fatalf("unable to create payment stream for alice: %v", err) - } - err = alicePayStream.Send(&lnrpc.SendRequest{ - PaymentRequest: carolInvoice.PaymentRequest, - }) - if err != nil { - t.Fatalf("unable to send payment: %v", err) - } - - // We'll now wait until all 3 nodes have the HTLC as just sent fully - // locked in. - var predErr error - nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} - err = lntest.WaitPredicate(func() bool { - predErr = assertActiveHtlcs(nodes, carolInvoice.RHash) - if predErr != nil { - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf("htlc mismatch: %v", err) - } - - // Next, Alice decides that she wants to exit the channel, so she'll - // immediately force close the channel by broadcast her commitment - // transaction. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - aliceForceClose := closeChannelAndAssert(ctxt, t, net, net.Alice, - aliceChanPoint, true) - - // Wait for the channel to be marked pending force close. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = waitForChannelPendingForceClose(ctxt, net.Alice, aliceChanPoint) - if err != nil { - t.Fatalf("channel not pending force close: %v", err) - } - - // Mine enough blocks for Alice to sweep her funds from the force - // closed channel. - _, err = net.Miner.Node.Generate(defaultCSV) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - // Alice should now sweep her funds. - _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find sweeping tx in mempool: %v", err) - } - - // We'll now mine enough blocks so Carol decides that she needs to go - // on-chain to claim the HTLC as Bob has been inactive. - numBlocks := uint32(invoiceReq.CltvExpiry- - defaultIncomingBroadcastDelta) - defaultCSV - - if _, err := net.Miner.Node.Generate(numBlocks); err != nil { - t.Fatalf("unable to generate blocks") - } - - // Carol's commitment transaction should now be in the mempool. - txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } - bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint) - if err != nil { - t.Fatalf("unable to get txid: %v", err) - } - carolFundingPoint := wire.OutPoint{ - Hash: *bobFundingTxid, - Index: bobChanPoint.OutputIndex, - } - - // The transaction should be spending from the funding transaction - commitHash := txids[0] - tx1, err := net.Miner.Node.GetRawTransaction(commitHash) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint { - t.Fatalf("commit transaction not spending fundingtx: %v", - spew.Sdump(tx1)) - } - - // Mine a block, which should contain the commitment. - block := mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("expected 2 transactions in block, got %v", - len(block.Transactions)) - } - assertTxInBlock(t, block, commitHash) - - // After the force close transacion is mined, Carol should broadcast - // her second level HTLC transacion. Bob will broadcast a sweep tx to - // sweep his output in the channel with Carol. He can do this - // immediately, as the output is not timelocked since Carol was the one - // force closing. - commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2, - minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } - - // Both Carol's second level transaction and Bob's sweep should be - // spending from the commitment transaction. - for _, txid := range commitSpends { - tx, err := net.Miner.Node.GetRawTransaction(txid) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash { - t.Fatalf("tx did not spend from commitment tx") - } - } - - // Mine a block to confirm the two transactions (+ coinbase). - block = mineBlocks(t, net, 1, 2)[0] - if len(block.Transactions) != 3 { - t.Fatalf("expected 3 transactions in block, got %v", - len(block.Transactions)) - } - for _, txid := range commitSpends { - assertTxInBlock(t, block, txid) - } - - // Keep track of the second level tx maturity. - carolSecondLevelCSV := uint32(defaultCSV) - - // When Bob notices Carol's second level transaction in the block, he - // will extract the preimage and broadcast a sweep tx to directly claim - // the HTLC in his (already closed) channel with Alice. 
- bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node, - minerMempoolTimeout) - if err != nil { - t.Fatalf("transactions not found in mempool: %v", err) - } - - // It should spend from the commitment in the channel with Alice. - tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep) - if err != nil { - t.Fatalf("unable to get txn: %v", err) - } - if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose { - t.Fatalf("tx did not spend from alice's force close tx") - } - - // We'll now mine a block which should confirm Bob's HTLC sweep - // transaction. - block = mineBlocks(t, net, 1, 1)[0] - if len(block.Transactions) != 2 { - t.Fatalf("expected 2 transactions in block, got %v", - len(block.Transactions)) - } - assertTxInBlock(t, block, bobHtlcSweep) - carolSecondLevelCSV-- - - // Now that the sweeping transaction has been confirmed, Bob should now - // recognize that all contracts have been fully resolved, and show no - // pending close channels. - pendingChansRequest := &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := net.Bob.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("bob still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - // If we then mine 3 additional blocks, Carol's second level tx will - // mature, and she should pull the funds. - if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil { - t.Fatalf("unable to generate block: %v", err) - } - - carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout) - if err != nil { - t.Fatalf("unable to find Carol's sweeping transaction: %v", err) - } - - // When Carol's sweep gets confirmed, she should have no more pending - // channels. - block = mineBlocks(t, net, 1, 1)[0] - assertTxInBlock(t, block, carolSweep) - - pendingChansRequest = &lnrpc.PendingChannelsRequest{} - err = lntest.WaitPredicate(func() bool { - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - pendingChanResp, err := carol.PendingChannels( - ctxt, pendingChansRequest, - ) - if err != nil { - predErr = fmt.Errorf("unable to query for pending "+ - "channels: %v", err) - return false - } - if len(pendingChanResp.PendingForceClosingChannels) != 0 { - predErr = fmt.Errorf("carol still has pending channels "+ - "but shouldn't: %v", spew.Sdump(pendingChanResp)) - return false - } - - return true - }, time.Second*15) - if err != nil { - t.Fatalf(predErr.Error()) - } - - // The invoice should show as settled for Carol, indicating that it was - // swept on-chain. 
- invoicesReq := &lnrpc.ListInvoiceRequest{} - invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq) - if err != nil { - t.Fatalf("unable to retrieve invoices: %v", err) - } - if len(invoicesResp.Invoices) != 1 { - t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices)) - } - invoice := invoicesResp.Invoices[0] - if invoice.State != lnrpc.Invoice_SETTLED { - t.Fatalf("expected invoice to be settled on chain") - } - if invoice.AmtPaidSat != invoiceAmt { - t.Fatalf("expected invoice to be settled with %d sat, got "+ - "%d sat", invoiceAmt, invoice.AmtPaidSat) - } -} - // testSwitchCircuitPersistence creates a multihop network to ensure the sender // and intermediaries are persisting their open payment circuits. After // forwarding a packet via an outgoing link, all are restarted, and expected to From e0958193851dc309bd2562b98a8fcb7704122b2b Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Wed, 8 May 2019 11:22:58 +0200 Subject: [PATCH 09/14] lnd_test: add hodl parameter to createThreeHopNetwork --- lnd_multi-hop_htlc_local_chain_claim_test.go | 4 ++- ...ulti-hop_htlc_receiver_chain_claim_test.go | 4 ++- lnd_multi-hop_htlc_remote_chain_claim_test.go | 4 ++- lnd_test.go | 27 ++++++++++++------- 4 files changed, 27 insertions(+), 12 deletions(-) diff --git a/lnd_multi-hop_htlc_local_chain_claim_test.go b/lnd_multi-hop_htlc_local_chain_claim_test.go index bdcabfcc..b3453a03 100644 --- a/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -23,7 +23,9 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) + aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( + t, net, true, + ) // Clean up carol's node when the test finishes. defer shutdownAndAssert(net, t, carol) diff --git a/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lnd_multi-hop_htlc_receiver_chain_claim_test.go index 437f8b50..f629be63 100644 --- a/lnd_multi-hop_htlc_receiver_chain_claim_test.go +++ b/lnd_multi-hop_htlc_receiver_chain_claim_test.go @@ -25,7 +25,9 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) + aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( + t, net, true, + ) // Clean up carol's node when the test finishes. defer shutdownAndAssert(net, t, carol) diff --git a/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lnd_multi-hop_htlc_remote_chain_claim_test.go index 195c8315..7493ead4 100644 --- a/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ b/lnd_multi-hop_htlc_remote_chain_claim_test.go @@ -23,7 +23,9 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) + aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( + t, net, true, + ) // Clean up carol's node when the test finishes. 
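With the carolHodl parameter introduced by this patch, the hodl behaviour becomes opt-in at each call site. A brief usage sketch of the refactored helper (the non-hodl form is shown for completeness; only the hodl form is exercised by this series):

// Hodl variant, used by all multi-hop tests here: Carol runs with
// --hodl.exit-settle and never settles, forcing on-chain resolution.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(t, net, true)

// A non-hodl variant would simply pass false:
// aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(t, net, false)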
defer shutdownAndAssert(net, t, carol) diff --git a/lnd_test.go b/lnd_test.go index 7af216b8..bf48c1bc 100644 --- a/lnd_test.go +++ b/lnd_test.go @@ -9315,8 +9315,10 @@ func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client, } } -func createThreeHopHodlNetwork(t *harnessTest, - net *lntest.NetworkHarness) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) { +func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness, + carolHodl bool) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, + *lntest.HarnessNode) { + ctxb := context.Background() // We'll start the test by creating a channel between Alice and Bob, @@ -9342,10 +9344,14 @@ func createThreeHopHodlNetwork(t *harnessTest, t.Fatalf("bob didn't report channel: %v", err) } - // Next, we'll create a new node "carol" and have Bob connect to her. - // In this test, we'll make carol always hold onto the HTLC, this way - // it'll force Bob to go to chain to resolve the HTLC. - carol, err := net.NewNode("Carol", []string{"--debughtlc", "--hodl.exit-settle"}) + // Next, we'll create a new node "carol" and have Bob connect to her. If + // the carolHodl flag is set, we'll make carol always hold onto the + // HTLC, this way it'll force Bob to go to chain to resolve the HTLC. + carolFlags := []string{"--debughtlc"} + if carolHodl { + carolFlags = append(carolFlags, "--hodl.exit-settle") + } + carol, err := net.NewNode("Carol", carolFlags) if err != nil { t.Fatalf("unable to create new node: %v", err) } @@ -9393,7 +9399,8 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) { // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) + aliceChanPoint, bobChanPoint, carol := + createThreeHopNetwork(t, net, true) // Clean up carol's node when the test finishes. defer shutdownAndAssert(net, t, carol) @@ -9626,7 +9633,8 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) + aliceChanPoint, bobChanPoint, carol := + createThreeHopNetwork(t, net, true) // Clean up carol's node when the test finishes. defer shutdownAndAssert(net, t, carol) @@ -9887,7 +9895,8 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, // First, we'll create a three hop network: Alice -> Bob -> Carol, with // Carol refusing to actually settle or directly cancel any HTLC's // self. - aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net) + aliceChanPoint, bobChanPoint, carol := + createThreeHopNetwork(t, net, true) // Clean up carol's node when the test finishes. defer shutdownAndAssert(net, t, carol) From 064e8492de6518f7b585b9a337bc17e4a9351b27 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 16 Apr 2019 12:11:20 +0200 Subject: [PATCH 10/14] cnct+htlcswitch+invoices: move invoice parameter check out of link This commit is the final step in making the link unaware of invoices. It now purely offers the htlc to the invoice registry and follows instructions from the invoice registry about how and when to respond to the htlc. The change also fixes a bug where upon restart, hodl htlcs were subjected to the invoice minimum cltv delta requirement again. 
If the block height has increased in the meantime, the htlc would be
canceled back. Furthermore, the invoice registry interaction is aligned
between link and contract resolvers.
---
 channeldb/invoice_test.go                     |  26 ++-
 channeldb/invoices.go                         |  20 +-
 .../htlc_incoming_contest_resolver.go         |  66 ++++---
 contractcourt/htlc_success_resolver.go        |  33 +---
 contractcourt/interfaces.go                   |   1 +
 htlcswitch/interfaces.go                      |   1 +
 htlcswitch/link.go                            | 171 +++++-----------
 htlcswitch/mock.go                            |  12 +-
 htlcswitch/test_utils.go                      |   1 -
 invoices/invoiceregistry.go                   | 184 ++++++++++++++++--
 invoices/invoiceregistry_test.go              |  68 +++++--
 lnd_multi-hop_htlc_local_chain_claim_test.go  |  89 +++++++--
 ...ulti-hop_htlc_receiver_chain_claim_test.go |  54 ++++-
 lnd_multi-hop_htlc_remote_chain_claim_test.go |  58 +++++-
 peer.go                                       |   8 +-
 server.go                                     |   6 +-
 16 files changed, 533 insertions(+), 265 deletions(-)

diff --git a/channeldb/invoice_test.go b/channeldb/invoice_test.go
index f624350e..61e0b5d0 100644
--- a/channeldb/invoice_test.go
+++ b/channeldb/invoice_test.go
@@ -99,7 +99,10 @@ func TestInvoiceWorkflow(t *testing.T) {
 	// now have the settled bit toggle to true and a non-default
 	// SettledDate
 	payAmt := fakeInvoice.Terms.Value * 2
-	if _, err := db.AcceptOrSettleInvoice(paymentHash, payAmt); err != nil {
+	_, err = db.AcceptOrSettleInvoice(
+		paymentHash, payAmt, checkHtlcParameters,
+	)
+	if err != nil {
 		t.Fatalf("unable to settle invoice: %v", err)
 	}
 	dbInvoice2, err := db.LookupInvoice(paymentHash)
@@ -261,7 +264,9 @@ func TestInvoiceAddTimeSeries(t *testing.T) {
 
 	paymentHash := invoice.Terms.PaymentPreimage.Hash()
 
-	_, err := db.AcceptOrSettleInvoice(paymentHash, 0)
+	_, err := db.AcceptOrSettleInvoice(
+		paymentHash, 0, checkHtlcParameters,
+	)
 	if err != nil {
 		t.Fatalf("unable to settle invoice: %v", err)
 	}
@@ -342,7 +347,9 @@ func TestDuplicateSettleInvoice(t *testing.T) {
 	}
 
 	// With the invoice in the DB, we'll now attempt to settle the invoice.
-	dbInvoice, err := db.AcceptOrSettleInvoice(payHash, amt)
+	dbInvoice, err := db.AcceptOrSettleInvoice(
+		payHash, amt, checkHtlcParameters,
+	)
 	if err != nil {
 		t.Fatalf("unable to settle invoice: %v", err)
 	}
@@ -362,7 +369,9 @@ func TestDuplicateSettleInvoice(t *testing.T) {
 
 	// If we try to settle the invoice again, then we should get the very
 	// same invoice back, but with an error this time.
-	dbInvoice, err = db.AcceptOrSettleInvoice(payHash, amt)
+	dbInvoice, err = db.AcceptOrSettleInvoice(
+		payHash, amt, checkHtlcParameters,
+	)
 	if err != ErrInvoiceAlreadySettled {
 		t.Fatalf("expected ErrInvoiceAlreadySettled")
 	}
@@ -407,7 +416,10 @@ func TestQueryInvoices(t *testing.T) {
 
 		// We'll only settle half of all invoices created.
 		if i%2 == 0 {
-			if _, err := db.AcceptOrSettleInvoice(paymentHash, i); err != nil {
+			_, err := db.AcceptOrSettleInvoice(
+				paymentHash, i, checkHtlcParameters,
+			)
+			if err != nil {
 				t.Fatalf("unable to settle invoice: %v", err)
 			}
 		}
@@ -648,3 +660,7 @@ func TestQueryInvoices(t *testing.T) {
 		}
 	}
 }
+
+func checkHtlcParameters(invoice *Invoice) error {
+	return nil
+}
diff --git a/channeldb/invoices.go b/channeldb/invoices.go
index 47a8e199..95e7c9fb 100644
--- a/channeldb/invoices.go
+++ b/channeldb/invoices.go
@@ -631,8 +631,12 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
 //
 // When the preimage for the invoice is unknown (hold invoice), the invoice is
 // marked as accepted.
+//
+// TODO: Store invoice cltv as separate field in database so that it doesn't
+// need to be decoded from the payment request.
 func (d *DB) AcceptOrSettleInvoice(paymentHash [32]byte,
-	amtPaid lnwire.MilliSatoshi) (*Invoice, error) {
+	amtPaid lnwire.MilliSatoshi,
+	checkHtlcParameters func(invoice *Invoice) error) (*Invoice, error) {
 
 	var settledInvoice *Invoice
 	err := d.Update(func(tx *bbolt.Tx) error {
@@ -662,6 +666,7 @@ func (d *DB) AcceptOrSettleInvoice(paymentHash [32]byte,
 
 		settledInvoice, err = acceptOrSettleInvoice(
 			invoices, settleIndex, invoiceNum, amtPaid,
+			checkHtlcParameters,
 		)
 
 		return err
@@ -988,8 +993,10 @@ func deserializeInvoice(r io.Reader) (Invoice, error) {
 	return invoice, nil
 }
 
-func acceptOrSettleInvoice(invoices, settleIndex *bbolt.Bucket, invoiceNum []byte,
-	amtPaid lnwire.MilliSatoshi) (*Invoice, error) {
+func acceptOrSettleInvoice(invoices, settleIndex *bbolt.Bucket,
+	invoiceNum []byte, amtPaid lnwire.MilliSatoshi,
+	checkHtlcParameters func(invoice *Invoice) error) (
+	*Invoice, error) {
 
 	invoice, err := fetchInvoice(invoiceNum, invoices)
 	if err != nil {
@@ -1007,6 +1014,13 @@ func acceptOrSettleInvoice(invoices, settleIndex *bbolt.Bucket, invoiceNum []byt
 		return &invoice, ErrInvoiceAlreadyCanceled
 	}
 
+	// If the invoice is still open, check the htlc parameters.
+	if err := checkHtlcParameters(&invoice); err != nil {
+		return &invoice, err
+	}
+
+	// Check to see if we can settle, or if this is a hold invoice and we
+	// need to wait for the preimage.
 	holdInvoice := invoice.Terms.PaymentPreimage == UnknownPreimage
 	if holdInvoice {
 		invoice.Terms.State = ContractAccepted
diff --git a/contractcourt/htlc_incoming_contest_resolver.go b/contractcourt/htlc_incoming_contest_resolver.go
index c1933e9e..9af694d6 100644
--- a/contractcourt/htlc_incoming_contest_resolver.go
+++ b/contractcourt/htlc_incoming_contest_resolver.go
@@ -135,28 +135,52 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
 	preimageSubscription := h.PreimageDB.SubscribeUpdates()
 	defer preimageSubscription.CancelSubscription()
 
-	// Create a buffered hodl chan to prevent deadlock.
-	hodlChan := make(chan interface{}, 1)
+	// Define a closure to process hodl events, either directly or
+	// triggered by a later notification.
+	processHodlEvent := func(e invoices.HodlEvent) (ContractResolver,
+		error) {
 
-	// Notify registry that we are potentially settling as exit hop
-	// on-chain, so that we will get a hodl event when a corresponding hodl
-	// invoice is settled.
-	event, err := h.Registry.NotifyExitHopHtlc(h.payHash, h.htlcAmt, hodlChan)
-	if err != nil && err != channeldb.ErrInvoiceNotFound {
-		return nil, err
-	}
-	defer h.Registry.HodlUnsubscribeAll(hodlChan)
+		if e.Preimage == nil {
+			log.Infof("%T(%v): Exit hop HTLC canceled "+
+				"(expiry=%v, height=%v), abandoning", h,
+				h.htlcResolution.ClaimOutpoint,
+				h.htlcExpiry, currentHeight)
 
-	// If the htlc can be settled directly, we can progress to the inner
-	// resolver immediately.
-	if event != nil && event.Preimage != nil {
-		if err := applyPreimage(*event.Preimage); err != nil {
+			h.resolved = true
+			return nil, h.Checkpoint(h)
+		}
+
+		if err := applyPreimage(*e.Preimage); err != nil {
 			return nil, err
 		}
 
 		return &h.htlcSuccessResolver, nil
 	}
 
+	// Create a buffered hodl chan to prevent deadlock.
+	hodlChan := make(chan interface{}, 1)
+
+	// Notify registry that we are potentially resolving as an exit hop
+	// on-chain. If this HTLC indeed pays to an existing invoice, the
+	// invoice registry will tell us what to do with the HTLC. This is
+	// identical to HTLC resolution in the link.
+ event, err := h.Registry.NotifyExitHopHtlc( + h.payHash, h.htlcAmt, h.htlcExpiry, currentHeight, + hodlChan, + ) + switch err { + case channeldb.ErrInvoiceNotFound: + case nil: + defer h.Registry.HodlUnsubscribeAll(hodlChan) + + // Resolve the htlc directly if possible. + if event != nil { + return processHodlEvent(*event) + } + default: + return nil, err + } + // With the epochs and preimage subscriptions initialized, we'll query // to see if we already know the preimage. preimage, ok := h.PreimageDB.LookupPreimage(h.payHash) @@ -193,16 +217,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { case hodlItem := <-hodlChan: hodlEvent := hodlItem.(invoices.HodlEvent) - // Only process settle events. - if hodlEvent.Preimage == nil { - continue - } - - if err := applyPreimage(*event.Preimage); err != nil { - return nil, err - } - - return &h.htlcSuccessResolver, nil + return processHodlEvent(hodlEvent) case newBlock, ok := <-blockEpochs.Epochs: if !ok { @@ -211,8 +226,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) { // If this new height expires the HTLC, then this means // we never found out the preimage, so we can mark - // resolved and - // exit. + // resolved and exit. newHeight := uint32(newBlock.Height) if newHeight >= h.htlcExpiry { log.Infof("%T(%v): HTLC has timed out "+ diff --git a/contractcourt/htlc_success_resolver.go b/contractcourt/htlc_success_resolver.go index ce3ec056..f4005525 100644 --- a/contractcourt/htlc_success_resolver.go +++ b/contractcourt/htlc_success_resolver.go @@ -7,7 +7,6 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" - "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" @@ -78,9 +77,11 @@ func (h *htlcSuccessResolver) ResolverKey() []byte { } // Resolve attempts to resolve an unresolved incoming HTLC that we know the -// preimage to. If the HTLC is on the commitment of the remote party, then -// we'll simply sweep it directly. Otherwise, we'll hand this off to the utxo -// nursery to do its duty. +// preimage to. If the HTLC is on the commitment of the remote party, then we'll +// simply sweep it directly. Otherwise, we'll hand this off to the utxo nursery +// to do its duty. There is no need to make a call to the invoice registry +// anymore. Every HTLC has already passed through the incoming contest resolver +// and in there the invoice was already marked as settled. // // TODO(roasbeef): create multi to batch // @@ -177,19 +178,6 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { return nil, fmt.Errorf("quitting") } - // With the HTLC claimed, we can attempt to settle its - // corresponding invoice if we were the original destination. As - // the htlc is already settled at this point, we don't need to - // read on the hodl channel. - hodlChan := make(chan interface{}, 1) - _, err = h.Registry.NotifyExitHopHtlc( - h.payHash, h.htlcAmt, hodlChan, - ) - if err != nil && err != channeldb.ErrInvoiceNotFound { - log.Errorf("Unable to settle invoice with payment "+ - "hash %x: %v", h.payHash, err) - } - // Once the transaction has received a sufficient number of // confirmations, we'll mark ourselves as fully resolved and exit. 
h.resolved = true @@ -255,17 +243,6 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { return nil, fmt.Errorf("quitting") } - // With the HTLC claimed, we can attempt to settle its corresponding - // invoice if we were the original destination. As the htlc is already - // settled at this point, we don't need to read on the hodl - // channel. - hodlChan := make(chan interface{}, 1) - _, err = h.Registry.NotifyExitHopHtlc(h.payHash, h.htlcAmt, hodlChan) - if err != nil && err != channeldb.ErrInvoiceNotFound { - log.Errorf("Unable to settle invoice with payment "+ - "hash %x: %v", h.payHash, err) - } - h.resolved = true return nil, h.Checkpoint(h) } diff --git a/contractcourt/interfaces.go b/contractcourt/interfaces.go index fa702e9f..a18ee3d6 100644 --- a/contractcourt/interfaces.go +++ b/contractcourt/interfaces.go @@ -21,6 +21,7 @@ type Registry interface { // htlc should be resolved. If the htlc cannot be resolved immediately, // the resolution is sent on the passed in hodlChan later. NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi, + expiry uint32, currentHeight int32, hodlChan chan<- interface{}) (*invoices.HodlEvent, error) // HodlUnsubscribeAll unsubscribes from all hodl events. diff --git a/htlcswitch/interfaces.go b/htlcswitch/interfaces.go index a9dc2bc5..68350f62 100644 --- a/htlcswitch/interfaces.go +++ b/htlcswitch/interfaces.go @@ -24,6 +24,7 @@ type InvoiceDatabase interface { // htlc should be resolved. If the htlc cannot be resolved immediately, // the resolution is sent on the passed in hodlChan later. NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi, + expiry uint32, currentHeight int32, hodlChan chan<- interface{}) (*invoices.HodlEvent, error) // CancelInvoice attempts to cancel the invoice corresponding to the diff --git a/htlcswitch/link.go b/htlcswitch/link.go index 1c7cb11f..dc50b4fd 100644 --- a/htlcswitch/link.go +++ b/htlcswitch/link.go @@ -235,13 +235,6 @@ type ChannelLinkConfig struct { MinFeeUpdateTimeout time.Duration MaxFeeUpdateTimeout time.Duration - // FinalCltvRejectDelta defines the number of blocks before the expiry - // of the htlc where we no longer settle it as an exit hop and instead - // cancel it back. Normally this value should be lower than the cltv - // expiry of any invoice we create and the code effectuating this should - // not be hit. - FinalCltvRejectDelta uint32 - // OutgoingCltvRejectDelta defines the number of blocks before expiry of // an htlc where we don't offer an htlc anymore. This should be at least // the outgoing broadcast delta, because in any case we don't want to @@ -1173,15 +1166,12 @@ func (l *channelLink) processHodlEvent(hodlEvent invoices.HodlEvent, htlcs ...hodlHtlc) error { hash := hodlEvent.Hash - if hodlEvent.Preimage == nil { - l.debugf("Received hodl cancel event for %v", hash) - } else { - l.debugf("Received hodl settle event for %v", hash) - } // Determine required action for the resolution. 
var hodlAction func(htlc hodlHtlc) error if hodlEvent.Preimage != nil { + l.debugf("Received hodl settle event for %v", hash) + hodlAction = func(htlc hodlHtlc) error { return l.settleHTLC( *hodlEvent.Preimage, htlc.pd.HtlcIndex, @@ -1189,10 +1179,30 @@ func (l *channelLink) processHodlEvent(hodlEvent invoices.HodlEvent, ) } } else { + l.debugf("Received hodl cancel event for %v, reason=%v", + hash, hodlEvent.CancelReason) + hodlAction = func(htlc hodlHtlc) error { - failure := lnwire.NewFailUnknownPaymentHash( - htlc.pd.Amount, - ) + var failure lnwire.FailureMessage + switch hodlEvent.CancelReason { + + case invoices.CancelAmountTooLow: + fallthrough + case invoices.CancelInvoiceUnknown: + fallthrough + case invoices.CancelInvoiceCanceled: + failure = lnwire.NewFailUnknownPaymentHash( + htlc.pd.Amount, + ) + + case invoices.CancelExpiryTooSoon: + failure = lnwire.FailFinalExpiryTooSoon{} + + default: + return fmt.Errorf("unknown cancel reason: %v", + hodlEvent.CancelReason) + } + l.sendHTLCError( htlc.pd.HtlcIndex, failure, htlc.obfuscator, htlc.pd.SourceRef, @@ -2739,94 +2749,14 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor, return false, nil } - // First, we'll check the expiry of the HTLC itself against, the current - // block height. If the timeout is too soon, then we'll reject the HTLC. - if pd.Timeout <= heightNow+l.cfg.FinalCltvRejectDelta { - log.Errorf("htlc(%x) has an expiry that's too soon: expiry=%v"+ - ", best_height=%v", pd.RHash[:], pd.Timeout, heightNow) - - failure := lnwire.NewFinalExpiryTooSoon() - l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) - - return true, nil - } - - // We're the designated payment destination. Therefore we attempt to - // see if we have an invoice locally which'll allow us to settle this - // htlc. - // - // Only the immutable data from LookupInvoice is used, because otherwise - // a race condition may be created with concurrent writes to the invoice - // registry. For example: cancelation of an invoice. - invoiceHash := lntypes.Hash(pd.RHash) - invoice, minCltvDelta, err := l.cfg.Registry.LookupInvoice(invoiceHash) - if err != nil { - log.Errorf("unable to query invoice registry: %v", err) - failure := lnwire.NewFailUnknownPaymentHash(pd.Amount) - l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) - - return true, nil - } - - // If the invoice is already settled, we choose to accept the payment to - // simplify failure recovery. - // - // NOTE: Though our recovery and forwarding logic is predominately - // batched, settling invoices happens iteratively. We may reject one of - // two payments for the same rhash at first, but then restart and reject - // both after seeing that the invoice has been settled. Without any - // record of which one settles first, it is ambiguous as to which one - // actually settled the invoice. Thus, by accepting all payments, we - // eliminate the race condition that can lead to this inconsistency. - // - // TODO(conner): track ownership of settlements to properly recover from - // failures? or add batch invoice settlement - // - // TODO(joostjager): The log statement below is not always accurate, as - // the invoice may have been canceled after the LookupInvoice call. - // Leaving it as is for now, because fixing this would involve changing - // the signature of InvoiceRegistry.SettleInvoice just because of this - // log statement. 
- if invoice.Terms.State == channeldb.ContractSettled { - log.Warnf("Accepting duplicate payment for hash=%x", - pd.RHash[:]) - } - - // If we're not currently in debug mode, and the extended htlc doesn't - // meet the value requested, then we'll fail the htlc. Otherwise, we - // settle this htlc within our local state update log, then send the - // update entry to the remote party. - // - // NOTE: We make an exception when the value requested by the invoice is - // zero. This means the invoice allows the payee to specify the amount - // of satoshis they wish to send. So since we expect the htlc to have a - // different amount, we should not fail. - if !l.cfg.DebugHTLC && invoice.Terms.Value > 0 && - pd.Amount < invoice.Terms.Value { - - log.Errorf("rejecting htlc due to incorrect amount: expected "+ - "%v, received %v", invoice.Terms.Value, pd.Amount) - - failure := lnwire.NewFailUnknownPaymentHash(pd.Amount) - l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) - - return true, nil - } - // As we're the exit hop, we'll double check the hop-payload included in // the HTLC to ensure that it was crafted correctly by the sender and // matches the HTLC we were extended. - // - // NOTE: We make an exception when the value requested by the invoice is - // zero. This means the invoice allows the payee to specify the amount - // of satoshis they wish to send. So since we expect the htlc to have a - // different amount, we should not fail. - if !l.cfg.DebugHTLC && invoice.Terms.Value > 0 && - fwdInfo.AmountToForward < invoice.Terms.Value { + if !l.cfg.DebugHTLC && pd.Amount != fwdInfo.AmountToForward { log.Errorf("Onion payload of incoming htlc(%x) has incorrect "+ "value: expected %v, got %v", pd.RHash, - invoice.Terms.Value, fwdInfo.AmountToForward) + pd.Amount, fwdInfo.AmountToForward) failure := lnwire.NewFailUnknownPaymentHash(pd.Amount) l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) @@ -2835,27 +2765,11 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor, } // We'll also ensure that our time-lock value has been computed - // correctly. Only check the final cltv expiry for invoices when the - // invoice has not yet moved to the accepted state. Otherwise hodl htlcs - // would be canceled after a restart. - expectedHeight := heightNow + minCltvDelta - switch { - case !l.cfg.DebugHTLC && - invoice.Terms.State == channeldb.ContractOpen && - pd.Timeout < expectedHeight: - - log.Errorf("Incoming htlc(%x) has an expiration that is too "+ - "soon: expected at least %v, got %v", - pd.RHash[:], expectedHeight, pd.Timeout) - - failure := lnwire.FailFinalExpiryTooSoon{} - l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) - - return true, nil - - case !l.cfg.DebugHTLC && pd.Timeout != fwdInfo.OutgoingCTLV: - log.Errorf("HTLC(%x) has incorrect time-lock: expected %v, "+ - "got %v", pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV) + // correctly. + if !l.cfg.DebugHTLC && pd.Timeout != fwdInfo.OutgoingCTLV { + log.Errorf("Onion payload of incoming htlc(%x) has incorrect "+ + "time-lock: expected %v, got %v", + pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV) failure := lnwire.NewFinalIncorrectCltvExpiry( fwdInfo.OutgoingCTLV, @@ -2868,10 +2782,27 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor, // Notify the invoiceRegistry of the exit hop htlc. If we crash right // after this, this code will be re-executed after restart. We will // receive back a resolution event. 
+ invoiceHash := lntypes.Hash(pd.RHash) + event, err := l.cfg.Registry.NotifyExitHopHtlc( - invoiceHash, pd.Amount, l.hodlQueue.ChanIn(), + invoiceHash, pd.Amount, pd.Timeout, int32(heightNow), + l.hodlQueue.ChanIn(), ) - if err != nil { + + switch err { + + // Cancel htlc if we don't have an invoice for it. + case channeldb.ErrInvoiceNotFound: + failure := lnwire.NewFailUnknownPaymentHash(pd.Amount) + l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef) + + return true, nil + + // No error. + case nil: + + // Pass error to caller. + default: return false, err } diff --git a/htlcswitch/mock.go b/htlcswitch/mock.go index 9f62f6c4..be9c3994 100644 --- a/htlcswitch/mock.go +++ b/htlcswitch/mock.go @@ -766,7 +766,9 @@ func newMockRegistry(minDelta uint32) *mockInvoiceRegistry { return testInvoiceCltvExpiry, nil } - registry := invoices.NewRegistry(cdb, decodeExpiry) + finalCltvRejectDelta := int32(5) + + registry := invoices.NewRegistry(cdb, decodeExpiry, finalCltvRejectDelta) registry.Start() return &mockInvoiceRegistry{ @@ -784,10 +786,12 @@ func (i *mockInvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error } func (i *mockInvoiceRegistry) NotifyExitHopHtlc(rhash lntypes.Hash, - amt lnwire.MilliSatoshi, hodlChan chan<- interface{}) ( - *invoices.HodlEvent, error) { + amt lnwire.MilliSatoshi, expiry uint32, currentHeight int32, + hodlChan chan<- interface{}) (*invoices.HodlEvent, error) { - event, err := i.registry.NotifyExitHopHtlc(rhash, amt, hodlChan) + event, err := i.registry.NotifyExitHopHtlc( + rhash, amt, expiry, currentHeight, hodlChan, + ) if err != nil { return nil, err } diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go index bc1035b3..d749eaa2 100644 --- a/htlcswitch/test_utils.go +++ b/htlcswitch/test_utils.go @@ -1056,7 +1056,6 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer, MinFeeUpdateTimeout: minFeeUpdateTimeout, MaxFeeUpdateTimeout: maxFeeUpdateTimeout, OnChannelFailure: func(lnwire.ChannelID, lnwire.ShortChannelID, LinkFailureError) {}, - FinalCltvRejectDelta: 5, OutgoingCltvRejectDelta: 3, }, channel, diff --git a/invoices/invoiceregistry.go b/invoices/invoiceregistry.go index 172eb075..5fe17d04 100644 --- a/invoices/invoiceregistry.go +++ b/invoices/invoiceregistry.go @@ -2,6 +2,7 @@ package invoices import ( "bytes" + "errors" "fmt" "sync" "sync/atomic" @@ -27,12 +28,64 @@ var ( DebugHash = DebugPre.Hash() ) +// HtlcCancelReason defines reasons for which htlcs can be canceled. +type HtlcCancelReason uint8 + +const ( + // CancelInvoiceUnknown is returned if the preimage is unknown. + CancelInvoiceUnknown HtlcCancelReason = iota + + // CancelExpiryTooSoon is returned when the timelock of the htlc + // does not satisfy the invoice cltv expiry requirement. + CancelExpiryTooSoon + + // CancelInvoiceCanceled is returned when the invoice is already + // canceled and can't be paid to anymore. + CancelInvoiceCanceled + + // CancelAmountTooLow is returned when the amount paid is too low. + CancelAmountTooLow +) + +// String returns a human readable identifier for the cancel reason. 
+func (r HtlcCancelReason) String() string {
+	switch r {
+	case CancelInvoiceUnknown:
+		return "InvoiceUnknown"
+	case CancelExpiryTooSoon:
+		return "ExpiryTooSoon"
+	case CancelInvoiceCanceled:
+		return "InvoiceCanceled"
+	case CancelAmountTooLow:
+		return "AmountTooLow"
+	default:
+		return "Unknown"
+	}
+}
+
+var (
+	// ErrInvoiceExpiryTooSoon is returned when an invoice is attempted to be
+	// accepted or settled with not enough blocks remaining.
+	ErrInvoiceExpiryTooSoon = errors.New("invoice expiry too soon")
+
+	// ErrInvoiceAmountTooLow is returned when an invoice is attempted to be
+	// accepted or settled with an amount that is too low.
+	ErrInvoiceAmountTooLow = errors.New("paid amount less than invoice amount")
+)
+
 // HodlEvent describes how an htlc should be resolved. If HodlEvent.Preimage is
 // set, the event indicates a settle event. If Preimage is nil, it is a cancel
 // event.
 type HodlEvent struct {
+	// Preimage is the htlc preimage. Its value is nil in case of a cancel.
 	Preimage *lntypes.Preimage
-	Hash     lntypes.Hash
+
+	// Hash is the htlc hash.
+	Hash lntypes.Hash
+
+	// CancelReason specifies the reason why the invoice registry decided
+	// to cancel the htlc.
+	CancelReason HtlcCancelReason
 }
 
 // InvoiceRegistry is a central registry of all the outstanding invoices
@@ -70,6 +123,13 @@ type InvoiceRegistry struct {
 	// is used to unsubscribe from all hashes efficiently.
 	hodlReverseSubscriptions map[chan<- interface{}]map[lntypes.Hash]struct{}
 
+	// finalCltvRejectDelta defines the number of blocks before the expiry
+	// of the htlc where we no longer settle it as an exit hop and instead
+	// cancel it back. Normally this value should be lower than the cltv
+	// expiry of any invoice we create and the code effectuating this should
+	// not be hit.
+	finalCltvRejectDelta int32
+
 	wg   sync.WaitGroup
 	quit chan struct{}
 }
@@ -79,7 +139,7 @@ type InvoiceRegistry struct {
 // layer. The in-memory layer is in place such that debug invoices can be added
 // which are volatile yet available system wide within the daemon.
 func NewRegistry(cdb *channeldb.DB, decodeFinalCltvExpiry func(invoice string) (
-	uint32, error)) *InvoiceRegistry {
+	uint32, error), finalCltvRejectDelta int32) *InvoiceRegistry {
 
 	return &InvoiceRegistry{
 		cdb: cdb,
@@ -93,6 +153,7 @@ func NewRegistry(cdb *channeldb.DB, decodeFinalCltvExpiry func(invoice string) (
 		hodlSubscriptions:        make(map[lntypes.Hash]map[chan<- interface{}]struct{}),
 		hodlReverseSubscriptions: make(map[chan<- interface{}]map[lntypes.Hash]struct{}),
 		decodeFinalCltvExpiry:    decodeFinalCltvExpiry,
+		finalCltvRejectDelta:     finalCltvRejectDelta,
 		quit:                     make(chan struct{}),
 	}
 }
@@ -460,6 +521,35 @@ func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice,
 	return invoice, expiry, nil
 }
 
+// checkHtlcParameters is a callback used inside invoice db transactions to
+// atomically check-and-update an invoice.
+func (i *InvoiceRegistry) checkHtlcParameters(invoice *channeldb.Invoice,
+	amtPaid lnwire.MilliSatoshi, htlcExpiry uint32, currentHeight int32) error {
+
+	expiry, err := i.decodeFinalCltvExpiry(string(invoice.PaymentRequest))
+	if err != nil {
+		return err
+	}
+
+	if htlcExpiry < uint32(currentHeight+i.finalCltvRejectDelta) {
+		return ErrInvoiceExpiryTooSoon
+	}
+
+	if htlcExpiry < uint32(currentHeight)+expiry {
+		return ErrInvoiceExpiryTooSoon
+	}
+
+	// If an invoice amount is specified, check that enough is paid. This
+	// check is only performed for open invoices.
Once a sufficiently large + // payment has been made and the invoice is in the accepted or settled + // state, any amount will be accepted on top of that. + if invoice.Terms.Value > 0 && amtPaid < invoice.Terms.Value { + return ErrInvoiceAmountTooLow + } + + return nil +} + // NotifyExitHopHtlc attempts to mark an invoice as settled. If the invoice is a // debug invoice, then this method is a noop as debug invoices are never fully // settled. The return value describes how the htlc should be resolved. @@ -472,59 +562,112 @@ func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice, // the channel is either buffered or received on from another goroutine to // prevent deadlock. func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash, - amtPaid lnwire.MilliSatoshi, hodlChan chan<- interface{}) ( - *HodlEvent, error) { + amtPaid lnwire.MilliSatoshi, expiry uint32, currentHeight int32, + hodlChan chan<- interface{}) (*HodlEvent, error) { i.Lock() defer i.Unlock() - log.Debugf("Invoice(%x): htlc accepted", rHash[:]) - - createEvent := func(preimage *lntypes.Preimage) *HodlEvent { - return &HodlEvent{ - Hash: rHash, - Preimage: preimage, - } + debugLog := func(s string) { + log.Debugf("Invoice(%x): %v, amt=%v, expiry=%v", + rHash[:], s, amtPaid, expiry) } - // First check the in-memory debug invoice index to see if this is an // existing invoice added for debugging. if invoice, ok := i.debugInvoices[rHash]; ok { + debugLog("payment to debug invoice accepted") + // Debug invoices are never fully settled, so we just settle the // htlc in this case. - return createEvent(&invoice.Terms.PaymentPreimage), nil + return &HodlEvent{ + Hash: rHash, + Preimage: &invoice.Terms.PaymentPreimage, + }, nil } // If this isn't a debug invoice, then we'll attempt to settle an // invoice matching this rHash on disk (if one exists). - invoice, err := i.cdb.AcceptOrSettleInvoice(rHash, amtPaid) + invoice, err := i.cdb.AcceptOrSettleInvoice( + rHash, amtPaid, + func(inv *channeldb.Invoice) error { + return i.checkHtlcParameters( + inv, amtPaid, expiry, currentHeight, + ) + }, + ) switch err { // If invoice is already settled, settle htlc. This means we accept more // payments to the same invoice hash. + // + // NOTE: Though our recovery and forwarding logic is predominately + // batched, settling invoices happens iteratively. We may reject one of + // two payments for the same rhash at first, but then restart and reject + // both after seeing that the invoice has been settled. Without any + // record of which one settles first, it is ambiguous as to which one + // actually settled the invoice. Thus, by accepting all payments, we + // eliminate the race condition that can lead to this inconsistency. + // + // TODO(conner): track ownership of settlements to properly recover from + // failures? or add batch invoice settlement case channeldb.ErrInvoiceAlreadySettled: - return createEvent(&invoice.Terms.PaymentPreimage), nil + debugLog("accepting duplicate payment to settled invoice") + + return &HodlEvent{ + Hash: rHash, + Preimage: &invoice.Terms.PaymentPreimage, + }, nil // If invoice is already canceled, cancel htlc. case channeldb.ErrInvoiceAlreadyCanceled: - return createEvent(nil), nil + debugLog("invoice already canceled") + + return &HodlEvent{ + Hash: rHash, + CancelReason: CancelInvoiceCanceled, + }, nil // If invoice is already accepted, add this htlc to the list of // subscribers. 
 	case channeldb.ErrInvoiceAlreadyAccepted:
+		debugLog("accepting duplicate payment to accepted invoice")
+
 		i.hodlSubscribe(hodlChan, rHash)
 		return nil, nil
 
+	// If there are not enough blocks left, cancel the htlc.
+	case ErrInvoiceExpiryTooSoon:
+		debugLog("expiry too soon")
+
+		return &HodlEvent{
+			Hash:         rHash,
+			CancelReason: CancelExpiryTooSoon,
+		}, nil
+
+	// If the amount paid is too low, cancel the htlc.
+	case ErrInvoiceAmountTooLow:
+		debugLog("amount too low")
+
+		return &HodlEvent{
+			Hash:         rHash,
+			CancelReason: CancelAmountTooLow,
+		}, nil
+
 	// If this call settled the invoice, settle the htlc. Otherwise
 	// subscribe for a future hodl event.
 	case nil:
 		i.notifyClients(rHash, invoice, invoice.Terms.State)
 		switch invoice.Terms.State {
 		case channeldb.ContractSettled:
-			log.Debugf("Invoice(%x): settled", rHash[:])
+			debugLog("settled")
 
-			return createEvent(&invoice.Terms.PaymentPreimage), nil
+			return &HodlEvent{
+				Hash:     rHash,
+				Preimage: &invoice.Terms.PaymentPreimage,
+			}, nil
 		case channeldb.ContractAccepted:
+			debugLog("accepted")
+
 			// Subscribe to updates to this invoice.
 			i.hodlSubscribe(hodlChan, rHash)
 			return nil, nil
@@ -534,6 +677,8 @@ func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash,
 		}
 
 	default:
+		debugLog(err.Error())
+
 		return nil, err
 	}
 }
@@ -584,7 +729,8 @@ func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error {
 	log.Debugf("Invoice(%v): canceled", payHash)
 	i.notifyHodlSubscribers(HodlEvent{
-		Hash: payHash,
+		Hash:         payHash,
+		CancelReason: CancelInvoiceCanceled,
 	})
 	i.notifyClients(payHash, invoice, channeldb.ContractCanceled)
diff --git a/invoices/invoiceregistry_test.go b/invoices/invoiceregistry_test.go
index 8ffc780c..cb554faa 100644
--- a/invoices/invoiceregistry_test.go
+++ b/invoices/invoiceregistry_test.go
@@ -6,11 +6,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/btcsuite/btcd/chaincfg"
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/lntypes"
 	"github.com/lightningnetwork/lnd/lnwire"
-	"github.com/lightningnetwork/lnd/zpay32"
 )
 
 var (
@@ -23,18 +21,15 @@ var (
 
 	hash = preimage.Hash()
 
-	// testPayReq is a dummy payment request that does parse properly. It
-	// has no relation with the real invoice parameters and isn't asserted
-	// on in this test. LookupInvoice requires this to have a valid value.
-	testPayReq = "lnbc500u1pwywxzwpp5nd2u9xzq02t0tuf2654as7vma42lwkcjptx4yzfq0umq4swpa7cqdqqcqzysmlpc9ewnydr8rr8dnltyxphdyf6mcqrsd6dml8zajtyhwe6a45d807kxtmzayuf0hh2d9tn478ecxkecdg7c5g85pntupug5kakm7xcpn63zqk"
+	testInvoiceExpiry = uint32(3)
+
+	testCurrentHeight = int32(0)
+
+	testFinalCltvRejectDelta = int32(3)
 )
 
 func decodeExpiry(payReq string) (uint32, error) {
-	invoice, err := zpay32.Decode(payReq, &chaincfg.MainNetParams)
-	if err != nil {
-		return 0, err
-	}
-	return uint32(invoice.MinFinalCLTVExpiry()), nil
+	return uint32(testInvoiceExpiry), nil
 }
 
 var (
@@ -43,7 +38,6 @@ var (
 		PaymentPreimage: preimage,
 		Value:           lnwire.MilliSatoshi(100000),
 	},
-	PaymentRequest: []byte(testPayReq),
 }
 )
@@ -54,7 +48,7 @@ func newTestContext(t *testing.T) (*InvoiceRegistry, func()) {
 	}
 
 	// Instantiate and start the invoice registry.
-	registry := NewRegistry(cdb, decodeExpiry)
+	registry := NewRegistry(cdb, decodeExpiry, testFinalCltvRejectDelta)
 
 	err = registry.Start()
 	if err != nil {
@@ -121,7 +115,9 @@ func TestSettleInvoice(t *testing.T) {
 
 	// Settle invoice with a slightly higher amount.
 	amtPaid := lnwire.MilliSatoshi(100500)
-	_, err = registry.NotifyExitHopHtlc(hash, amtPaid, hodlChan)
+	_, err = registry.NotifyExitHopHtlc(
+		hash, amtPaid, testInvoiceExpiry, testCurrentHeight, hodlChan,
+	)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -153,13 +149,18 @@ func TestSettleInvoice(t *testing.T) {
 	}
 
 	// Try to settle again.
-	_, err = registry.NotifyExitHopHtlc(hash, amtPaid, hodlChan)
+	_, err = registry.NotifyExitHopHtlc(
+		hash, amtPaid, testInvoiceExpiry, testCurrentHeight, hodlChan,
+	)
 	if err != nil {
 		t.Fatal("expected duplicate settle to succeed")
 	}
 
 	// Try to settle again with a different amount.
-	_, err = registry.NotifyExitHopHtlc(hash, amtPaid+600, hodlChan)
+	_, err = registry.NotifyExitHopHtlc(
+		hash, amtPaid+600, testInvoiceExpiry, testCurrentHeight,
+		hodlChan,
+	)
 	if err != nil {
 		t.Fatal("expected duplicate settle to succeed")
 	}
@@ -274,7 +275,9 @@ func TestCancelInvoice(t *testing.T) {
 	// Notify arrival of a new htlc paying to this invoice. This should
 	// succeed.
 	hodlChan := make(chan interface{})
-	event, err := registry.NotifyExitHopHtlc(hash, amt, hodlChan)
+	event, err := registry.NotifyExitHopHtlc(
+		hash, amt, testInvoiceExpiry, testCurrentHeight, hodlChan,
+	)
 	if err != nil {
 		t.Fatal("expected settlement of a canceled invoice to succeed")
 	}
@@ -292,7 +295,7 @@ func TestHoldInvoice(t *testing.T) {
 	defer cleanup()
 
 	// Instantiate and start the invoice registry.
-	registry := NewRegistry(cdb, decodeExpiry)
+	registry := NewRegistry(cdb, decodeExpiry, testFinalCltvRejectDelta)
 
 	err = registry.Start()
 	if err != nil {
@@ -345,7 +348,9 @@ func TestHoldInvoice(t *testing.T) {
 
 	// NotifyExitHopHtlc without a preimage present in the invoice registry
 	// should be possible.
-	event, err := registry.NotifyExitHopHtlc(hash, amtPaid, hodlChan)
+	event, err := registry.NotifyExitHopHtlc(
+		hash, amtPaid, testInvoiceExpiry, testCurrentHeight, hodlChan,
+	)
 	if err != nil {
 		t.Fatalf("expected settle to succeed but got %v", err)
 	}
@@ -354,7 +359,9 @@ func TestHoldInvoice(t *testing.T) {
 	}
 
 	// Test idempotency.
-	event, err = registry.NotifyExitHopHtlc(hash, amtPaid, hodlChan)
+	event, err = registry.NotifyExitHopHtlc(
+		hash, amtPaid, testInvoiceExpiry, testCurrentHeight, hodlChan,
+	)
 	if err != nil {
 		t.Fatalf("expected settle to succeed but got %v", err)
 	}
@@ -434,3 +441,24 @@ func newDB() (*channeldb.DB, func(), error) {
 
 	return cdb, cleanUp, nil
 }
+
+// TestUnknownInvoice tests that the invoice registry returns an error when the
+// invoice is unknown. This is to guard against returning a cancel hodl event
+// for forwarded htlcs. In the link, NotifyExitHopHtlc is only called if we are
+// the exit hop, but in htlcIncomingContestResolver it is called with forwarded
+// htlc hashes as well.
+func TestUnknownInvoice(t *testing.T) {
+	registry, cleanup := newTestContext(t)
+	defer cleanup()
+
+	// Notify arrival of a new htlc for an unknown invoice. This should
+	// result in an invoice-not-found error.
+ hodlChan := make(chan interface{}) + amt := lnwire.MilliSatoshi(100000) + _, err := registry.NotifyExitHopHtlc( + hash, amt, testInvoiceExpiry, testCurrentHeight, hodlChan, + ) + if err != channeldb.ErrInvoiceNotFound { + t.Fatal("expected invoice not found error") + } +} diff --git a/lnd_multi-hop_htlc_local_chain_claim_test.go b/lnd_multi-hop_htlc_local_chain_claim_test.go index b3453a03..dfea8768 100644 --- a/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -10,7 +10,9 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntypes" ) // testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if @@ -24,21 +26,29 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, true, + t, net, false, ) // Clean up carol's node when the test finishes. defer shutdownAndAssert(net, t, carol) - // With the network active, we'll now add a new invoice at Carol's end. - invoiceReq := &lnrpc.Invoice{ - Value: 100000, + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. + + const invoiceAmt = 100000 + preimage := lntypes.Preimage{1, 2, 3} + payHash := preimage.Hash() + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Value: invoiceAmt, CltvExpiry: 40, + Hash: payHash[:], } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq) + ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq) if err != nil { - t.Fatalf("unable to generate carol invoice: %v", err) + t.Fatalf("unable to add invoice: %v", err) } // Now that we've created the invoice, we'll send a single payment from @@ -58,12 +68,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) t.Fatalf("unable to send payment: %v", err) } - // We'll now wait until all 3 nodes have the HTLC as just sent fully - // locked in. + // At this point, all 3 nodes should now have an active channel with + // the created HTLC pending on all of them. var predErr error nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} err = lntest.WaitPredicate(func() bool { - predErr = assertActiveHtlcs(nodes, carolInvoice.RHash) + predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { return false } @@ -71,9 +81,14 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) return true }, time.Second*15) if err != nil { - t.Fatalf("htlc mismatch: %v", err) + t.Fatalf("htlc mismatch: %v", predErr) } + // Wait for carol to mark invoice as accepted. There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + waitForInvoiceAccepted(t, carol, payHash) + // At this point, Bob decides that he wants to exit the channel // immediately, so he force closes his commitment transaction. 
 	ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
@@ -87,6 +102,26 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
 			err)
 	}
 
+	// Suspend Bob to force Carol to go to chain.
+	restartBob, err := net.SuspendNode(net.Bob)
+	if err != nil {
+		t.Fatalf("unable to suspend bob: %v", err)
+	}
+
+	// Settle invoice. This will just mark the invoice as settled, as there
+	// is no link anymore to remove the htlc from the commitment tx. For
+	// this test, it is important to actually settle and not leave the
+	// invoice in the accepted state, because without a known preimage, the
+	// channel arbitrator won't go to chain.
+	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+	_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
+		Preimage: preimage[:],
+	})
+	if err != nil {
+		t.Fatalf("settle invoice: %v", err)
+	}
+
 	// We'll now mine enough blocks so Carol decides that she needs to go
 	// on-chain to claim the HTLC as Bob has been inactive.
 	numBlocks := uint32(invoiceReq.CltvExpiry -
@@ -129,6 +164,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
 	}
 	assertTxInBlock(t, block, commitHash)
 
+	// Restart bob again.
+	if err := restartBob(); err != nil {
+		t.Fatalf("unable to restart bob: %v", err)
+	}
+
 	// After the force close transacion is mined, Carol should broadcast
 	// her second level HTLC transacion. Bob will broadcast a sweep tx to
 	// sweep his output in the channel with Carol. He can do this
@@ -352,3 +392,30 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest)
 		t.Fatalf(predErr.Error())
 	}
 }
+
+// waitForInvoiceAccepted waits until the node has moved the specified invoice
+// to the accepted state.
+func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode,
+	payHash lntypes.Hash) {
+
+	ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
+	defer cancel()
+	invoiceUpdates, err := node.SubscribeSingleInvoice(ctx,
+		&invoicesrpc.SubscribeSingleInvoiceRequest{
+			RHash: payHash[:],
+		},
+	)
+	if err != nil {
+		t.Fatalf("subscribe single invoice: %v", err)
+	}
+
+	for {
+		update, err := invoiceUpdates.Recv()
+		if err != nil {
+			t.Fatalf("invoice update err: %v", err)
+		}
+		if update.State == lnrpc.Invoice_ACCEPTED {
+			break
+		}
+	}
+}
diff --git a/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lnd_multi-hop_htlc_receiver_chain_claim_test.go
index f629be63..8bdd5ed4 100644
--- a/lnd_multi-hop_htlc_receiver_chain_claim_test.go
+++ b/lnd_multi-hop_htlc_receiver_chain_claim_test.go
@@ -11,7 +11,9 @@ import (
 	"github.com/btcsuite/btcd/wire"
 	"github.com/davecgh/go-spew/spew"
 	"github.com/lightningnetwork/lnd/lnrpc"
+	"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
 	"github.com/lightningnetwork/lnd/lntest"
+	"github.com/lightningnetwork/lnd/lntypes"
 )
 
 // testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
@@ -26,24 +28,29 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
 	// Carol refusing to actually settle or directly cancel any HTLC's
 	// self.
 	aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
-		t, net, true,
+		t, net, false,
 	)
 
 	// Clean up carol's node when the test finishes.
 	defer shutdownAndAssert(net, t, carol)
 
-	// With the network active, we'll now add a new invoice at Carol's end.
-	// Make sure the cltv expiry delta is large enough, otherwise Bob won't
-	// send out the outgoing htlc.
+	// With the network active, we'll now add a new hodl invoice at Carol's
+	// end. Make sure the cltv expiry delta is large enough, otherwise Bob
+	// won't send out the outgoing htlc.
+
 	const invoiceAmt = 100000
-	invoiceReq := &lnrpc.Invoice{
+	preimage := lntypes.Preimage{1, 2, 4}
+	payHash := preimage.Hash()
+	invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
 		Value:      invoiceAmt,
 		CltvExpiry: 40,
+		Hash:       payHash[:],
 	}
-	ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
-	carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq)
+	ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+	carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
 	if err != nil {
-		t.Fatalf("unable to generate carol invoice: %v", err)
+		t.Fatalf("unable to add invoice: %v", err)
 	}
 
 	// Now that we've created the invoice, we'll send a single payment from
@@ -68,7 +75,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
 	var predErr error
 	nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
 	err = lntest.WaitPredicate(func() bool {
-		predErr = assertActiveHtlcs(nodes, carolInvoice.RHash)
+		predErr = assertActiveHtlcs(nodes, payHash[:])
 		if predErr != nil {
 			return false
 		}
@@ -79,6 +86,30 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
 		t.Fatalf("htlc mismatch: %v", predErr)
 	}
 
+	// Wait for carol to mark invoice as accepted. There is a small gap to
+	// bridge between adding the htlc to the channel and executing the exit
+	// hop logic.
+	waitForInvoiceAccepted(t, carol, payHash)
+
+	restartBob, err := net.SuspendNode(net.Bob)
+	if err != nil {
+		t.Fatalf("unable to suspend bob: %v", err)
+	}
+
+	// Settle invoice. This will just mark the invoice as settled, as there
+	// is no link anymore to remove the htlc from the commitment tx. For
+	// this test, it is important to actually settle and not leave the
+	// invoice in the accepted state, because without a known preimage, the
+	// channel arbitrator won't go to chain.
+	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+	_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
+		Preimage: preimage[:],
+	})
+	if err != nil {
+		t.Fatalf("settle invoice: %v", err)
+	}
+
 	// Now we'll mine enough blocks to prompt carol to actually go to the
 	// chain in order to sweep her HTLC since the value is high enough.
 	// TODO(roasbeef): modify once go to chain policy changes
@@ -123,6 +154,11 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest)
 	// Confirm the commitment.
 	mineBlocks(t, net, 1, 1)
 
+	// Restart bob again.
+	if err := restartBob(); err != nil {
+		t.Fatalf("unable to restart bob: %v", err)
+	}
+
 	// After the force close transaction is mined, Carol should broadcast
 	// her second level HTLC transaction. Bob will broadcast a sweep tx to
 	// sweep his output in the channel with Carol.
When Bob notices Carol's diff --git a/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lnd_multi-hop_htlc_remote_chain_claim_test.go index 7493ead4..cb5bb97f 100644 --- a/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ b/lnd_multi-hop_htlc_remote_chain_claim_test.go @@ -10,7 +10,9 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntypes" ) // testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario, @@ -24,22 +26,28 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // Carol refusing to actually settle or directly cancel any HTLC's // self. aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork( - t, net, true, + t, net, false, ) // Clean up carol's node when the test finishes. defer shutdownAndAssert(net, t, carol) - // With the network active, we'll now add a new invoice at Carol's end. + // With the network active, we'll now add a new hodl invoice at Carol's + // end. Make sure the cltv expiry delta is large enough, otherwise Bob + // won't send out the outgoing htlc. const invoiceAmt = 100000 - invoiceReq := &lnrpc.Invoice{ + preimage := lntypes.Preimage{1, 2, 5} + payHash := preimage.Hash() + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ Value: invoiceAmt, CltvExpiry: 40, + Hash: payHash[:], } - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq) + ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq) if err != nil { - t.Fatalf("unable to generate carol invoice: %v", err) + t.Fatalf("unable to add invoice: %v", err) } // Now that we've created the invoice, we'll send a single payment from @@ -59,12 +67,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest t.Fatalf("unable to send payment: %v", err) } - // We'll now wait until all 3 nodes have the HTLC as just sent fully - // locked in. + // At this point, all 3 nodes should now have an active channel with + // the created HTLC pending on all of them. var predErr error nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol} err = lntest.WaitPredicate(func() bool { - predErr = assertActiveHtlcs(nodes, carolInvoice.RHash) + predErr = assertActiveHtlcs(nodes, payHash[:]) if predErr != nil { return false } @@ -72,9 +80,14 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest return true }, time.Second*15) if err != nil { - t.Fatalf("htlc mismatch: %v", err) + t.Fatalf("htlc mismatch: %v", predErr) } + // Wait for carol to mark invoice as accepted. There is a small gap to + // bridge between adding the htlc to the channel and executing the exit + // hop logic. + waitForInvoiceAccepted(t, carol, payHash) + // Next, Alice decides that she wants to exit the channel, so she'll // immediately force close the channel by broadcast her commitment // transaction. @@ -102,6 +115,26 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest t.Fatalf("unable to find sweeping tx in mempool: %v", err) } + // Suspend bob, so Carol is forced to go on chain. + restartBob, err := net.SuspendNode(net.Bob) + if err != nil { + t.Fatalf("unable to suspend bob: %v", err) + } + + // Settle invoice. 
This will just mark the invoice as settled, as there
+	// is no link anymore to remove the htlc from the commitment tx. For
+	// this test, it is important to actually settle and not leave the
+	// invoice in the accepted state, because without a known preimage, the
+	// channel arbitrator won't go to chain.
+	ctx, cancel := context.WithTimeout(ctxb, defaultTimeout)
+	defer cancel()
+	_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
+		Preimage: preimage[:],
+	})
+	if err != nil {
+		t.Fatalf("settle invoice: %v", err)
+	}
+
 	// We'll now mine enough blocks so Carol decides that she needs to go
 	// on-chain to claim the HTLC as Bob has been inactive.
 	numBlocks := uint32(invoiceReq.CltvExpiry-
@@ -144,6 +177,11 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
 	}
 	assertTxInBlock(t, block, commitHash)
 
+	// Restart bob again.
+	if err := restartBob(); err != nil {
+		t.Fatalf("unable to restart bob: %v", err)
+	}
+
 	// After the force close transacion is mined, Carol should broadcast
 	// her second level HTLC transacion. Bob will broadcast a sweep tx to
 	// sweep his output in the channel with Carol. He can do this
diff --git a/peer.go b/peer.go
index 4c65290f..0b36f63a 100644
--- a/peer.go
+++ b/peer.go
@@ -196,10 +196,6 @@ type peer struct {
 	// remote node.
 	localFeatures *lnwire.RawFeatureVector
 
-	// finalCltvRejectDelta defines the number of blocks before the expiry
-	// of the htlc where we no longer settle it as an exit hop.
-	finalCltvRejectDelta uint32
-
 	// outgoingCltvRejectDelta defines the number of blocks before expiry of
 	// an htlc where we don't offer an htlc anymore.
 	outgoingCltvRejectDelta uint32
@@ -242,7 +238,7 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
 	addr *lnwire.NetAddress, inbound bool,
 	localFeatures *lnwire.RawFeatureVector,
 	chanActiveTimeout time.Duration,
-	finalCltvRejectDelta, outgoingCltvRejectDelta uint32) (
+	outgoingCltvRejectDelta uint32) (
 	*peer, error) {
 
 	nodePub := addr.IdentityKey
@@ -258,7 +254,6 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
 
 		localFeatures: localFeatures,
 
-		finalCltvRejectDelta:    finalCltvRejectDelta,
 		outgoingCltvRejectDelta: outgoingCltvRejectDelta,
 
 		sendQueue: make(chan outgoingMsg),
@@ -598,7 +593,6 @@ func (p *peer) addLink(chanPoint *wire.OutPoint,
 		UnsafeReplay:            cfg.UnsafeReplay,
 		MinFeeUpdateTimeout:     htlcswitch.DefaultMinLinkFeeUpdateTimeout,
 		MaxFeeUpdateTimeout:     htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
-		FinalCltvRejectDelta:    p.finalCltvRejectDelta,
 		OutgoingCltvRejectDelta: p.outgoingCltvRejectDelta,
 	}
 
diff --git a/server.go b/server.go
index 29a6b541..72cbb1e3 100644
--- a/server.go
+++ b/server.go
@@ -342,7 +342,10 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
 		readPool:       readPool,
 		chansToRestore: chansToRestore,
 
-		invoices: invoices.NewRegistry(chanDB, decodeFinalCltvExpiry),
+		invoices: invoices.NewRegistry(
+			chanDB, decodeFinalCltvExpiry,
+			defaultFinalCltvRejectDelta,
+		),
 
 		channelNotifier: channelnotifier.New(chanDB),
 
@@ -2556,7 +2559,6 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
 	p, err := newPeer(
 		conn, connReq, s, peerAddr, inbound, localFeatures,
 		cfg.ChanEnableTimeout,
-		defaultFinalCltvRejectDelta,
 		defaultOutgoingCltvRejectDelta,
 	)
 	if err != nil {

From 16ff4e3ffaf3ce6b62002683902293d9545f7ef4 Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Tue, 16 Apr 2019 10:22:04 +0200
Subject: [PATCH 11/14] cnct/test: extend mockWitnessBeacon

---
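A minimal usage sketch of the extended beacon (not part of the diff
below; it assumes the newMockWitnessBeacon constructor added in this
patch, the testResHash/testResPreimage fixtures from the next patch,
and a *testing.T named t). Preloading the lookup map lets
LookupPreimage succeed immediately, which is how a resolver test can
model a preimage that was already known at startup:

	beacon := newMockWitnessBeacon()

	// Preload a preimage, mimicking one that was learned before the
	// resolver under test was started.
	beacon.lookupPreimage[testResHash] = testResPreimage

	preimage, ok := beacon.LookupPreimage(testResHash)
	if !ok || preimage != testResPreimage {
		t.Fatal("expected preloaded preimage")
	}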
contractcourt/htlc_timeout_resolver_test.go | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/contractcourt/htlc_timeout_resolver_test.go b/contractcourt/htlc_timeout_resolver_test.go index c83d66c9..7dcf025a 100644 --- a/contractcourt/htlc_timeout_resolver_test.go +++ b/contractcourt/htlc_timeout_resolver_test.go @@ -29,8 +29,16 @@ func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx, type mockWitnessBeacon struct { preImageUpdates chan lntypes.Preimage + newPreimages chan []lntypes.Preimage + lookupPreimage map[lntypes.Hash]lntypes.Preimage +} - newPreimages chan []lntypes.Preimage +func newMockWitnessBeacon() *mockWitnessBeacon { + return &mockWitnessBeacon{ + preImageUpdates: make(chan lntypes.Preimage, 1), + newPreimages: make(chan []lntypes.Preimage), + lookupPreimage: make(map[lntypes.Hash]lntypes.Preimage), + } } func (m *mockWitnessBeacon) SubscribeUpdates() *WitnessSubscription { @@ -41,7 +49,11 @@ func (m *mockWitnessBeacon) SubscribeUpdates() *WitnessSubscription { } func (m *mockWitnessBeacon) LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool) { - return lntypes.Preimage{}, false + preimage, ok := m.lookupPreimage[payhash] + if !ok { + return lntypes.Preimage{}, false + } + return preimage, true } func (m *mockWitnessBeacon) AddPreimages(preimages ...lntypes.Preimage) error { @@ -190,10 +202,7 @@ func TestHtlcTimeoutResolver(t *testing.T) { spendChan: make(chan *chainntnfs.SpendDetail), confChan: make(chan *chainntnfs.TxConfirmation), } - witnessBeacon := &mockWitnessBeacon{ - preImageUpdates: make(chan lntypes.Preimage, 1), - newPreimages: make(chan []lntypes.Preimage), - } + witnessBeacon := newMockWitnessBeacon() for _, testCase := range testCases { t.Logf("Running test case: %v", testCase.name) From 3d17c2bcfe93d6771ea942ca84c4c5f362d959ad Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Tue, 16 Apr 2019 11:33:30 +0200 Subject: [PATCH 12/14] cnct/test: add incoming contest resolver test --- contractcourt/htlc_incoming_resolver_test.go | 274 +++++++++++++++++++ contractcourt/mock_registry_test.go | 45 +++ contractcourt/utils_test.go | 26 ++ 3 files changed, 345 insertions(+) create mode 100644 contractcourt/htlc_incoming_resolver_test.go create mode 100644 contractcourt/mock_registry_test.go create mode 100644 contractcourt/utils_test.go diff --git a/contractcourt/htlc_incoming_resolver_test.go b/contractcourt/htlc_incoming_resolver_test.go new file mode 100644 index 00000000..5f623428 --- /dev/null +++ b/contractcourt/htlc_incoming_resolver_test.go @@ -0,0 +1,274 @@ +package contractcourt + +import ( + "bytes" + "testing" + + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/invoices" + "github.com/lightningnetwork/lnd/lnwallet" + + "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/lntypes" +) + +const ( + testInitialBlockHeight = 100 + testHtlcExpiry = 150 +) + +var ( + testResPreimage = lntypes.Preimage{1, 2, 3} + testResHash = testResPreimage.Hash() +) + +// TestHtlcIncomingResolverFwdPreimageKnown tests resolution of a forwarded htlc +// for which the preimage is already known initially. 
+func TestHtlcIncomingResolverFwdPreimageKnown(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound + ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage + ctx.resolve() + ctx.waitForResult(true) +} + +// TestHtlcIncomingResolverFwdContestedSuccess tests resolution of a forwarded +// htlc for which the preimage becomes known after the resolver has been +// started. +func TestHtlcIncomingResolverFwdContestedSuccess(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound + ctx.resolve() + + // Simulate a new block coming in. HTLC is not yet expired. + ctx.notifyEpoch(testInitialBlockHeight + 1) + + ctx.witnessBeacon.preImageUpdates <- testResPreimage + ctx.waitForResult(true) +} + +// TestHtlcIncomingResolverFwdContestedTimeout tests resolution of a forwarded +// htlc that times out after the resolver has been started. +func TestHtlcIncomingResolverFwdContestedTimeout(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound + ctx.resolve() + + // Simulate a new block coming in. HTLC expires. + ctx.notifyEpoch(testHtlcExpiry) + + ctx.waitForResult(false) +} + +// TestHtlcIncomingResolverFwdTimeout tests resolution of a forwarded htlc that +// has already expired when the resolver starts. +func TestHtlcIncomingResolverFwdTimeout(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + + ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound + ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage + ctx.resolver.htlcExpiry = 90 + ctx.resolve() + ctx.waitForResult(false) +} + +// TestHtlcIncomingResolverExitSettle tests resolution of an exit hop htlc for +// which the invoice has already been settled when the resolver starts. +func TestHtlcIncomingResolverExitSettle(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + ctx.registry.notifyEvent = &invoices.HodlEvent{ + Hash: testResHash, + Preimage: &testResPreimage, + } + ctx.resolve() + + data := <-ctx.registry.notifyChan + if data.expiry != testHtlcExpiry { + t.Fatal("incorrect expiry") + } + if data.currentHeight != testInitialBlockHeight { + t.Fatal("incorrect block height") + } + + ctx.waitForResult(true) +} + +// TestHtlcIncomingResolverExitCancel tests resolution of an exit hop htlc for +// an invoice that is already canceled when the resolver starts. +func TestHtlcIncomingResolverExitCancel(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + ctx.registry.notifyEvent = &invoices.HodlEvent{ + Hash: testResHash, + } + ctx.resolve() + ctx.waitForResult(false) +} + +// TestHtlcIncomingResolverExitSettleHodl tests resolution of an exit hop htlc +// for a hodl invoice that is settled after the resolver has started. +func TestHtlcIncomingResolverExitSettleHodl(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + ctx.resolve() + + notifyData := <-ctx.registry.notifyChan + notifyData.hodlChan <- invoices.HodlEvent{ + Hash: testResHash, + Preimage: &testResPreimage, + } + + ctx.waitForResult(true) +} + +// TestHtlcIncomingResolverExitTimeoutHodl tests resolution of an exit hop htlc +// for a hodl invoice that times out. 
+func TestHtlcIncomingResolverExitTimeoutHodl(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + ctx.resolve() + ctx.notifyEpoch(testHtlcExpiry) + ctx.waitForResult(false) +} + +// TestHtlcIncomingResolverExitCancelHodl tests resolution of an exit hop htlc +// for a hodl invoice that is canceled after the resolver has started. +func TestHtlcIncomingResolverExitCancelHodl(t *testing.T) { + t.Parallel() + defer timeout(t)() + + ctx := newIncomingResolverTestContext(t) + ctx.resolve() + notifyData := <-ctx.registry.notifyChan + notifyData.hodlChan <- invoices.HodlEvent{ + Hash: testResHash, + } + ctx.waitForResult(false) +} + +type incomingResolverTestContext struct { + registry *mockRegistry + witnessBeacon *mockWitnessBeacon + resolver *htlcIncomingContestResolver + notifier *mockNotifier + resolveErr chan error + nextResolver ContractResolver + t *testing.T +} + +func newIncomingResolverTestContext(t *testing.T) *incomingResolverTestContext { + notifier := &mockNotifier{ + epochChan: make(chan *chainntnfs.BlockEpoch), + spendChan: make(chan *chainntnfs.SpendDetail), + confChan: make(chan *chainntnfs.TxConfirmation), + } + witnessBeacon := newMockWitnessBeacon() + registry := &mockRegistry{ + notifyChan: make(chan notifyExitHopData, 1), + } + + checkPointChan := make(chan struct{}, 1) + + chainCfg := ChannelArbitratorConfig{ + ChainArbitratorConfig: ChainArbitratorConfig{ + Notifier: notifier, + PreimageDB: witnessBeacon, + Registry: registry, + }, + } + + resolver := &htlcIncomingContestResolver{ + htlcSuccessResolver: htlcSuccessResolver{ + ResolverKit: ResolverKit{ + ChannelArbitratorConfig: chainCfg, + Checkpoint: func(_ ContractResolver) error { + checkPointChan <- struct{}{} + return nil + }, + }, + htlcResolution: lnwallet.IncomingHtlcResolution{}, + payHash: testResHash, + }, + htlcExpiry: testHtlcExpiry, + } + + return &incomingResolverTestContext{ + registry: registry, + witnessBeacon: witnessBeacon, + resolver: resolver, + notifier: notifier, + t: t, + } +} + +func (i *incomingResolverTestContext) resolve() { + // Start resolver. + i.resolveErr = make(chan error, 1) + go func() { + var err error + i.nextResolver, err = i.resolver.Resolve() + i.resolveErr <- err + }() + + // Notify initial block height. 
+	i.notifyEpoch(testInitialBlockHeight)
+}
+
+func (i *incomingResolverTestContext) notifyEpoch(height int32) {
+	i.notifier.epochChan <- &chainntnfs.BlockEpoch{
+		Height: height,
+	}
+}
+
+func (i *incomingResolverTestContext) waitForResult(expectSuccessRes bool) {
+	i.t.Helper()
+
+	err := <-i.resolveErr
+	if err != nil {
+		i.t.Fatal(err)
+	}
+
+	if !expectSuccessRes {
+		if i.nextResolver != nil {
+			i.t.Fatal("expected no next resolver")
+		}
+		return
+	}
+
+	successResolver, ok := i.nextResolver.(*htlcSuccessResolver)
+	if !ok {
+		i.t.Fatal("expected htlcSuccessResolver")
+	}
+
+	if successResolver.htlcResolution.Preimage != testResPreimage {
+		i.t.Fatal("invalid preimage")
+	}
+
+	successTx := successResolver.htlcResolution.SignedSuccessTx
+	if successTx != nil &&
+		!bytes.Equal(successTx.TxIn[0].Witness[3], testResPreimage[:]) {
+
+		i.t.Fatal("invalid preimage in success tx witness")
+	}
+}
diff --git a/contractcourt/mock_registry_test.go b/contractcourt/mock_registry_test.go
new file mode 100644
index 00000000..f54a1465
--- /dev/null
+++ b/contractcourt/mock_registry_test.go
@@ -0,0 +1,45 @@
+package contractcourt
+
+import (
+	"github.com/lightningnetwork/lnd/channeldb"
+	"github.com/lightningnetwork/lnd/invoices"
+	"github.com/lightningnetwork/lnd/lntypes"
+	"github.com/lightningnetwork/lnd/lnwire"
+)
+
+type notifyExitHopData struct {
+	payHash       lntypes.Hash
+	paidAmount    lnwire.MilliSatoshi
+	hodlChan      chan<- interface{}
+	expiry        uint32
+	currentHeight int32
+}
+
+type mockRegistry struct {
+	notifyChan  chan notifyExitHopData
+	notifyErr   error
+	notifyEvent *invoices.HodlEvent
+}
+
+func (r *mockRegistry) NotifyExitHopHtlc(payHash lntypes.Hash,
+	paidAmount lnwire.MilliSatoshi, expiry uint32, currentHeight int32,
+	hodlChan chan<- interface{}) (*invoices.HodlEvent, error) {
+
+	r.notifyChan <- notifyExitHopData{
+		hodlChan:      hodlChan,
+		payHash:       payHash,
+		paidAmount:    paidAmount,
+		expiry:        expiry,
+		currentHeight: currentHeight,
+	}
+
+	return r.notifyEvent, r.notifyErr
+}
+
+func (r *mockRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {}
+
+func (r *mockRegistry) LookupInvoice(lntypes.Hash) (channeldb.Invoice, uint32,
+	error) {
+
+	return channeldb.Invoice{}, 0, channeldb.ErrInvoiceNotFound
+}
diff --git a/contractcourt/utils_test.go b/contractcourt/utils_test.go
new file mode 100644
index 00000000..2bf81b41
--- /dev/null
+++ b/contractcourt/utils_test.go
@@ -0,0 +1,26 @@
+package contractcourt
+
+import (
+	"os"
+	"runtime/pprof"
+	"testing"
+	"time"
+)
+
+// timeout implements a test-level timeout.
+func timeout(t *testing.T) func() {
+	done := make(chan struct{})
+	go func() {
+		select {
+		case <-time.After(5 * time.Second):
+			pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+
+			panic("test timeout")
+		case <-done:
+		}
+	}()
+
+	return func() {
+		close(done)
+	}
+}

From e5ead599ccedfab4c57f79323890ab640587130e Mon Sep 17 00:00:00 2001
From: Joost Jager
Date: Mon, 8 Apr 2019 11:29:18 +0200
Subject: [PATCH 13/14] htlcswitch/test: use single channel restore function

This commit refactors the channel restoration helpers in the unit tests
so that each test channel carries its own restore function, making them
easier to use.
---
 htlcswitch/link_test.go  |  40 +++++++------
 htlcswitch/test_utils.go | 120 ++++++++++++++++++++++++---------------
 2 files changed, 98 insertions(+), 62 deletions(-)

diff --git a/htlcswitch/link_test.go b/htlcswitch/link_test.go
index 7aaf1da3..2ef764b7 100644
--- a/htlcswitch/link_test.go
+++ b/htlcswitch/link_test.go
@@ -180,7 +180,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) {
 	t.Parallel()
 
 	// Setup a alice-bob network.
- aliceChannel, bobChannel, cleanUp, err := createTwoClusterChannels( + alice, bob, cleanUp, err := createTwoClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) if err != nil { @@ -188,7 +188,9 @@ func TestChannelLinkSingleHopPayment(t *testing.T) { } defer cleanUp() - n := newTwoHopNetwork(t, aliceChannel, bobChannel, testStartingHeight) + n := newTwoHopNetwork( + t, alice.channel, bob.channel, testStartingHeight, + ) if err := n.start(); err != nil { t.Fatal(err) } @@ -1592,7 +1594,7 @@ func (m *mockPeer) Address() net.Addr { func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( ChannelLink, *lnwallet.LightningChannel, chan time.Time, func() error, - func(), chanRestoreFunc, error) { + func(), func() (*lnwallet.LightningChannel, error), error) { var chanIDBytes [8]byte if _, err := io.ReadFull(rand.Reader, chanIDBytes[:]); err != nil { @@ -1602,7 +1604,7 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( chanID := lnwire.NewShortChanIDFromInt( binary.BigEndian.Uint64(chanIDBytes[:])) - aliceChannel, bobChannel, fCleanUp, restore, err := createTestChannel( + aliceLc, bobLc, fCleanUp, err := createTestChannel( alicePrivKey, bobPrivKey, chanAmt, chanAmt, chanReserve, chanReserve, chanID, ) @@ -1628,7 +1630,7 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( pCache := newMockPreimageCache() - aliceDb := aliceChannel.State().Db + aliceDb := aliceLc.channel.State().Db aliceSwitch, err := initSwitchWithDB(testStartingHeight, aliceDb) if err != nil { return nil, nil, nil, nil, nil, nil, err @@ -1668,7 +1670,7 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( } const startingHeight = 100 - aliceLink := NewChannelLink(aliceCfg, aliceChannel) + aliceLink := NewChannelLink(aliceCfg, aliceLc.channel) start := func() error { return aliceSwitch.AddLink(aliceLink) } @@ -1687,7 +1689,8 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) ( defer fCleanUp() } - return aliceLink, bobChannel, bticker.Force, start, cleanUp, restore, nil + return aliceLink, bobLc.channel, bticker.Force, start, cleanUp, + aliceLc.restore, nil } func assertLinkBandwidth(t *testing.T, link ChannelLink, @@ -2546,7 +2549,9 @@ func TestChannelLinkTrimCircuitsPending(t *testing.T) { t.Fatalf("unable to start test harness: %v", err) } - alice := newPersistentLinkHarness(t, aliceLink, batchTicker, restore) + alice := newPersistentLinkHarness( + t, aliceLink, batchTicker, restore, + ) // Compute the static fees that will be used to determine the // correctness of Alice's bandwidth when forwarding HTLCs. @@ -2818,7 +2823,9 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) { t.Fatalf("unable to start test harness: %v", err) } - alice := newPersistentLinkHarness(t, aliceLink, batchTicker, restore) + alice := newPersistentLinkHarness( + t, aliceLink, batchTicker, restore, + ) // We'll put Alice into hodl.Commit mode, such that the circuits for any // outgoing ADDs are opened, but the changes are not committed in the @@ -3980,14 +3987,15 @@ type persistentLinkHarness struct { batchTicker chan time.Time msgs chan lnwire.Message - restoreChan chanRestoreFunc + restoreChan func() (*lnwallet.LightningChannel, error) } // newPersistentLinkHarness initializes a new persistentLinkHarness and derives // the supporting references from the active link. 
func newPersistentLinkHarness(t *testing.T, link ChannelLink, batchTicker chan time.Time, - restore chanRestoreFunc) *persistentLinkHarness { + restore func() (*lnwallet.LightningChannel, + error)) *persistentLinkHarness { coreLink := link.(*channelLink) @@ -4034,7 +4042,7 @@ func (h *persistentLinkHarness) restart(restartSwitch bool, // state, we will restore the persisted state to ensure we always start // the link in a consistent state. var err error - h.channel, _, err = h.restoreChan() + h.channel, err = h.restoreChan() if err != nil { h.t.Fatalf("unable to restore channels: %v", err) } @@ -5573,7 +5581,7 @@ func TestChannelLinkCanceledInvoice(t *testing.T) { t.Parallel() // Setup a alice-bob network. - aliceChannel, bobChannel, cleanUp, err := createTwoClusterChannels( + alice, bob, cleanUp, err := createTwoClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5) if err != nil { @@ -5581,7 +5589,7 @@ func TestChannelLinkCanceledInvoice(t *testing.T) { } defer cleanUp() - n := newTwoHopNetwork(t, aliceChannel, bobChannel, testStartingHeight) + n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight) if err := n.start(); err != nil { t.Fatal(err) } @@ -5638,7 +5646,7 @@ type hodlInvoiceTestCtx struct { func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) { // Setup a alice-bob network. - aliceChannel, bobChannel, cleanUp, err := createTwoClusterChannels( + alice, bob, cleanUp, err := createTwoClusterChannels( btcutil.SatoshiPerBitcoin*3, btcutil.SatoshiPerBitcoin*5, ) @@ -5646,7 +5654,7 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) { t.Fatalf("unable to create channel: %v", err) } - n := newTwoHopNetwork(t, aliceChannel, bobChannel, testStartingHeight) + n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight) if err := n.start(); err != nil { t.Fatal(err) } diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go index d749eaa2..f2557761 100644 --- a/htlcswitch/test_utils.go +++ b/htlcswitch/test_utils.go @@ -147,15 +147,19 @@ func generateRandomBytes(n int) ([]byte, error) { return b, nil } +type testLightningChannel struct { + channel *lnwallet.LightningChannel + restore func() (*lnwallet.LightningChannel, error) +} + // createTestChannel creates the channel and returns our and remote channels // representations. 
// // TODO(roasbeef): need to factor out, similar func re-used in many parts of codebase func createTestChannel(alicePrivKey, bobPrivKey []byte, aliceAmount, bobAmount, aliceReserve, bobReserve btcutil.Amount, - chanID lnwire.ShortChannelID) (*lnwallet.LightningChannel, *lnwallet.LightningChannel, func(), - func() (*lnwallet.LightningChannel, *lnwallet.LightningChannel, - error), error) { + chanID lnwire.ShortChannelID) (*testLightningChannel, + *testLightningChannel, func(), error) { aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), alicePrivKey) bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), bobPrivKey) @@ -187,7 +191,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, var hash [sha256.Size]byte randomSeed, err := generateRandomBytes(sha256.Size) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } copy(hash[:], randomSeed) @@ -236,23 +240,23 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize()) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot) bobFirstRevoke, err := bobPreimageProducer.AtIndex(0) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:]) aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize()) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot) aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:]) @@ -260,25 +264,25 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, bobAmount, &aliceCfg, &bobCfg, aliceCommitPoint, bobCommitPoint, *fundingTxIn) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } alicePath, err := ioutil.TempDir("", "alicedb") dbAlice, err := channeldb.Open(alicePath) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobPath, err := ioutil.TempDir("", "bobdb") dbBob, err := channeldb.Open(bobPath) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } estimator := lnwallet.NewStaticFeeEstimator(6000, 0) feePerKw, err := estimator.EstimateFeePerKW(1) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } commitFee := feePerKw.FeeForWeight(724) @@ -350,11 +354,11 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, } if err := aliceChannelState.SyncPending(bobAddr, broadcastHeight); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } if err := bobChannelState.SyncPending(aliceAddr, broadcastHeight); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } cleanUpFunc := func() { @@ -372,7 +376,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, aliceSigner, aliceChannelState, alicePool, ) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } alicePool.Start() @@ -381,7 +385,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, bobSigner, bobChannelState, bobPool, ) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobPool.Start() @@ -389,40 +393,38 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, // having Alice and Bob extend their revocation windows to each other. 
aliceNextRevoke, err := channelAlice.NextRevocationKey() if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } if err := channelBob.InitNextRevocation(aliceNextRevoke); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } bobNextRevoke, err := channelBob.NextRevocationKey() if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } if err := channelAlice.InitNextRevocation(bobNextRevoke); err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, err } - restore := func() (*lnwallet.LightningChannel, *lnwallet.LightningChannel, - error) { - + restoreAlice := func() (*lnwallet.LightningChannel, error) { aliceStoredChannels, err := dbAlice.FetchOpenChannels(aliceKeyPub) switch err { case nil: case bbolt.ErrDatabaseNotOpen: dbAlice, err = channeldb.Open(dbAlice.Path()) if err != nil { - return nil, nil, errors.Errorf("unable to reopen alice "+ + return nil, errors.Errorf("unable to reopen alice "+ "db: %v", err) } aliceStoredChannels, err = dbAlice.FetchOpenChannels(aliceKeyPub) if err != nil { - return nil, nil, errors.Errorf("unable to fetch alice "+ + return nil, errors.Errorf("unable to fetch alice "+ "channel: %v", err) } default: - return nil, nil, errors.Errorf("unable to fetch alice channel: "+ + return nil, errors.Errorf("unable to fetch alice channel: "+ "%v", err) } @@ -435,34 +437,38 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, } if aliceStoredChannel == nil { - return nil, nil, errors.New("unable to find stored alice channel") + return nil, errors.New("unable to find stored alice channel") } newAliceChannel, err := lnwallet.NewLightningChannel( aliceSigner, aliceStoredChannel, alicePool, ) if err != nil { - return nil, nil, errors.Errorf("unable to create new channel: %v", + return nil, errors.Errorf("unable to create new channel: %v", err) } + return newAliceChannel, nil + } + + restoreBob := func() (*lnwallet.LightningChannel, error) { bobStoredChannels, err := dbBob.FetchOpenChannels(bobKeyPub) switch err { case nil: case bbolt.ErrDatabaseNotOpen: dbBob, err = channeldb.Open(dbBob.Path()) if err != nil { - return nil, nil, errors.Errorf("unable to reopen bob "+ + return nil, errors.Errorf("unable to reopen bob "+ "db: %v", err) } bobStoredChannels, err = dbBob.FetchOpenChannels(bobKeyPub) if err != nil { - return nil, nil, errors.Errorf("unable to fetch bob "+ + return nil, errors.Errorf("unable to fetch bob "+ "channel: %v", err) } default: - return nil, nil, errors.Errorf("unable to fetch bob channel: "+ + return nil, errors.Errorf("unable to fetch bob channel: "+ "%v", err) } @@ -475,20 +481,31 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, } if bobStoredChannel == nil { - return nil, nil, errors.New("unable to find stored bob channel") + return nil, errors.New("unable to find stored bob channel") } newBobChannel, err := lnwallet.NewLightningChannel( bobSigner, bobStoredChannel, bobPool, ) if err != nil { - return nil, nil, errors.Errorf("unable to create new channel: %v", + return nil, errors.Errorf("unable to create new channel: %v", err) } - return newAliceChannel, newBobChannel, nil + return newBobChannel, nil } - return channelAlice, channelBob, cleanUpFunc, restore, nil + testLightningChannelAlice := &testLightningChannel{ + channel: channelAlice, + restore: restoreAlice, + } + + testLightningChannelBob := &testLightningChannel{ + channel: channelBob, + restore: restoreBob, + } + + return testLightningChannelAlice, testLightningChannelBob, cleanUpFunc, + nil } // 
getChanID retrieves the channel point from an lnnwire message. @@ -825,7 +842,7 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( _, _, firstChanID, secondChanID := genIDs() // Create lightning channels between Alice<->Bob and Bob<->Carol - aliceChannel, firstBobChannel, cleanAliceBob, restoreAliceBob, err := + aliceChannel, firstBobChannel, cleanAliceBob, err := createTestChannel(alicePrivKey, bobPrivKey, aliceToBob, aliceToBob, 0, 0, firstChanID) if err != nil { @@ -833,7 +850,7 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( "alice<->bob channel: %v", err) } - secondBobChannel, carolChannel, cleanBobCarol, restoreBobCarol, err := + secondBobChannel, carolChannel, cleanBobCarol, err := createTestChannel(bobPrivKey, carolPrivKey, bobToCarol, bobToCarol, 0, 0, secondChanID) if err != nil { @@ -848,12 +865,23 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( } restoreFromDb := func() (*clusterChannels, error) { - a2b, b2a, err := restoreAliceBob() + + a2b, err := aliceChannel.restore() if err != nil { return nil, err } - b2c, c2b, err := restoreBobCarol() + b2a, err := firstBobChannel.restore() + if err != nil { + return nil, err + } + + b2c, err := secondBobChannel.restore() + if err != nil { + return nil, err + } + + c2b, err := carolChannel.restore() if err != nil { return nil, err } @@ -867,10 +895,10 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( } return &clusterChannels{ - aliceToBob: aliceChannel, - bobToAlice: firstBobChannel, - bobToCarol: secondBobChannel, - carolToBob: carolChannel, + aliceToBob: aliceChannel.channel, + bobToAlice: firstBobChannel.channel, + bobToCarol: secondBobChannel.channel, + carolToBob: carolChannel.channel, }, cleanUp, restoreFromDb, nil } @@ -969,13 +997,13 @@ func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel, // createTwoClusterChannels creates lightning channels which are needed for // a 2 hop network cluster to be initialized. func createTwoClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( - *lnwallet.LightningChannel, *lnwallet.LightningChannel, + *testLightningChannel, *testLightningChannel, func(), error) { _, _, firstChanID, _ := genIDs() // Create lightning channels between Alice<->Bob and Bob<->Carol - aliceChannel, firstBobChannel, cleanAliceBob, _, err := + alice, bob, cleanAliceBob, err := createTestChannel(alicePrivKey, bobPrivKey, aliceToBob, aliceToBob, 0, 0, firstChanID) if err != nil { @@ -983,7 +1011,7 @@ func createTwoClusterChannels(aliceToBob, bobToCarol btcutil.Amount) ( "alice<->bob channel: %v", err) } - return aliceChannel, firstBobChannel, cleanAliceBob, nil + return alice, bob, cleanAliceBob, nil } // hopNetwork is the base struct for two and three hop networks From 570f9ca57e759da3ecf64b239ccbacf610d39d51 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Mon, 8 Apr 2019 13:10:32 +0200 Subject: [PATCH 14/14] htlcswitch/test: hodl invoice restart test This commit adds a test that covers the hodl invoice behaviour after a link restart. --- htlcswitch/link_test.go | 156 ++++++++++++++++++++++++++++++--------- htlcswitch/test_utils.go | 24 +++++- 2 files changed, 143 insertions(+), 37 deletions(-) diff --git a/htlcswitch/link_test.go b/htlcswitch/link_test.go index 2ef764b7..c1fd4b94 100644 --- a/htlcswitch/link_test.go +++ b/htlcswitch/link_test.go @@ -4026,16 +4026,10 @@ func (h *persistentLinkHarness) restart(restartSwitch bool, // First, remove the link from the switch. 
 	h.coreLink.cfg.Switch.RemoveLink(h.link.ChanID())
 
-	var htlcSwitch *Switch
 	if restartSwitch {
-		// If a switch restart is requested, we will stop it and
-		// leave htlcSwitch nil, which will trigger the creation
-		// of a fresh instance in restartLink.
+		// If a switch restart is requested, we will stop it. It will be
+		// reinstantiated in restartLink.
 		h.coreLink.cfg.Switch.Stop()
-	} else {
-		// Otherwise, we capture the switch's reference so that
-		// it can be carried over to the restarted link.
-		htlcSwitch = h.coreLink.cfg.Switch
 	}
 
 	// Since our in-memory state may have diverged from our persistent
@@ -4051,8 +4045,8 @@ func (h *persistentLinkHarness) restart(restartSwitch bool,
 	// adding the link to an existing switch, or creating a new one using
 	// the database owned by the link.
 	var cleanUp func()
-	h.link, h.batchTicker, cleanUp, err = restartLink(
-		h.channel, htlcSwitch, hodlFlags,
+	h.link, h.batchTicker, cleanUp, err = h.restartLink(
+		h.channel, restartSwitch, hodlFlags,
 	)
 	if err != nil {
 		h.t.Fatalf("unable to restart alicelink: %v", err)
@@ -4128,8 +4122,10 @@ func (h *persistentLinkHarness) trySignNextCommitment() {
-// restartLink creates a new channel link from the given channel state, and adds
-// to an htlcswitch. If none is provided by the caller, a new one will be
-// created using Alice's database.
+// restartLink creates a new channel link from the given channel state and
+// adds it to an htlcswitch. If restartSwitch is true, a fresh switch
+// instance will be created using Alice's database.
-func restartLink(aliceChannel *lnwallet.LightningChannel, aliceSwitch *Switch,
-	hodlFlags []hodl.Flag) (ChannelLink, chan time.Time, func(), error) {
+func (h *persistentLinkHarness) restartLink(
+	aliceChannel *lnwallet.LightningChannel, restartSwitch bool,
+	hodlFlags []hodl.Flag) (
+	ChannelLink, chan time.Time, func(), error) {
 
 	var (
 		decoder = newMockIteratorDecoder()
@@ -4145,14 +4141,12 @@ func restartLink(aliceChannel *lnwallet.LightningChannel, aliceSwitch *Switch,
 		TimeLockDelta: 6,
 	}
 
-	invoiceRegistry = newMockRegistry(globalPolicy.TimeLockDelta)
-
 	pCache = newMockPreimageCache()
 	)
 
 	aliceDb := aliceChannel.State().Db
-
-	if aliceSwitch == nil {
+	aliceSwitch := h.coreLink.cfg.Switch
+	if restartSwitch {
 		var err error
 		aliceSwitch, err = initSwitchWithDB(testStartingHeight, aliceDb)
 		if err != nil {
@@ -4182,7 +4176,7 @@ func restartLink(aliceChannel *lnwallet.LightningChannel, aliceSwitch *Switch,
 		UpdateContractSignals: func(*contractcourt.ContractSignals) error {
 			return nil
 		},
-		Registry:       invoiceRegistry,
+		Registry:       h.coreLink.cfg.Registry,
 		ChainEvents:    &contractcourt.ChainEventSubscription{},
 		BatchTicker:    bticker,
 		FwdPkgGCTicker: ticker.New(5 * time.Second),
@@ -4247,12 +4241,13 @@ func generateHtlcAndInvoice(t *testing.T,
 	t.Helper()
 
 	htlcAmt := lnwire.NewMSatFromSatoshis(10000)
+	htlcExpiry := testStartingHeight + testInvoiceCltvExpiry
 	hops := []ForwardingInfo{
 		{
 			Network:         BitcoinHop,
 			NextHop:         exitHop,
 			AmountToForward: htlcAmt,
-			OutgoingCTLV:    144,
+			OutgoingCTLV:    uint32(htlcExpiry),
 		},
 	}
 	blob, err := generateRoute(hops...)
@@ -4260,8 +4255,9 @@ func generateHtlcAndInvoice(t *testing.T,
 		t.Fatalf("unable to generate route: %v", err)
 	}
 
-	invoice, htlc, err := generatePayment(htlcAmt, htlcAmt, 144,
-		blob)
+	invoice, htlc, err := generatePayment(
+		htlcAmt, htlcAmt, uint32(htlcExpiry), blob,
+	)
 	if err != nil {
 		t.Fatalf("unable to create payment: %v", err)
 	}
@@ -5641,6 +5637,8 @@ type hodlInvoiceTestCtx struct {
 	amount  lnwire.MilliSatoshi
 	errChan chan error
 
+	restoreBob func() (*lnwallet.LightningChannel, error)
+
 	cleanUp func()
 }
 
@@ -5720,6 +5718,7 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) {
 		hash:       hash,
 		amount:     amount,
 		errChan:    errChan,
+		restoreBob: bob.restore,
 
 		cleanUp: func() {
 			cleanUp()
@@ -5732,6 +5731,8 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) {
 func TestChannelLinkHoldInvoiceSettle(t *testing.T) {
 	t.Parallel()
 
+	defer timeout(t)()
+
 	ctx, err := newHodlInvoiceTestCtx(t)
 	if err != nil {
 		t.Fatal(err)
@@ -5744,13 +5745,9 @@ func TestChannelLinkHoldInvoiceSettle(t *testing.T) {
 	}
 
 	// Wait for payment to succeed.
-	select {
-	case err := <-ctx.errChan:
-		if err != nil {
-			t.Fatal(err)
-		}
-	case <-time.After(5 * time.Second):
-		t.Fatal("timeout")
+	err = <-ctx.errChan
+	if err != nil {
+		t.Fatal(err)
 	}
 
 	// Wait for Bob to receive the revocation.
@@ -5774,6 +5771,8 @@ func TestChannelLinkHoldInvoiceSettle(t *testing.T) {
 func TestChannelLinkHoldInvoiceCancel(t *testing.T) {
 	t.Parallel()
 
+	defer timeout(t)()
+
 	ctx, err := newHodlInvoiceTestCtx(t)
 	if err != nil {
 		t.Fatal(err)
@@ -5786,15 +5785,104 @@ func TestChannelLinkHoldInvoiceCancel(t *testing.T) {
 	}
 
-	// Wait for payment to succeed.
-	select {
-	case err := <-ctx.errChan:
-		if !strings.Contains(err.Error(),
-			lnwire.CodeUnknownPaymentHash.String()) {
+	// Wait for the payment to be canceled.
+	err = <-ctx.errChan
+	if !strings.Contains(err.Error(),
+		lnwire.CodeUnknownPaymentHash.String()) {
 
-			t.Fatal("expected unknown payment hash")
-		}
-	case <-time.After(5 * time.Second):
-		t.Fatal("timeout")
+		t.Fatal("expected unknown payment hash")
+	}
+}
+
+// TestChannelLinkHoldInvoiceRestart asserts hodl htlcs are held after blocks
+// are mined and the link is restarted. The initial expiry checks should not
+// apply to hodl htlcs after restart.
+func TestChannelLinkHoldInvoiceRestart(t *testing.T) {
+	t.Parallel()
+
+	defer timeout(t)()
+
+	const (
+		chanAmt = btcutil.SatoshiPerBitcoin * 5
+	)
+
+	// We'll start by creating a new link with our chanAmt (5 BTC). Bob's
+	// channel state is returned as well, since we'll use it to exchange
+	// htlc and commitment messages with Alice's link directly.
+	aliceLink, bobChannel, _, start, cleanUp, restore, err :=
+		newSingleLinkTestHarness(chanAmt, 0)
+	if err != nil {
+		t.Fatalf("unable to create link: %v", err)
+	}
+	defer cleanUp()
+
+	alice := newPersistentLinkHarness(
+		t, aliceLink, nil, restore,
+	)
+
+	if err := start(); err != nil {
+		t.Fatalf("unable to start test harness: %v", err)
+	}
+
+	var (
+		coreLink = alice.coreLink
+		registry = coreLink.cfg.Registry.(*mockInvoiceRegistry)
+	)
+
+	registry.settleChan = make(chan lntypes.Hash)
+
+	htlc, invoice := generateHtlcAndInvoice(t, 0)
+
+	// Convert into a hodl invoice and save the preimage for later.
+	preimage := invoice.Terms.PaymentPreimage
+	invoice.Terms.PaymentPreimage = channeldb.UnknownPreimage
+
+	// We must add the invoice to the registry, such that Alice
+	// expects this payment.
+	err = registry.AddInvoice(
+		*invoice, htlc.PaymentHash,
+	)
+	if err != nil {
+		t.Fatalf("unable to add invoice to registry: %v", err)
+	}
+
+	// Lock in the htlc paying the hodl invoice.
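+	// The calls below mirror a complete state update: add the htlc, then
+	// exchange commitment signatures and revocations in both directions.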
+	sendHtlcBobToAlice(t, alice.link, bobChannel, htlc)
+	sendCommitSigBobToAlice(t, alice.link, bobChannel, 1)
+	receiveRevAndAckAliceToBob(t, alice.msgs, alice.link, bobChannel)
+	receiveCommitSigAliceToBob(t, alice.msgs, alice.link, bobChannel, 1)
+	sendRevAndAckBobToAlice(t, alice.link, bobChannel)
+
+	// We expect a call to the invoice registry to notify the arrival of
+	// the htlc.
+	<-registry.settleChan
+
+	// Increase the block height. This height will be retrieved by the
+	// link after restart.
+	coreLink.cfg.Switch.bestHeight++
+
+	// Restart the link.
+	alice.restart(false)
+
+	// Expect the htlc to be reprocessed.
+	<-registry.settleChan
+
+	// Settle the invoice with the preimage.
+	registry.SettleHodlInvoice(preimage)
+
+	// Expect alice to send a settle and a commit sig message to bob.
+	receiveSettleAliceToBob(t, alice.msgs, alice.link, bobChannel)
+	receiveCommitSigAliceToBob(t, alice.msgs, alice.link, bobChannel, 0)
+
+	// Stop the link.
+	alice.link.Stop()
+
+	// Check that no unexpected messages were sent.
+	select {
+	case msg := <-alice.msgs:
+		t.Fatalf("did not expect message %T", msg)
+	default:
+	}
 }
diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go
index f2557761..2e1907f2 100644
--- a/htlcswitch/test_utils.go
+++ b/htlcswitch/test_utils.go
@@ -11,6 +11,7 @@ import (
 	"net"
 	"os"
 	"runtime"
+	"runtime/pprof"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -91,6 +92,8 @@ var (
 		},
 		LockTime: 5,
 	}
+
+	testBatchTimeout = 50 * time.Millisecond
 )
 
 var idSeqNum uint64
@@ -1051,7 +1054,6 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer,
 	decoder *mockIteratorDecoder) (ChannelLink, error) {
 
 	const (
-		batchTimeout        = 50 * time.Millisecond
 		fwdPkgTimeout       = 15 * time.Second
 		minFeeUpdateTimeout = 30 * time.Minute
 		maxFeeUpdateTimeout = 40 * time.Minute
@@ -1079,7 +1081,7 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer,
 		ChainEvents:         &contractcourt.ChainEventSubscription{},
 		SyncStates:          true,
 		BatchSize:           10,
-		BatchTicker:         ticker.NewForce(batchTimeout),
+		BatchTicker:         ticker.NewForce(testBatchTimeout),
 		FwdPkgGCTicker:      ticker.NewForce(fwdPkgTimeout),
 		MinFeeUpdateTimeout: minFeeUpdateTimeout,
 		MaxFeeUpdateTimeout: maxFeeUpdateTimeout,
@@ -1256,3 +1258,21 @@ func (n *twoHopNetwork) makeHoldPayment(sendingPeer, receivingPeer lnpeer.Peer,
 
 	return paymentErr
 }
+
+// timeout implements a test-level timeout.
+func timeout(t *testing.T) func() {
+	done := make(chan struct{})
+	go func() {
+		select {
+		case <-time.After(5 * time.Second):
+			pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
+
+			panic("test timeout")
+		case <-done:
+		}
+	}()
+
+	return func() {
+		close(done)
+	}
+}