Merge pull request #2913 from joostjager/hodl-restart-fix

cnct+htlcswitch+invoices: move invoice parameter check to registry
Joost Jager 2019-05-16 21:31:26 +02:00 committed by GitHub
commit 27ae22fa6c
36 changed files with 2213 additions and 1397 deletions

@ -1844,14 +1844,12 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa
Packager: channeldb.NewChannelPackager(shortChanID),
}
pCache := newMockPreimageCache()
aliceSigner := &mockSigner{aliceKeyPriv}
bobSigner := &mockSigner{bobKeyPriv}
alicePool := lnwallet.NewSigPool(1, aliceSigner)
channelAlice, err := lnwallet.NewLightningChannel(
aliceSigner, pCache, aliceChannelState, alicePool,
aliceSigner, aliceChannelState, alicePool,
)
if err != nil {
return nil, nil, nil, err
@ -1860,7 +1858,7 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa
bobPool := lnwallet.NewSigPool(1, bobSigner)
channelBob, err := lnwallet.NewLightningChannel(
bobSigner, pCache, bobChannelState, bobPool,
bobSigner, bobChannelState, bobPool,
)
if err != nil {
return nil, nil, nil, err

@ -99,7 +99,10 @@ func TestInvoiceWorkflow(t *testing.T) {
// now have the settled bit toggled to true and a non-default
// SettledDate.
payAmt := fakeInvoice.Terms.Value * 2
if _, err := db.AcceptOrSettleInvoice(paymentHash, payAmt); err != nil {
_, err = db.AcceptOrSettleInvoice(
paymentHash, payAmt, checkHtlcParameters,
)
if err != nil {
t.Fatalf("unable to settle invoice: %v", err)
}
dbInvoice2, err := db.LookupInvoice(paymentHash)
@ -261,7 +264,9 @@ func TestInvoiceAddTimeSeries(t *testing.T) {
paymentHash := invoice.Terms.PaymentPreimage.Hash()
_, err := db.AcceptOrSettleInvoice(paymentHash, 0)
_, err := db.AcceptOrSettleInvoice(
paymentHash, 0, checkHtlcParameters,
)
if err != nil {
t.Fatalf("unable to settle invoice: %v", err)
}
@ -342,7 +347,9 @@ func TestDuplicateSettleInvoice(t *testing.T) {
}
// With the invoice in the DB, we'll now attempt to settle the invoice.
dbInvoice, err := db.AcceptOrSettleInvoice(payHash, amt)
dbInvoice, err := db.AcceptOrSettleInvoice(
payHash, amt, checkHtlcParameters,
)
if err != nil {
t.Fatalf("unable to settle invoice: %v", err)
}
@ -362,7 +369,9 @@ func TestDuplicateSettleInvoice(t *testing.T) {
// If we try to settle the invoice again, then we should get the very
// same invoice back, but with an error this time.
dbInvoice, err = db.AcceptOrSettleInvoice(payHash, amt)
dbInvoice, err = db.AcceptOrSettleInvoice(
payHash, amt, checkHtlcParameters,
)
if err != ErrInvoiceAlreadySettled {
t.Fatalf("expected ErrInvoiceAlreadySettled")
}
@ -407,7 +416,10 @@ func TestQueryInvoices(t *testing.T) {
// We'll only settle half of all invoices created.
if i%2 == 0 {
if _, err := db.AcceptOrSettleInvoice(paymentHash, i); err != nil {
_, err := db.AcceptOrSettleInvoice(
paymentHash, i, checkHtlcParameters,
)
if err != nil {
t.Fatalf("unable to settle invoice: %v", err)
}
}
@ -648,3 +660,7 @@ func TestQueryInvoices(t *testing.T) {
}
}
}
// checkHtlcParameters is a no-op htlc parameter check used by these tests.
func checkHtlcParameters(invoice *Invoice) error {
return nil
}

@ -631,8 +631,12 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
//
// When the preimage for the invoice is unknown (hold invoice), the invoice is
// marked as accepted.
//
// TODO: Store invoice cltv as separate field in database so that it doesn't
// need to be decoded from the payment request.
func (d *DB) AcceptOrSettleInvoice(paymentHash [32]byte,
amtPaid lnwire.MilliSatoshi) (*Invoice, error) {
amtPaid lnwire.MilliSatoshi,
checkHtlcParameters func(invoice *Invoice) error) (*Invoice, error) {
var settledInvoice *Invoice
err := d.Update(func(tx *bbolt.Tx) error {
@ -662,6 +666,7 @@ func (d *DB) AcceptOrSettleInvoice(paymentHash [32]byte,
settledInvoice, err = acceptOrSettleInvoice(
invoices, settleIndex, invoiceNum, amtPaid,
checkHtlcParameters,
)
return err
@ -988,8 +993,10 @@ func deserializeInvoice(r io.Reader) (Invoice, error) {
return invoice, nil
}
func acceptOrSettleInvoice(invoices, settleIndex *bbolt.Bucket, invoiceNum []byte,
amtPaid lnwire.MilliSatoshi) (*Invoice, error) {
func acceptOrSettleInvoice(invoices, settleIndex *bbolt.Bucket,
invoiceNum []byte, amtPaid lnwire.MilliSatoshi,
checkHtlcParameters func(invoice *Invoice) error) (
*Invoice, error) {
invoice, err := fetchInvoice(invoiceNum, invoices)
if err != nil {
@ -1007,6 +1014,13 @@ func acceptOrSettleInvoice(invoices, settleIndex *bbolt.Bucket, invoiceNum []byt
return &invoice, ErrInvoiceAlreadyCanceled
}
// If the invoice is still open, check the htlc parameters.
if err := checkHtlcParameters(&invoice); err != nil {
return &invoice, err
}
// Check to see if we can settle, or if this is a hold invoice and we
// need to wait for the preimage.
holdInvoice := invoice.Terms.PaymentPreimage == UnknownPreimage
if holdInvoice {
invoice.Terms.State = ContractAccepted

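For context, the new checkHtlcParameters argument lets the invoice registry inject per-htlc validation into the same database transaction that accepts or settles the invoice. Below is a minimal sketch of what such a callback could look like, assuming a caller that knows the htlc amount, its expiry, the current height, and a finalCltvRejectDelta like the one NewRegistry receives elsewhere in this PR; makeHtlcCheck and the error values are hypothetical names, not identifiers from this change.

package invoices

import (
	"errors"

	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/lnwire"
)

var (
	errAmountTooLow  = errors.New("htlc amount too low")
	errExpiryTooSoon = errors.New("htlc expiry too soon")
)

// makeHtlcCheck builds a callback with the shape that
// channeldb.AcceptOrSettleInvoice expects, closing over the parameters of
// the htlc to validate.
func makeHtlcCheck(amtPaid lnwire.MilliSatoshi, htlcExpiry uint32,
	currentHeight, finalCltvRejectDelta int32) func(*channeldb.Invoice) error {

	return func(invoice *channeldb.Invoice) error {
		// The htlc must pay at least the invoice amount. A zero-value
		// invoice lets the payer choose the amount, so it is exempt.
		if invoice.Terms.Value > 0 && amtPaid < invoice.Terms.Value {
			return errAmountTooLow
		}

		// The htlc expiry must leave enough blocks to settle safely.
		if htlcExpiry < uint32(currentHeight+finalCltvRejectDelta) {
			return errExpiryTooSoon
		}

		return nil
	}
}

Because the check runs inside a registry-supplied callback, the database package no longer needs to know anything about htlc acceptance policy.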
@ -12,7 +12,6 @@ import (
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/invoices"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/sweep"
@ -146,7 +145,7 @@ type ChainArbitratorConfig struct {
// Registry is the invoice database that is used by resolvers to lookup
// preimages and settle invoices.
Registry *invoices.InvoiceRegistry
Registry Registry
// NotifyClosedChannel is a function closure that the ChainArbitrator
// will use to notify the ChannelNotifier about a newly closed channel.
@ -259,7 +258,7 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel,
// Finally, we'll force close the channel completing
// the force close workflow.
chanMachine, err := lnwallet.NewLightningChannel(
c.cfg.Signer, c.cfg.PreimageDB, channel, nil,
c.cfg.Signer, channel, nil,
)
if err != nil {
return nil, err
@ -375,7 +374,6 @@ func (c *ChainArbitrator) Start() error {
chainWatcherConfig{
chanState: channel,
notifier: c.cfg.Notifier,
pCache: c.cfg.PreimageDB,
signer: c.cfg.Signer,
isOurAddr: c.cfg.IsOurAddress,
contractBreach: func(retInfo *lnwallet.BreachRetribution) error {
@ -709,7 +707,6 @@ func (c *ChainArbitrator) WatchNewChannel(newChan *channeldb.OpenChannel) error
chainWatcherConfig{
chanState: newChan,
notifier: c.cfg.Notifier,
pCache: c.cfg.PreimageDB,
signer: c.cfg.Signer,
isOurAddr: c.cfg.IsOurAddress,
contractBreach: func(retInfo *lnwallet.BreachRetribution) error {

@ -88,11 +88,6 @@ type chainWatcherConfig struct {
// notified of output spends and when transactions are confirmed.
notifier chainntnfs.ChainNotifier
// pCache is a reference to the shared preimage cache. We'll use this
// to see if we can settle any incoming HTLC's during a remote
// commitment close event.
pCache WitnessBeacon
// signer is the main signer instances that will be responsible for
// signing any HTLC and commitment transaction generated by the state
// machine.
@ -702,7 +697,7 @@ func (c *chainWatcher) dispatchLocalForceClose(
"detected", c.cfg.chanState.FundingOutpoint)
forceClose, err := lnwallet.NewLocalForceCloseSummary(
c.cfg.chanState, c.cfg.signer, c.cfg.pCache,
c.cfg.chanState, c.cfg.signer,
commitSpend.SpendingTx, localCommit,
)
if err != nil {
@ -795,7 +790,7 @@ func (c *chainWatcher) dispatchRemoteForceClose(
// materials required to let each subscriber sweep the funds in the
// channel on-chain.
uniClose, err := lnwallet.NewUnilateralCloseSummary(
c.cfg.chanState, c.cfg.signer, c.cfg.pCache, commitSpend,
c.cfg.chanState, c.cfg.signer, commitSpend,
remoteCommit, commitPoint,
)
if err != nil {

@ -620,7 +620,10 @@ func (c *ChannelArbitrator) stateStep(triggerHeight uint32,
// chain, we'll check to see if we need to make any on-chain
// claims on behalf of the channel contract that we're
// arbitrating for.
chainActions := c.checkChainActions(triggerHeight, trigger)
chainActions, err := c.checkChainActions(triggerHeight, trigger)
if err != nil {
return StateError, closeTx, err
}
// If there are no actions to be made, then we'll remain in the
// default state. If this isn't a self initiated event (we're
@ -1082,7 +1085,7 @@ func (c *ChannelArbitrator) shouldGoOnChain(htlcExpiry, broadcastDelta,
// been sufficiently confirmed, the HTLC's should be canceled backwards. For
// redeemed HTLC's, we should send the pre-image back to the incoming link.
func (c *ChannelArbitrator) checkChainActions(height uint32,
trigger transitionTrigger) ChainActionMap {
trigger transitionTrigger) (ChainActionMap, error) {
// TODO(roasbeef): would need to lock channel? channel totem?
// * race condition if adding and we broadcast, etc
@ -1122,7 +1125,12 @@ func (c *ChannelArbitrator) checkChainActions(height uint32,
// know the pre-image and it's close to timing out. We need to
// ensure that we claim the funds that are rightfully ours
// on-chain.
if _, ok := c.cfg.PreimageDB.LookupPreimage(htlc.RHash); !ok {
preimageAvailable, err := c.isPreimageAvailable(htlc.RHash)
if err != nil {
return nil, err
}
if !preimageAvailable {
continue
}
@ -1151,7 +1159,7 @@ func (c *ChannelArbitrator) checkChainActions(height uint32,
if !haveChainActions && trigger == chainTrigger {
log.Tracef("ChannelArbitrator(%v): no actions to take at "+
"height=%v", c.cfg.ChanPoint, height)
return actionMap
return actionMap, nil
}
// Now that we know we'll need to go on-chain, we'll examine all of our
@ -1214,32 +1222,51 @@ func (c *ChannelArbitrator) checkChainActions(height uint32,
// either learn of it eventually from the outgoing HTLC, or the sender
// will timeout the HTLC.
for _, htlc := range c.activeHTLCs.incomingHTLCs {
// If we have the pre-image, then we should go on-chain to
// redeem the HTLC immediately.
if _, ok := c.cfg.PreimageDB.LookupPreimage(htlc.RHash); ok {
log.Tracef("ChannelArbitrator(%v): preimage for "+
"htlc=%x is known!", c.cfg.ChanPoint,
htlc.RHash[:])
actionMap[HtlcClaimAction] = append(
actionMap[HtlcClaimAction], htlc,
)
continue
}
log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
"action for incoming htlc=%x", c.cfg.ChanPoint,
htlc.RHash[:])
// Otherwise, we don't yet have the pre-image, but should watch
// on-chain to see if either: the remote party times out the
// HTLC, or we learn of the pre-image.
actionMap[HtlcIncomingWatchAction] = append(
actionMap[HtlcIncomingWatchAction], htlc,
)
}
return actionMap
return actionMap, nil
}
// isPreimageAvailable returns whether the hash preimage is available in either
// the preimage cache or the invoice database.
func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool,
error) {
// Start by checking the preimage cache for preimages of
// forwarded HTLCs.
_, preimageAvailable := c.cfg.PreimageDB.LookupPreimage(
hash,
)
if preimageAvailable {
return true, nil
}
// Then check if we have an invoice that can be settled by this HTLC.
//
// TODO(joostjager): Check that there are still more blocks remaining
// than the invoice cltv delta. We don't want to go to chain only to
// have the incoming contest resolver decide that we don't want to
// settle this invoice.
invoice, _, err := c.cfg.Registry.LookupInvoice(hash)
switch err {
case nil:
case channeldb.ErrInvoiceNotFound, channeldb.ErrNoInvoicesCreated:
return false, nil
default:
return false, err
}
preimageAvailable = invoice.Terms.PaymentPreimage !=
channeldb.UnknownPreimage
return preimageAvailable, nil
}
// prepContractResolutions is called either in the case that we decide we need

@ -2,6 +2,7 @@ package contractcourt
import (
"encoding/binary"
"errors"
"fmt"
"io"
@ -50,15 +51,32 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
return nil, nil
}
// We'll first check if this HTLC has been timed out; if so, we can
// return now and mark ourselves as resolved.
_, currentHeight, err := h.ChainIO.GetBestBlock()
// Register for block epochs. After registration, the current height
// will be sent on the channel immediately.
blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil)
if err != nil {
return nil, err
}
defer blockEpochs.Cancel()
// If we're past the point of expiry of the HTLC, then at this point
// the sender can sweep it, so we'll end our lifetime.
var currentHeight int32
select {
case newBlock, ok := <-blockEpochs.Epochs:
if !ok {
return nil, fmt.Errorf("quitting")
}
currentHeight = newBlock.Height
case <-h.Quit:
return nil, fmt.Errorf("resolver stopped")
}
// We'll first check if this HTLC has been timed out; if so, we can
// return now and mark ourselves as resolved. If we're past the point of
// expiry of the HTLC, then at this point the sender can sweep it, so
// we'll end our lifetime. Here we deliberately forego the chance that
// the sender doesn't sweep and we already have or will learn the
// preimage. Otherwise the resolver could potentially stay active
// indefinitely and the channel will never close properly.
if uint32(currentHeight) >= h.htlcExpiry {
// TODO(roasbeef): should also somehow check if outgoing is
// resolved or not
@ -77,10 +95,11 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
// the preimage is revealed so the inner resolver can properly complete
// its duties. The boolean return value indicates whether the preimage
// was properly applied.
tryApplyPreimage := func(preimage lntypes.Preimage) bool {
// Check to see if this preimage matches our htlc.
applyPreimage := func(preimage lntypes.Preimage) error {
// Sanity check to see if this preimage matches our htlc. At
// this point it should never happen that it does not match.
if !preimage.Matches(h.payHash) {
return false
return errors.New("preimage does not match hash")
}
// Update htlcResolution with the matching preimage.
@ -103,7 +122,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
h.htlcResolution.SignedSuccessTx.TxIn[0].Witness[3] = preimage[:]
}
return true
return nil
}
// If the HTLC hasn't expired yet, then we may still be able to claim
@ -114,33 +133,52 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
// ensure the preimage can't be delivered between querying and
// registering for the preimage subscription.
preimageSubscription := h.PreimageDB.SubscribeUpdates()
blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn(nil)
if err != nil {
defer preimageSubscription.CancelSubscription()
// Define a closure to process hodl events, either directly or triggered
// by a later notification.
processHodlEvent := func(e invoices.HodlEvent) (ContractResolver,
error) {
if e.Preimage == nil {
log.Infof("%T(%v): Exit hop HTLC canceled "+
"(expiry=%v, height=%v), abandoning", h,
h.htlcResolution.ClaimOutpoint,
h.htlcExpiry, currentHeight)
h.resolved = true
return nil, h.Checkpoint(h)
}
if err := applyPreimage(*e.Preimage); err != nil {
return nil, err
}
defer func() {
preimageSubscription.CancelSubscription()
blockEpochs.Cancel()
}()
return &h.htlcSuccessResolver, nil
}
// Create a buffered hodl chan to prevent deadlock.
hodlChan := make(chan interface{}, 1)
// Notify registry that we are potentially settling as exit hop
// on-chain, so that we will get a hodl event when a corresponding hodl
// invoice is settled.
event, err := h.Registry.NotifyExitHopHtlc(h.payHash, h.htlcAmt, hodlChan)
if err != nil && err != channeldb.ErrInvoiceNotFound {
return nil, err
}
// Notify registry that we are potentially resolving as an exit hop
// on-chain. If this HTLC indeed pays to an existing invoice, the
// invoice registry will tell us what to do with the HTLC. This is
// identical to HTLC resolution in the link.
event, err := h.Registry.NotifyExitHopHtlc(
h.payHash, h.htlcAmt, h.htlcExpiry, currentHeight,
hodlChan,
)
switch err {
case channeldb.ErrInvoiceNotFound:
case nil:
defer h.Registry.HodlUnsubscribeAll(hodlChan)
// If the htlc can be settled directly, we can progress to the inner
// resolver immediately.
if event != nil && event.Preimage != nil {
if tryApplyPreimage(*event.Preimage) {
return &h.htlcSuccessResolver, nil
// Resolve the htlc directly if possible.
if event != nil {
return processHodlEvent(*event)
}
default:
return nil, err
}
// With the epochs and preimage subscriptions initialized, we'll query
@ -150,19 +188,27 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
// If we do, then this means we can claim the HTLC! However,
// we don't know how to ourselves, so we'll return our inner
// resolver which has the knowledge to do so.
if tryApplyPreimage(preimage) {
return &h.htlcSuccessResolver, nil
if err := applyPreimage(preimage); err != nil {
return nil, err
}
return &h.htlcSuccessResolver, nil
}
for {
select {
case preimage := <-preimageSubscription.WitnessUpdates:
if !tryApplyPreimage(preimage) {
// We receive all new preimages, so we need to ignore
// all except the preimage we are waiting for.
if !preimage.Matches(h.payHash) {
continue
}
if err := applyPreimage(preimage); err != nil {
return nil, err
}
// We've learned of the preimage and this information
// has been added to our inner resolver. We return it so
// it can continue contract resolution.
@ -171,15 +217,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
case hodlItem := <-hodlChan:
hodlEvent := hodlItem.(invoices.HodlEvent)
// Only process settle events.
if hodlEvent.Preimage == nil {
continue
}
if !tryApplyPreimage(*hodlEvent.Preimage) {
continue
}
return &h.htlcSuccessResolver, nil
return processHodlEvent(hodlEvent)
case newBlock, ok := <-blockEpochs.Epochs:
if !ok {
@ -188,8 +226,7 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
// If this new height expires the HTLC, then this means
// we never found out the preimage, so we can mark
// resolved and
// exit.
// resolved and exit.
newHeight := uint32(newBlock.Height)
if newHeight >= h.htlcExpiry {
log.Infof("%T(%v): HTLC has timed out "+

@ -0,0 +1,274 @@
package contractcourt
import (
"bytes"
"testing"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/invoices"
"github.com/lightningnetwork/lnd/lnwallet"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/lntypes"
)
const (
testInitialBlockHeight = 100
testHtlcExpiry = 150
)
var (
testResPreimage = lntypes.Preimage{1, 2, 3}
testResHash = testResPreimage.Hash()
)
// TestHtlcIncomingResolverFwdPreimageKnown tests resolution of a forwarded htlc
// for which the preimage is already known initially.
func TestHtlcIncomingResolverFwdPreimageKnown(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound
ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage
ctx.resolve()
ctx.waitForResult(true)
}
// TestHtlcIncomingResolverFwdContestedSuccess tests resolution of a forwarded
// htlc for which the preimage becomes known after the resolver has been
// started.
func TestHtlcIncomingResolverFwdContestedSuccess(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound
ctx.resolve()
// Simulate a new block coming in. HTLC is not yet expired.
ctx.notifyEpoch(testInitialBlockHeight + 1)
ctx.witnessBeacon.preImageUpdates <- testResPreimage
ctx.waitForResult(true)
}
// TestHtlcIncomingResolverFwdContestedTimeout tests resolution of a forwarded
// htlc that times out after the resolver has been started.
func TestHtlcIncomingResolverFwdContestedTimeout(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound
ctx.resolve()
// Simulate a new block coming in. HTLC expires.
ctx.notifyEpoch(testHtlcExpiry)
ctx.waitForResult(false)
}
// TestHtlcIncomingResolverFwdTimeout tests resolution of a forwarded htlc that
// has already expired when the resolver starts.
func TestHtlcIncomingResolverFwdTimeout(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.registry.notifyErr = channeldb.ErrInvoiceNotFound
ctx.witnessBeacon.lookupPreimage[testResHash] = testResPreimage
ctx.resolver.htlcExpiry = 90
ctx.resolve()
ctx.waitForResult(false)
}
// TestHtlcIncomingResolverExitSettle tests resolution of an exit hop htlc for
// which the invoice has already been settled when the resolver starts.
func TestHtlcIncomingResolverExitSettle(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.registry.notifyEvent = &invoices.HodlEvent{
Hash: testResHash,
Preimage: &testResPreimage,
}
ctx.resolve()
data := <-ctx.registry.notifyChan
if data.expiry != testHtlcExpiry {
t.Fatal("incorrect expiry")
}
if data.currentHeight != testInitialBlockHeight {
t.Fatal("incorrect block height")
}
ctx.waitForResult(true)
}
// TestHtlcIncomingResolverExitCancel tests resolution of an exit hop htlc for
// an invoice that is already canceled when the resolver starts.
func TestHtlcIncomingResolverExitCancel(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.registry.notifyEvent = &invoices.HodlEvent{
Hash: testResHash,
}
ctx.resolve()
ctx.waitForResult(false)
}
// TestHtlcIncomingResolverExitSettleHodl tests resolution of an exit hop htlc
// for a hodl invoice that is settled after the resolver has started.
func TestHtlcIncomingResolverExitSettleHodl(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.resolve()
notifyData := <-ctx.registry.notifyChan
notifyData.hodlChan <- invoices.HodlEvent{
Hash: testResHash,
Preimage: &testResPreimage,
}
ctx.waitForResult(true)
}
// TestHtlcIncomingResolverExitTimeoutHodl tests resolution of an exit hop htlc
// for a hodl invoice that times out.
func TestHtlcIncomingResolverExitTimeoutHodl(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.resolve()
ctx.notifyEpoch(testHtlcExpiry)
ctx.waitForResult(false)
}
// TestHtlcIncomingResolverExitCancelHodl tests resolution of an exit hop htlc
// for a hodl invoice that is canceled after the resolver has started.
func TestHtlcIncomingResolverExitCancelHodl(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx := newIncomingResolverTestContext(t)
ctx.resolve()
notifyData := <-ctx.registry.notifyChan
notifyData.hodlChan <- invoices.HodlEvent{
Hash: testResHash,
}
ctx.waitForResult(false)
}
type incomingResolverTestContext struct {
registry *mockRegistry
witnessBeacon *mockWitnessBeacon
resolver *htlcIncomingContestResolver
notifier *mockNotifier
resolveErr chan error
nextResolver ContractResolver
t *testing.T
}
func newIncomingResolverTestContext(t *testing.T) *incomingResolverTestContext {
notifier := &mockNotifier{
epochChan: make(chan *chainntnfs.BlockEpoch),
spendChan: make(chan *chainntnfs.SpendDetail),
confChan: make(chan *chainntnfs.TxConfirmation),
}
witnessBeacon := newMockWitnessBeacon()
registry := &mockRegistry{
notifyChan: make(chan notifyExitHopData, 1),
}
checkPointChan := make(chan struct{}, 1)
chainCfg := ChannelArbitratorConfig{
ChainArbitratorConfig: ChainArbitratorConfig{
Notifier: notifier,
PreimageDB: witnessBeacon,
Registry: registry,
},
}
resolver := &htlcIncomingContestResolver{
htlcSuccessResolver: htlcSuccessResolver{
ResolverKit: ResolverKit{
ChannelArbitratorConfig: chainCfg,
Checkpoint: func(_ ContractResolver) error {
checkPointChan <- struct{}{}
return nil
},
},
htlcResolution: lnwallet.IncomingHtlcResolution{},
payHash: testResHash,
},
htlcExpiry: testHtlcExpiry,
}
return &incomingResolverTestContext{
registry: registry,
witnessBeacon: witnessBeacon,
resolver: resolver,
notifier: notifier,
t: t,
}
}
func (i *incomingResolverTestContext) resolve() {
// Start resolver.
i.resolveErr = make(chan error, 1)
go func() {
var err error
i.nextResolver, err = i.resolver.Resolve()
i.resolveErr <- err
}()
// Notify initial block height.
i.notifyEpoch(testInitialBlockHeight)
}
func (i *incomingResolverTestContext) notifyEpoch(height int32) {
i.notifier.epochChan <- &chainntnfs.BlockEpoch{
Height: height,
}
}
func (i *incomingResolverTestContext) waitForResult(expectSuccessRes bool) {
i.t.Helper()
err := <-i.resolveErr
if err != nil {
i.t.Fatal(err)
}
if !expectSuccessRes {
if i.nextResolver != nil {
i.t.Fatal("expected no next resolver")
}
return
}
successResolver, ok := i.nextResolver.(*htlcSuccessResolver)
if !ok {
i.t.Fatal("expected htlcSuccessResolver")
}
if successResolver.htlcResolution.Preimage != testResPreimage {
i.t.Fatal("invalid preimage")
}
successTx := successResolver.htlcResolution.SignedSuccessTx
if successTx != nil &&
!bytes.Equal(successTx.TxIn[0].Witness[3], testResPreimage[:]) {
i.t.Fatal("invalid preimage")
}
}

@ -7,7 +7,6 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwallet"
@ -78,9 +77,11 @@ func (h *htlcSuccessResolver) ResolverKey() []byte {
}
// Resolve attempts to resolve an unresolved incoming HTLC that we know the
// preimage to. If the HTLC is on the commitment of the remote party, then
// we'll simply sweep it directly. Otherwise, we'll hand this off to the utxo
// nursery to do its duty.
// preimage to. If the HTLC is on the commitment of the remote party, then we'll
// simply sweep it directly. Otherwise, we'll hand this off to the utxo nursery
// to do its duty. There is no need to make a call to the invoice registry
// anymore: every HTLC has already passed through the incoming contest
// resolver, where the invoice was already marked as settled.
//
// TODO(roasbeef): create multi to batch
//
@ -177,19 +178,6 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) {
return nil, fmt.Errorf("quitting")
}
// With the HTLC claimed, we can attempt to settle its
// corresponding invoice if we were the original destination. As
// the htlc is already settled at this point, we don't need to
// read on the hodl channel.
hodlChan := make(chan interface{}, 1)
_, err = h.Registry.NotifyExitHopHtlc(
h.payHash, h.htlcAmt, hodlChan,
)
if err != nil && err != channeldb.ErrInvoiceNotFound {
log.Errorf("Unable to settle invoice with payment "+
"hash %x: %v", h.payHash, err)
}
// Once the transaction has received a sufficient number of
// confirmations, we'll mark ourselves as fully resolved and exit.
h.resolved = true
@ -255,17 +243,6 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) {
return nil, fmt.Errorf("quitting")
}
// With the HTLC claimed, we can attempt to settle its corresponding
// invoice if we were the original destination. As the htlc is already
// settled at this point, we don't need to read on the hodl
// channel.
hodlChan := make(chan interface{}, 1)
_, err = h.Registry.NotifyExitHopHtlc(h.payHash, h.htlcAmt, hodlChan)
if err != nil && err != channeldb.ErrInvoiceNotFound {
log.Errorf("Unable to settle invoice with payment "+
"hash %x: %v", h.payHash, err)
}
h.resolved = true
return nil, h.Checkpoint(h)
}

@ -29,8 +29,16 @@ func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx,
type mockWitnessBeacon struct {
preImageUpdates chan lntypes.Preimage
newPreimages chan []lntypes.Preimage
lookupPreimage map[lntypes.Hash]lntypes.Preimage
}
func newMockWitnessBeacon() *mockWitnessBeacon {
return &mockWitnessBeacon{
preImageUpdates: make(chan lntypes.Preimage, 1),
newPreimages: make(chan []lntypes.Preimage),
lookupPreimage: make(map[lntypes.Hash]lntypes.Preimage),
}
}
func (m *mockWitnessBeacon) SubscribeUpdates() *WitnessSubscription {
@ -41,7 +49,11 @@ func (m *mockWitnessBeacon) SubscribeUpdates() *WitnessSubscription {
}
func (m *mockWitnessBeacon) LookupPreimage(payhash lntypes.Hash) (lntypes.Preimage, bool) {
preimage, ok := m.lookupPreimage[payhash]
if !ok {
return lntypes.Preimage{}, false
}
return preimage, true
}
func (m *mockWitnessBeacon) AddPreimages(preimages ...lntypes.Preimage) error {
@ -190,10 +202,7 @@ func TestHtlcTimeoutResolver(t *testing.T) {
spendChan: make(chan *chainntnfs.SpendDetail),
confChan: make(chan *chainntnfs.TxConfirmation),
}
witnessBeacon := &mockWitnessBeacon{
preImageUpdates: make(chan lntypes.Preimage, 1),
newPreimages: make(chan []lntypes.Preimage),
}
witnessBeacon := newMockWitnessBeacon()
for _, testCase := range testCases {
t.Logf("Running test case: %v", testCase.name)

@ -0,0 +1,29 @@
package contractcourt
import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/invoices"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
)
// Registry is an interface which represents the invoice registry.
type Registry interface {
// LookupInvoice attempts to look up an invoice according to its 32
// byte payment hash. This method should also return the min final CLTV
// delta for this invoice. We'll use this to ensure that the HTLC
// extended to us gives us enough time to settle as we prescribe.
LookupInvoice(lntypes.Hash) (channeldb.Invoice, uint32, error)
// NotifyExitHopHtlc attempts to mark an invoice as settled. If the
// invoice is a debug invoice, then this method is a noop as debug
// invoices are never fully settled. The return value describes how the
// htlc should be resolved. If the htlc cannot be resolved immediately,
// the resolution is sent on the passed in hodlChan later.
NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi,
expiry uint32, currentHeight int32,
hodlChan chan<- interface{}) (*invoices.HodlEvent, error)
// HodlUnsubscribeAll unsubscribes from all hodl events.
HodlUnsubscribeAll(subscriber chan<- interface{})
}
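To make the calling convention concrete, here is a hedged sketch of how a resolver might drive this interface: a non-nil HodlEvent returned directly resolves the htlc on the spot, while a nil event means the resolution arrives later on the hodl channel. resolveViaRegistry is illustrative only and not part of this PR.

package contractcourt

import (
	"github.com/lightningnetwork/lnd/invoices"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwire"
)

// resolveViaRegistry shows the intended use of the Registry interface. It
// is an illustrative helper, not part of this change.
func resolveViaRegistry(registry Registry, payHash lntypes.Hash,
	amt lnwire.MilliSatoshi, expiry uint32,
	height int32) (*invoices.HodlEvent, error) {

	// Buffer the channel so the registry never blocks on delivery.
	hodlChan := make(chan interface{}, 1)
	defer registry.HodlUnsubscribeAll(hodlChan)

	event, err := registry.NotifyExitHopHtlc(
		payHash, amt, expiry, height, hodlChan,
	)
	if err != nil {
		// A channeldb.ErrInvoiceNotFound error means the htlc is a
		// forward rather than an exit hop; real callers handle that
		// case separately.
		return nil, err
	}

	// An immediate resolution (settle or cancel) was available.
	if event != nil {
		return event, nil
	}

	// Otherwise block until the hodl invoice is settled or canceled.
	item := <-hodlChan
	hodlEvent := item.(invoices.HodlEvent)

	return &hodlEvent, nil
}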

@ -0,0 +1,45 @@
package contractcourt
import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/invoices"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
)
type notifyExitHopData struct {
payHash lntypes.Hash
paidAmount lnwire.MilliSatoshi
hodlChan chan<- interface{}
expiry uint32
currentHeight int32
}
type mockRegistry struct {
notifyChan chan notifyExitHopData
notifyErr error
notifyEvent *invoices.HodlEvent
}
func (r *mockRegistry) NotifyExitHopHtlc(payHash lntypes.Hash,
paidAmount lnwire.MilliSatoshi, expiry uint32, currentHeight int32,
hodlChan chan<- interface{}) (*invoices.HodlEvent, error) {
r.notifyChan <- notifyExitHopData{
hodlChan: hodlChan,
payHash: payHash,
paidAmount: paidAmount,
expiry: expiry,
currentHeight: currentHeight,
}
return r.notifyEvent, r.notifyErr
}
func (r *mockRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) {}
func (r *mockRegistry) LookupInvoice(lntypes.Hash) (channeldb.Invoice, uint32,
error) {
return channeldb.Invoice{}, 0, channeldb.ErrInvoiceNotFound
}

@ -0,0 +1,26 @@
package contractcourt
import (
"os"
"runtime/pprof"
"testing"
"time"
)
// timeout implements a test-level timeout.
func timeout(t *testing.T) func() {
done := make(chan struct{})
go func() {
select {
case <-time.After(5 * time.Second):
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
panic("test timeout")
case <-done:
}
}()
return func() {
close(done)
}
}
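The tests above use this helper by deferring the function it returns, which stops the watchdog goroutine once the test completes; for example (TestSomething is an illustrative name):

func TestSomething(t *testing.T) {
	t.Parallel()

	// Cancel the timeout watchdog when the test finishes.
	defer timeout(t)()

	// ... test body ...
}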

@ -1712,7 +1712,7 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) {
// Go on adding the channel to the channel graph, and crafting
// channel announcements.
lnChannel, err := lnwallet.NewLightningChannel(
nil, nil, completeChan, nil,
nil, completeChan, nil,
)
if err != nil {
fndgLog.Errorf("failed creating lnChannel: %v", err)
@ -2005,7 +2005,7 @@ func (f *fundingManager) handleFundingConfirmation(peer lnpeer.Peer,
// We create the state-machine object which wraps the database state.
lnChannel, err := lnwallet.NewLightningChannel(
nil, nil, completeChan, nil,
nil, completeChan, nil,
)
if err != nil {
return err

@ -24,6 +24,7 @@ type InvoiceDatabase interface {
// htlc should be resolved. If the htlc cannot be resolved immediately,
// the resolution is sent on the passed in hodlChan later.
NotifyExitHopHtlc(payHash lntypes.Hash, paidAmount lnwire.MilliSatoshi,
expiry uint32, currentHeight int32,
hodlChan chan<- interface{}) (*invoices.HodlEvent, error)
// CancelInvoice attempts to cancel the invoice corresponding to the

@ -235,13 +235,6 @@ type ChannelLinkConfig struct {
MinFeeUpdateTimeout time.Duration
MaxFeeUpdateTimeout time.Duration
// FinalCltvRejectDelta defines the number of blocks before the expiry
// of the htlc where we no longer settle it as an exit hop and instead
// cancel it back. Normally this value should be lower than the cltv
// expiry of any invoice we create and the code effectuating this should
// not be hit.
FinalCltvRejectDelta uint32
// OutgoingCltvRejectDelta defines the number of blocks before expiry of
// an htlc where we don't offer an htlc anymore. This should be at least
// the outgoing broadcast delta, because in any case we don't want to
@ -1173,15 +1166,12 @@ func (l *channelLink) processHodlEvent(hodlEvent invoices.HodlEvent,
htlcs ...hodlHtlc) error {
hash := hodlEvent.Hash
if hodlEvent.Preimage == nil {
l.debugf("Received hodl cancel event for %v", hash)
} else {
l.debugf("Received hodl settle event for %v", hash)
}
// Determine required action for the resolution.
var hodlAction func(htlc hodlHtlc) error
if hodlEvent.Preimage != nil {
l.debugf("Received hodl settle event for %v", hash)
hodlAction = func(htlc hodlHtlc) error {
return l.settleHTLC(
*hodlEvent.Preimage, htlc.pd.HtlcIndex,
@ -1189,10 +1179,30 @@ func (l *channelLink) processHodlEvent(hodlEvent invoices.HodlEvent,
)
}
} else {
l.debugf("Received hodl cancel event for %v, reason=%v",
hash, hodlEvent.CancelReason)
hodlAction = func(htlc hodlHtlc) error {
failure := lnwire.NewFailUnknownPaymentHash(
var failure lnwire.FailureMessage
switch hodlEvent.CancelReason {
case invoices.CancelAmountTooLow:
fallthrough
case invoices.CancelInvoiceUnknown:
fallthrough
case invoices.CancelInvoiceCanceled:
failure = lnwire.NewFailUnknownPaymentHash(
htlc.pd.Amount,
)
case invoices.CancelExpiryTooSoon:
failure = lnwire.FailFinalExpiryTooSoon{}
default:
return fmt.Errorf("unknown cancel reason: %v",
hodlEvent.CancelReason)
}
l.sendHTLCError(
htlc.pd.HtlcIndex, failure, htlc.obfuscator,
htlc.pd.SourceRef,
@ -2739,94 +2749,14 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor,
return false, nil
}
// First, we'll check the expiry of the HTLC itself against the current
// block height. If the timeout is too soon, then we'll reject the HTLC.
if pd.Timeout <= heightNow+l.cfg.FinalCltvRejectDelta {
log.Errorf("htlc(%x) has an expiry that's too soon: expiry=%v"+
", best_height=%v", pd.RHash[:], pd.Timeout, heightNow)
failure := lnwire.NewFinalExpiryTooSoon()
l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef)
return true, nil
}
// We're the designated payment destination. Therefore we attempt to
// see if we have an invoice locally which'll allow us to settle this
// htlc.
//
// Only the immutable data from LookupInvoice is used, because otherwise
// a race condition may be created with concurrent writes to the invoice
// registry. For example: cancelation of an invoice.
invoiceHash := lntypes.Hash(pd.RHash)
invoice, minCltvDelta, err := l.cfg.Registry.LookupInvoice(invoiceHash)
if err != nil {
log.Errorf("unable to query invoice registry: %v", err)
failure := lnwire.NewFailUnknownPaymentHash(pd.Amount)
l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef)
return true, nil
}
// If the invoice is already settled, we choose to accept the payment to
// simplify failure recovery.
//
// NOTE: Though our recovery and forwarding logic is predominately
// batched, settling invoices happens iteratively. We may reject one of
// two payments for the same rhash at first, but then restart and reject
// both after seeing that the invoice has been settled. Without any
// record of which one settles first, it is ambiguous as to which one
// actually settled the invoice. Thus, by accepting all payments, we
// eliminate the race condition that can lead to this inconsistency.
//
// TODO(conner): track ownership of settlements to properly recover from
// failures? or add batch invoice settlement
//
// TODO(joostjager): The log statement below is not always accurate, as
// the invoice may have been canceled after the LookupInvoice call.
// Leaving it as is for now, because fixing this would involve changing
// the signature of InvoiceRegistry.SettleInvoice just because of this
// log statement.
if invoice.Terms.State == channeldb.ContractSettled {
log.Warnf("Accepting duplicate payment for hash=%x",
pd.RHash[:])
}
// If we're not currently in debug mode, and the extended htlc doesn't
// meet the value requested, then we'll fail the htlc. Otherwise, we
// settle this htlc within our local state update log, then send the
// update entry to the remote party.
//
// NOTE: We make an exception when the value requested by the invoice is
// zero. This means the invoice allows the payee to specify the amount
// of satoshis they wish to send. So since we expect the htlc to have a
// different amount, we should not fail.
if !l.cfg.DebugHTLC && invoice.Terms.Value > 0 &&
pd.Amount < invoice.Terms.Value {
log.Errorf("rejecting htlc due to incorrect amount: expected "+
"%v, received %v", invoice.Terms.Value, pd.Amount)
failure := lnwire.NewFailUnknownPaymentHash(pd.Amount)
l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef)
return true, nil
}
// As we're the exit hop, we'll double check the hop-payload included in
// the HTLC to ensure that it was crafted correctly by the sender and
// matches the HTLC we were extended.
//
// NOTE: We make an exception when the value requested by the invoice is
// zero. This means the invoice allows the payee to specify the amount
// of satoshis they wish to send. So since we expect the htlc to have a
// different amount, we should not fail.
if !l.cfg.DebugHTLC && invoice.Terms.Value > 0 &&
fwdInfo.AmountToForward < invoice.Terms.Value {
if !l.cfg.DebugHTLC && pd.Amount != fwdInfo.AmountToForward {
log.Errorf("Onion payload of incoming htlc(%x) has incorrect "+
"value: expected %v, got %v", pd.RHash,
invoice.Terms.Value, fwdInfo.AmountToForward)
pd.Amount, fwdInfo.AmountToForward)
failure := lnwire.NewFailUnknownPaymentHash(pd.Amount)
l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef)
@ -2835,27 +2765,11 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor,
}
// We'll also ensure that our time-lock value has been computed
// correctly. Only check the final cltv expiry for invoices when the
// invoice has not yet moved to the accepted state. Otherwise hodl htlcs
// would be canceled after a restart.
expectedHeight := heightNow + minCltvDelta
switch {
case !l.cfg.DebugHTLC &&
invoice.Terms.State == channeldb.ContractOpen &&
pd.Timeout < expectedHeight:
log.Errorf("Incoming htlc(%x) has an expiration that is too "+
"soon: expected at least %v, got %v",
pd.RHash[:], expectedHeight, pd.Timeout)
failure := lnwire.FailFinalExpiryTooSoon{}
l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef)
return true, nil
case !l.cfg.DebugHTLC && pd.Timeout != fwdInfo.OutgoingCTLV:
log.Errorf("HTLC(%x) has incorrect time-lock: expected %v, "+
"got %v", pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV)
// correctly.
if !l.cfg.DebugHTLC && pd.Timeout != fwdInfo.OutgoingCTLV {
log.Errorf("Onion payload of incoming htlc(%x) has incorrect "+
"time-lock: expected %v, got %v",
pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV)
failure := lnwire.NewFinalIncorrectCltvExpiry(
fwdInfo.OutgoingCTLV,
@ -2868,10 +2782,27 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor,
// Notify the invoiceRegistry of the exit hop htlc. If we crash right
// after this, this code will be re-executed after restart. We will
// receive back a resolution event.
invoiceHash := lntypes.Hash(pd.RHash)
event, err := l.cfg.Registry.NotifyExitHopHtlc(
invoiceHash, pd.Amount, l.hodlQueue.ChanIn(),
invoiceHash, pd.Amount, pd.Timeout, int32(heightNow),
l.hodlQueue.ChanIn(),
)
if err != nil {
switch err {
// Cancel htlc if we don't have an invoice for it.
case channeldb.ErrInvoiceNotFound:
failure := lnwire.NewFailUnknownPaymentHash(pd.Amount)
l.sendHTLCError(pd.HtlcIndex, failure, obfuscator, pd.SourceRef)
return true, nil
// No error.
case nil:
// Pass error to caller.
default:
return false, err
}

@ -180,7 +180,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) {
t.Parallel()
// Set up an alice-bob network.
aliceChannel, bobChannel, cleanUp, err := createTwoClusterChannels(
alice, bob, cleanUp, err := createTwoClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5)
if err != nil {
@ -188,7 +188,9 @@ func TestChannelLinkSingleHopPayment(t *testing.T) {
}
defer cleanUp()
n := newTwoHopNetwork(t, aliceChannel, bobChannel, testStartingHeight)
n := newTwoHopNetwork(
t, alice.channel, bob.channel, testStartingHeight,
)
if err := n.start(); err != nil {
t.Fatal(err)
}
@ -1592,7 +1594,7 @@ func (m *mockPeer) Address() net.Addr {
func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) (
ChannelLink, *lnwallet.LightningChannel, chan time.Time, func() error,
func(), chanRestoreFunc, error) {
func(), func() (*lnwallet.LightningChannel, error), error) {
var chanIDBytes [8]byte
if _, err := io.ReadFull(rand.Reader, chanIDBytes[:]); err != nil {
@ -1602,7 +1604,7 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) (
chanID := lnwire.NewShortChanIDFromInt(
binary.BigEndian.Uint64(chanIDBytes[:]))
aliceChannel, bobChannel, fCleanUp, restore, err := createTestChannel(
aliceLc, bobLc, fCleanUp, err := createTestChannel(
alicePrivKey, bobPrivKey, chanAmt, chanAmt,
chanReserve, chanReserve, chanID,
)
@ -1628,7 +1630,7 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) (
pCache := newMockPreimageCache()
aliceDb := aliceChannel.State().Db
aliceDb := aliceLc.channel.State().Db
aliceSwitch, err := initSwitchWithDB(testStartingHeight, aliceDb)
if err != nil {
return nil, nil, nil, nil, nil, nil, err
@ -1668,7 +1670,7 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) (
}
const startingHeight = 100
aliceLink := NewChannelLink(aliceCfg, aliceChannel)
aliceLink := NewChannelLink(aliceCfg, aliceLc.channel)
start := func() error {
return aliceSwitch.AddLink(aliceLink)
}
@ -1687,7 +1689,8 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) (
defer fCleanUp()
}
return aliceLink, bobChannel, bticker.Force, start, cleanUp, restore, nil
return aliceLink, bobLc.channel, bticker.Force, start, cleanUp,
aliceLc.restore, nil
}
func assertLinkBandwidth(t *testing.T, link ChannelLink,
@ -2546,7 +2549,9 @@ func TestChannelLinkTrimCircuitsPending(t *testing.T) {
t.Fatalf("unable to start test harness: %v", err)
}
alice := newPersistentLinkHarness(t, aliceLink, batchTicker, restore)
alice := newPersistentLinkHarness(
t, aliceLink, batchTicker, restore,
)
// Compute the static fees that will be used to determine the
// correctness of Alice's bandwidth when forwarding HTLCs.
@ -2818,7 +2823,9 @@ func TestChannelLinkTrimCircuitsNoCommit(t *testing.T) {
t.Fatalf("unable to start test harness: %v", err)
}
alice := newPersistentLinkHarness(t, aliceLink, batchTicker, restore)
alice := newPersistentLinkHarness(
t, aliceLink, batchTicker, restore,
)
// We'll put Alice into hodl.Commit mode, such that the circuits for any
// outgoing ADDs are opened, but the changes are not committed in the
@ -3980,14 +3987,15 @@ type persistentLinkHarness struct {
batchTicker chan time.Time
msgs chan lnwire.Message
restoreChan chanRestoreFunc
restoreChan func() (*lnwallet.LightningChannel, error)
}
// newPersistentLinkHarness initializes a new persistentLinkHarness and derives
// the supporting references from the active link.
func newPersistentLinkHarness(t *testing.T, link ChannelLink,
batchTicker chan time.Time,
restore chanRestoreFunc) *persistentLinkHarness {
restore func() (*lnwallet.LightningChannel,
error)) *persistentLinkHarness {
coreLink := link.(*channelLink)
@ -4018,23 +4026,17 @@ func (h *persistentLinkHarness) restart(restartSwitch bool,
// First, remove the link from the switch.
h.coreLink.cfg.Switch.RemoveLink(h.link.ChanID())
var htlcSwitch *Switch
if restartSwitch {
// If a switch restart is requested, we will stop it and
// leave htlcSwitch nil, which will trigger the creation
// of a fresh instance in restartLink.
// If a switch restart is requested, we will stop it. It will be
// reinstantiated in restartLink.
h.coreLink.cfg.Switch.Stop()
} else {
// Otherwise, we capture the switch's reference so that
// it can be carried over to the restarted link.
htlcSwitch = h.coreLink.cfg.Switch
}
// Since our in-memory state may have diverged from our persistent
// state, we will restore the persisted state to ensure we always start
// the link in a consistent state.
var err error
h.channel, _, err = h.restoreChan()
h.channel, err = h.restoreChan()
if err != nil {
h.t.Fatalf("unable to restore channels: %v", err)
}
@ -4043,8 +4045,8 @@ func (h *persistentLinkHarness) restart(restartSwitch bool,
// adding the link to an existing switch, or creating a new one using
// the database owned by the link.
var cleanUp func()
h.link, h.batchTicker, cleanUp, err = restartLink(
h.channel, htlcSwitch, hodlFlags,
h.link, h.batchTicker, cleanUp, err = h.restartLink(
h.channel, restartSwitch, hodlFlags,
)
if err != nil {
h.t.Fatalf("unable to restart alicelink: %v", err)
@ -4120,8 +4122,10 @@ func (h *persistentLinkHarness) trySignNextCommitment() {
// restartLink creates a new channel link from the given channel state and
// adds it to an htlcswitch. If none is provided by the caller, a new one
// will be created using Alice's database.
func restartLink(aliceChannel *lnwallet.LightningChannel, aliceSwitch *Switch,
hodlFlags []hodl.Flag) (ChannelLink, chan time.Time, func(), error) {
func (h *persistentLinkHarness) restartLink(
aliceChannel *lnwallet.LightningChannel, restartSwitch bool,
hodlFlags []hodl.Flag) (
ChannelLink, chan time.Time, func(), error) {
var (
decoder = newMockIteratorDecoder()
@ -4137,14 +4141,12 @@ func restartLink(aliceChannel *lnwallet.LightningChannel, aliceSwitch *Switch,
TimeLockDelta: 6,
}
invoiceRegistry = newMockRegistry(globalPolicy.TimeLockDelta)
pCache = newMockPreimageCache()
)
aliceDb := aliceChannel.State().Db
if aliceSwitch == nil {
aliceSwitch := h.coreLink.cfg.Switch
if restartSwitch {
var err error
aliceSwitch, err = initSwitchWithDB(testStartingHeight, aliceDb)
if err != nil {
@ -4174,7 +4176,7 @@ func restartLink(aliceChannel *lnwallet.LightningChannel, aliceSwitch *Switch,
UpdateContractSignals: func(*contractcourt.ContractSignals) error {
return nil
},
Registry: invoiceRegistry,
Registry: h.coreLink.cfg.Registry,
ChainEvents: &contractcourt.ChainEventSubscription{},
BatchTicker: bticker,
FwdPkgGCTicker: ticker.New(5 * time.Second),
@ -4239,12 +4241,13 @@ func generateHtlcAndInvoice(t *testing.T,
t.Helper()
htlcAmt := lnwire.NewMSatFromSatoshis(10000)
htlcExpiry := testStartingHeight + testInvoiceCltvExpiry
hops := []ForwardingInfo{
{
Network: BitcoinHop,
NextHop: exitHop,
AmountToForward: htlcAmt,
OutgoingCTLV: 144,
OutgoingCTLV: uint32(htlcExpiry),
},
}
blob, err := generateRoute(hops...)
@ -4252,8 +4255,9 @@ func generateHtlcAndInvoice(t *testing.T,
t.Fatalf("unable to generate route: %v", err)
}
invoice, htlc, err := generatePayment(htlcAmt, htlcAmt, 144,
blob)
invoice, htlc, err := generatePayment(
htlcAmt, htlcAmt, uint32(htlcExpiry), blob,
)
if err != nil {
t.Fatalf("unable to create payment: %v", err)
}
@ -5573,7 +5577,7 @@ func TestChannelLinkCanceledInvoice(t *testing.T) {
t.Parallel()
// Set up an alice-bob network.
aliceChannel, bobChannel, cleanUp, err := createTwoClusterChannels(
alice, bob, cleanUp, err := createTwoClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5)
if err != nil {
@ -5581,7 +5585,7 @@ func TestChannelLinkCanceledInvoice(t *testing.T) {
}
defer cleanUp()
n := newTwoHopNetwork(t, aliceChannel, bobChannel, testStartingHeight)
n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight)
if err := n.start(); err != nil {
t.Fatal(err)
}
@ -5633,12 +5637,14 @@ type hodlInvoiceTestCtx struct {
amount lnwire.MilliSatoshi
errChan chan error
restoreBob func() (*lnwallet.LightningChannel, error)
cleanUp func()
}
func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) {
// Set up an alice-bob network.
aliceChannel, bobChannel, cleanUp, err := createTwoClusterChannels(
alice, bob, cleanUp, err := createTwoClusterChannels(
btcutil.SatoshiPerBitcoin*3,
btcutil.SatoshiPerBitcoin*5,
)
@ -5646,7 +5652,7 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) {
t.Fatalf("unable to create channel: %v", err)
}
n := newTwoHopNetwork(t, aliceChannel, bobChannel, testStartingHeight)
n := newTwoHopNetwork(t, alice.channel, bob.channel, testStartingHeight)
if err := n.start(); err != nil {
t.Fatal(err)
}
@ -5712,6 +5718,7 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) {
hash: hash,
amount: amount,
errChan: errChan,
restoreBob: bob.restore,
cleanUp: func() {
cleanUp()
@ -5724,6 +5731,8 @@ func newHodlInvoiceTestCtx(t *testing.T) (*hodlInvoiceTestCtx, error) {
func TestChannelLinkHoldInvoiceSettle(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx, err := newHodlInvoiceTestCtx(t)
if err != nil {
t.Fatal(err)
@ -5736,14 +5745,10 @@ func TestChannelLinkHoldInvoiceSettle(t *testing.T) {
}
// Wait for payment to succeed.
select {
case err := <-ctx.errChan:
err = <-ctx.errChan
if err != nil {
t.Fatal(err)
}
case <-time.After(5 * time.Second):
t.Fatal("timeout")
}
// Wait for Bob to receive the revocation.
if ctx.startBandwidthAlice-ctx.amount !=
@ -5766,6 +5771,8 @@ func TestChannelLinkHoldInvoiceSettle(t *testing.T) {
func TestChannelLinkHoldInvoiceCancel(t *testing.T) {
t.Parallel()
defer timeout(t)()
ctx, err := newHodlInvoiceTestCtx(t)
if err != nil {
t.Fatal(err)
@ -5778,15 +5785,102 @@ func TestChannelLinkHoldInvoiceCancel(t *testing.T) {
}
// Wait for the payment to be failed back.
select {
case err := <-ctx.errChan:
err = <-ctx.errChan
if !strings.Contains(err.Error(),
lnwire.CodeUnknownPaymentHash.String()) {
t.Fatal("expected unknown payment hash")
}
case <-time.After(5 * time.Second):
t.Fatal("timeout")
}
// TestChannelLinkHoldInvoiceRestart asserts that hodl htlcs are held after
// blocks are mined and the link is restarted. The initial expiry checks
// should not apply to hodl htlcs after restart.
func TestChannelLinkHoldInvoiceRestart(t *testing.T) {
t.Parallel()
defer timeout(t)()
const (
chanAmt = btcutil.SatoshiPerBitcoin * 5
)
// We'll start by creating a new link with our chanAmt (5 BTC). We will
// only be testing Alice's behavior, so the reference to Bob's channel
// state is unnecessary.
aliceLink, bobChannel, _, start, cleanUp, restore, err :=
newSingleLinkTestHarness(chanAmt, 0)
if err != nil {
t.Fatalf("unable to create link: %v", err)
}
defer cleanUp()
alice := newPersistentLinkHarness(
t, aliceLink, nil, restore,
)
if err := start(); err != nil {
t.Fatalf("unable to start test harness: %v", err)
}
var (
coreLink = alice.coreLink
registry = coreLink.cfg.Registry.(*mockInvoiceRegistry)
)
registry.settleChan = make(chan lntypes.Hash)
htlc, invoice := generateHtlcAndInvoice(t, 0)
// Convert into a hodl invoice and save the preimage for later.
preimage := invoice.Terms.PaymentPreimage
invoice.Terms.PaymentPreimage = channeldb.UnknownPreimage
// We must add the invoice to the registry, such that Alice
// expects this payment.
err = registry.AddInvoice(
*invoice, htlc.PaymentHash,
)
if err != nil {
t.Fatalf("unable to add invoice to registry: %v", err)
}
// Lock in htlc paying the hodl invoice.
sendHtlcBobToAlice(t, alice.link, bobChannel, htlc)
sendCommitSigBobToAlice(t, alice.link, bobChannel, 1)
receiveRevAndAckAliceToBob(t, alice.msgs, alice.link, bobChannel)
receiveCommitSigAliceToBob(t, alice.msgs, alice.link, bobChannel, 1)
sendRevAndAckBobToAlice(t, alice.link, bobChannel)
// We expect a call to the invoice registry to notify the arrival of the
// htlc.
<-registry.settleChan
// Increase block height. This height will be retrieved by the link
// after restart.
coreLink.cfg.Switch.bestHeight++
// Restart link.
alice.restart(false)
// Expect htlc to be reprocessed.
<-registry.settleChan
// Settle the invoice with the preimage.
registry.SettleHodlInvoice(preimage)
// Expect alice to send a settle and commitsig message to bob.
receiveSettleAliceToBob(t, alice.msgs, alice.link, bobChannel)
receiveCommitSigAliceToBob(t, alice.msgs, alice.link, bobChannel, 0)
// Stop the link
alice.link.Stop()
// Check that no unexpected messages were sent.
select {
case msg := <-alice.msgs:
t.Fatalf("did not expect message %T", msg)
default:
}
}

@ -766,7 +766,9 @@ func newMockRegistry(minDelta uint32) *mockInvoiceRegistry {
return testInvoiceCltvExpiry, nil
}
registry := invoices.NewRegistry(cdb, decodeExpiry)
finalCltvRejectDelta := int32(5)
registry := invoices.NewRegistry(cdb, decodeExpiry, finalCltvRejectDelta)
registry.Start()
return &mockInvoiceRegistry{
@ -784,10 +786,12 @@ func (i *mockInvoiceRegistry) SettleHodlInvoice(preimage lntypes.Preimage) error
}
func (i *mockInvoiceRegistry) NotifyExitHopHtlc(rhash lntypes.Hash,
amt lnwire.MilliSatoshi, hodlChan chan<- interface{}) (
*invoices.HodlEvent, error) {
amt lnwire.MilliSatoshi, expiry uint32, currentHeight int32,
hodlChan chan<- interface{}) (*invoices.HodlEvent, error) {
event, err := i.registry.NotifyExitHopHtlc(rhash, amt, hodlChan)
event, err := i.registry.NotifyExitHopHtlc(
rhash, amt, expiry, currentHeight, hodlChan,
)
if err != nil {
return nil, err
}

@ -11,6 +11,7 @@ import (
"net"
"os"
"runtime"
"runtime/pprof"
"sync/atomic"
"testing"
"time"
@ -91,6 +92,8 @@ var (
},
LockTime: 5,
}
testBatchTimeout = 50 * time.Millisecond
)
var idSeqNum uint64
@ -147,15 +150,19 @@ func generateRandomBytes(n int) ([]byte, error) {
return b, nil
}
type testLightningChannel struct {
channel *lnwallet.LightningChannel
restore func() (*lnwallet.LightningChannel, error)
}
// createTestChannel creates the channel and returns representations of the
// local and remote channels.
//
// TODO(roasbeef): need to factor out, similar func re-used in many parts of codebase
func createTestChannel(alicePrivKey, bobPrivKey []byte,
aliceAmount, bobAmount, aliceReserve, bobReserve btcutil.Amount,
chanID lnwire.ShortChannelID) (*lnwallet.LightningChannel, *lnwallet.LightningChannel, func(),
func() (*lnwallet.LightningChannel, *lnwallet.LightningChannel,
error), error) {
chanID lnwire.ShortChannelID) (*testLightningChannel,
*testLightningChannel, func(), error) {
aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), alicePrivKey)
bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(), bobPrivKey)
@ -187,7 +194,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
var hash [sha256.Size]byte
randomSeed, err := generateRandomBytes(sha256.Size)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
copy(hash[:], randomSeed)
@ -236,23 +243,23 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])
aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])
@ -260,25 +267,25 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
bobAmount, &aliceCfg, &bobCfg, aliceCommitPoint, bobCommitPoint,
*fundingTxIn)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
alicePath, err := ioutil.TempDir("", "alicedb")
dbAlice, err := channeldb.Open(alicePath)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
bobPath, err := ioutil.TempDir("", "bobdb")
dbBob, err := channeldb.Open(bobPath)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
estimator := lnwallet.NewStaticFeeEstimator(6000, 0)
feePerKw, err := estimator.EstimateFeePerKW(1)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
commitFee := feePerKw.FeeForWeight(724)
@ -350,11 +357,11 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
}
if err := aliceChannelState.SyncPending(bobAddr, broadcastHeight); err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
if err := bobChannelState.SyncPending(aliceAddr, broadcastHeight); err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
cleanUpFunc := func() {
@ -367,23 +374,21 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
aliceSigner := &mockSigner{aliceKeyPriv}
bobSigner := &mockSigner{bobKeyPriv}
pCache := newMockPreimageCache()
alicePool := lnwallet.NewSigPool(runtime.NumCPU(), aliceSigner)
channelAlice, err := lnwallet.NewLightningChannel(
aliceSigner, pCache, aliceChannelState, alicePool,
aliceSigner, aliceChannelState, alicePool,
)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
alicePool.Start()
bobPool := lnwallet.NewSigPool(runtime.NumCPU(), bobSigner)
channelBob, err := lnwallet.NewLightningChannel(
bobSigner, pCache, bobChannelState, bobPool,
bobSigner, bobChannelState, bobPool,
)
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
bobPool.Start()
@ -391,40 +396,38 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
// having Alice and Bob extend their revocation windows to each other.
aliceNextRevoke, err := channelAlice.NextRevocationKey()
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
if err := channelBob.InitNextRevocation(aliceNextRevoke); err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
bobNextRevoke, err := channelBob.NextRevocationKey()
if err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
if err := channelAlice.InitNextRevocation(bobNextRevoke); err != nil {
return nil, nil, nil, nil, err
return nil, nil, nil, err
}
restore := func() (*lnwallet.LightningChannel, *lnwallet.LightningChannel,
error) {
restoreAlice := func() (*lnwallet.LightningChannel, error) {
aliceStoredChannels, err := dbAlice.FetchOpenChannels(aliceKeyPub)
switch err {
case nil:
case bbolt.ErrDatabaseNotOpen:
dbAlice, err = channeldb.Open(dbAlice.Path())
if err != nil {
return nil, nil, errors.Errorf("unable to reopen alice "+
return nil, errors.Errorf("unable to reopen alice "+
"db: %v", err)
}
aliceStoredChannels, err = dbAlice.FetchOpenChannels(aliceKeyPub)
if err != nil {
return nil, nil, errors.Errorf("unable to fetch alice "+
return nil, errors.Errorf("unable to fetch alice "+
"channel: %v", err)
}
default:
return nil, nil, errors.Errorf("unable to fetch alice channel: "+
return nil, errors.Errorf("unable to fetch alice channel: "+
"%v", err)
}
@ -437,34 +440,38 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
}
if aliceStoredChannel == nil {
return nil, nil, errors.New("unable to find stored alice channel")
return nil, errors.New("unable to find stored alice channel")
}
newAliceChannel, err := lnwallet.NewLightningChannel(
aliceSigner, nil, aliceStoredChannel, alicePool,
aliceSigner, aliceStoredChannel, alicePool,
)
if err != nil {
return nil, nil, errors.Errorf("unable to create new channel: %v",
return nil, errors.Errorf("unable to create new channel: %v",
err)
}
return newAliceChannel, nil
}
restoreBob := func() (*lnwallet.LightningChannel, error) {
bobStoredChannels, err := dbBob.FetchOpenChannels(bobKeyPub)
switch err {
case nil:
case bbolt.ErrDatabaseNotOpen:
dbBob, err = channeldb.Open(dbBob.Path())
if err != nil {
return nil, nil, errors.Errorf("unable to reopen bob "+
return nil, errors.Errorf("unable to reopen bob "+
"db: %v", err)
}
bobStoredChannels, err = dbBob.FetchOpenChannels(bobKeyPub)
if err != nil {
return nil, nil, errors.Errorf("unable to fetch bob "+
return nil, errors.Errorf("unable to fetch bob "+
"channel: %v", err)
}
default:
return nil, nil, errors.Errorf("unable to fetch bob channel: "+
return nil, errors.Errorf("unable to fetch bob channel: "+
"%v", err)
}
@ -477,20 +484,31 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
}
if bobStoredChannel == nil {
return nil, nil, errors.New("unable to find stored bob channel")
return nil, errors.New("unable to find stored bob channel")
}
newBobChannel, err := lnwallet.NewLightningChannel(
bobSigner, nil, bobStoredChannel, bobPool,
bobSigner, bobStoredChannel, bobPool,
)
if err != nil {
return nil, nil, errors.Errorf("unable to create new channel: %v",
return nil, errors.Errorf("unable to create new channel: %v",
err)
}
return newAliceChannel, newBobChannel, nil
return newBobChannel, nil
}
return channelAlice, channelBob, cleanUpFunc, restore, nil
testLightningChannelAlice := &testLightningChannel{
channel: channelAlice,
restore: restoreAlice,
}
testLightningChannelBob := &testLightningChannel{
channel: channelBob,
restore: restoreBob,
}
return testLightningChannelAlice, testLightningChannelBob, cleanUpFunc,
nil
}
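For orientation, a hedged sketch (not part of the diff) of how a test might consume the new return type; the amounts are placeholders and the channel ID comes from the package's genIDs helper:

	_, _, chanID, _ := genIDs()
	alice, bob, cleanUp, err := createTestChannel(
		alicePrivKey, bobPrivKey, btcutil.SatoshiPerBitcoin,
		btcutil.SatoshiPerBitcoin, 0, 0, chanID,
	)
	if err != nil {
		t.Fatalf("unable to create test channel: %v", err)
	}
	defer cleanUp()

	// The live channel objects are used directly for the happy path.
	_, _ = alice.channel, bob.channel

	// After a simulated restart, each side is reloaded from disk
	// independently instead of through a single combined closure.
	restoredAlice, err := alice.restore()
	if err != nil {
		t.Fatalf("unable to restore alice: %v", err)
	}
	_ = restoredAlice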
// getChanID retrieves the channel point from an lnwire message.
@ -827,7 +845,7 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
_, _, firstChanID, secondChanID := genIDs()
// Create lightning channels between Alice<->Bob and Bob<->Carol
aliceChannel, firstBobChannel, cleanAliceBob, restoreAliceBob, err :=
aliceChannel, firstBobChannel, cleanAliceBob, err :=
createTestChannel(alicePrivKey, bobPrivKey, aliceToBob,
aliceToBob, 0, 0, firstChanID)
if err != nil {
@ -835,7 +853,7 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
"alice<->bob channel: %v", err)
}
secondBobChannel, carolChannel, cleanBobCarol, restoreBobCarol, err :=
secondBobChannel, carolChannel, cleanBobCarol, err :=
createTestChannel(bobPrivKey, carolPrivKey, bobToCarol,
bobToCarol, 0, 0, secondChanID)
if err != nil {
@ -850,12 +868,23 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
}
restoreFromDb := func() (*clusterChannels, error) {
a2b, b2a, err := restoreAliceBob()
a2b, err := aliceChannel.restore()
if err != nil {
return nil, err
}
b2c, c2b, err := restoreBobCarol()
b2a, err := firstBobChannel.restore()
if err != nil {
return nil, err
}
b2c, err := secondBobChannel.restore()
if err != nil {
return nil, err
}
c2b, err := carolChannel.restore()
if err != nil {
return nil, err
}
@ -869,10 +898,10 @@ func createClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
}
return &clusterChannels{
aliceToBob: aliceChannel,
bobToAlice: firstBobChannel,
bobToCarol: secondBobChannel,
carolToBob: carolChannel,
aliceToBob: aliceChannel.channel,
bobToAlice: firstBobChannel.channel,
bobToCarol: secondBobChannel.channel,
carolToBob: carolChannel.channel,
}, cleanUp, restoreFromDb, nil
}
@ -971,13 +1000,13 @@ func newThreeHopNetwork(t testing.TB, aliceChannel, firstBobChannel,
// createTwoClusterChannels creates lightning channels which are needed for
// a 2 hop network cluster to be initialized.
func createTwoClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
*lnwallet.LightningChannel, *lnwallet.LightningChannel,
*testLightningChannel, *testLightningChannel,
func(), error) {
_, _, firstChanID, _ := genIDs()
// Create lightning channels between Alice<->Bob and Bob<->Carol
aliceChannel, firstBobChannel, cleanAliceBob, _, err :=
alice, bob, cleanAliceBob, err :=
createTestChannel(alicePrivKey, bobPrivKey, aliceToBob,
aliceToBob, 0, 0, firstChanID)
if err != nil {
@ -985,7 +1014,7 @@ func createTwoClusterChannels(aliceToBob, bobToCarol btcutil.Amount) (
"alice<->bob channel: %v", err)
}
return aliceChannel, firstBobChannel, cleanAliceBob, nil
return alice, bob, cleanAliceBob, nil
}
// hopNetwork is the base struct for two and three hop networks
@ -1025,7 +1054,6 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer,
decoder *mockIteratorDecoder) (ChannelLink, error) {
const (
batchTimeout = 50 * time.Millisecond
fwdPkgTimeout = 15 * time.Second
minFeeUpdateTimeout = 30 * time.Minute
maxFeeUpdateTimeout = 40 * time.Minute
@ -1053,12 +1081,11 @@ func (h *hopNetwork) createChannelLink(server, peer *mockServer,
ChainEvents: &contractcourt.ChainEventSubscription{},
SyncStates: true,
BatchSize: 10,
BatchTicker: ticker.NewForce(batchTimeout),
BatchTicker: ticker.NewForce(testBatchTimeout),
FwdPkgGCTicker: ticker.NewForce(fwdPkgTimeout),
MinFeeUpdateTimeout: minFeeUpdateTimeout,
MaxFeeUpdateTimeout: maxFeeUpdateTimeout,
OnChannelFailure: func(lnwire.ChannelID, lnwire.ShortChannelID, LinkFailureError) {},
FinalCltvRejectDelta: 5,
OutgoingCltvRejectDelta: 3,
},
channel,
@ -1231,3 +1258,21 @@ func (n *twoHopNetwork) makeHoldPayment(sendingPeer, receivingPeer lnpeer.Peer,
return paymentErr
}
// timeout implements a test level timeout.
func timeout(t *testing.T) func() {
done := make(chan struct{})
go func() {
select {
case <-time.After(5 * time.Second):
pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
panic("test timeout")
case <-done:
}
}()
return func() {
close(done)
}
}
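A hedged usage sketch (the test name is hypothetical): arming this watchdog at the top of a test converts a hang into a goroutine stack dump plus panic rather than a silent CI timeout.

	func TestSomethingThatMayHang(t *testing.T) {
		// Disarm the watchdog on normal completion; if the test runs
		// longer than five seconds, goroutine stacks are dumped to
		// stdout and the test panics.
		defer timeout(t)()

		// ... test body ...
	}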

@ -2,6 +2,7 @@ package invoices
import (
"bytes"
"errors"
"fmt"
"sync"
"sync/atomic"
@ -27,12 +28,64 @@ var (
DebugHash = DebugPre.Hash()
)
// HtlcCancelReason defines reasons for which htlcs can be canceled.
type HtlcCancelReason uint8
const (
// CancelInvoiceUnknown is returned if the preimage is unknown.
CancelInvoiceUnknown HtlcCancelReason = iota
// CancelExpiryTooSoon is returned when the timelock of the htlc
// does not satisfy the invoice cltv expiry requirement.
CancelExpiryTooSoon
// CancelInvoiceCanceled is returned when the invoice is already
// canceled and can't be paid to anymore.
CancelInvoiceCanceled
// CancelAmountTooLow is returned when the amount paid is too low.
CancelAmountTooLow
)
// String returns a human readable identifier for the cancel reason.
func (r HtlcCancelReason) String() string {
switch r {
case CancelInvoiceUnknown:
return "InvoiceUnknown"
case CancelExpiryTooSoon:
return "ExpiryTooSoon"
case CancelInvoiceCanceled:
return "InvoiceCanceled"
case CancelAmountTooLow:
return "CancelAmountTooLow"
default:
return "Unknown"
}
}
var (
// ErrInvoiceExpiryTooSoon is returned when an invoice is attempted to be
// accepted or settled with not enough blocks remaining.
ErrInvoiceExpiryTooSoon = errors.New("invoice expiry too soon")
// ErrInvoiceAmountTooLow is returned when an invoice is attempted to be
// accepted or settled with an amount that is too low.
ErrInvoiceAmountTooLow = errors.New("paid amount less than invoice amount")
)
// HodlEvent describes how an htlc should be resolved. If HodlEvent.Preimage is
// set, the event indicates a settle event. If Preimage is nil, it is a cancel
// event.
type HodlEvent struct {
// Preimage is the htlc preimage. Its value is nil in case of a cancel.
Preimage *lntypes.Preimage
// Hash is the htlc hash.
Hash lntypes.Hash
// CancelReason specifies the reason why invoice registry decided to
// cancel the htlc.
CancelReason HtlcCancelReason
}
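To make the settle-versus-cancel convention concrete, a minimal consumer sketch (handleSettle and handleCancel are hypothetical helpers, not part of this change):

	func resolveHodlEvent(event *HodlEvent) {
		if event.Preimage != nil {
			// Settle: the preimage is known and can be used to
			// claim the htlc.
			handleSettle(event.Hash, *event.Preimage)
			return
		}

		// Cancel: no preimage, fail the htlc back and report why.
		handleCancel(event.Hash, event.CancelReason)
	}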
// InvoiceRegistry is a central registry of all the outstanding invoices
@ -70,6 +123,13 @@ type InvoiceRegistry struct {
// is used to unsubscribe from all hashes efficiently.
hodlReverseSubscriptions map[chan<- interface{}]map[lntypes.Hash]struct{}
// finalCltvRejectDelta defines the number of blocks before the expiry
// of the htlc where we no longer settle it as an exit hop and instead
// cancel it back. Normally this value should be lower than the cltv
// expiry of any invoice we create, so the code enforcing it should
// not be hit.
finalCltvRejectDelta int32
wg sync.WaitGroup
quit chan struct{}
}
@ -79,7 +139,7 @@ type InvoiceRegistry struct {
// layer. The in-memory layer is in place such that debug invoices can be added
// which are volatile yet available system wide within the daemon.
func NewRegistry(cdb *channeldb.DB, decodeFinalCltvExpiry func(invoice string) (
uint32, error)) *InvoiceRegistry {
uint32, error), finalCltvRejectDelta int32) *InvoiceRegistry {
return &InvoiceRegistry{
cdb: cdb,
@ -93,6 +153,7 @@ func NewRegistry(cdb *channeldb.DB, decodeFinalCltvExpiry func(invoice string) (
hodlSubscriptions: make(map[lntypes.Hash]map[chan<- interface{}]struct{}),
hodlReverseSubscriptions: make(map[chan<- interface{}]map[lntypes.Hash]struct{}),
decodeFinalCltvExpiry: decodeFinalCltvExpiry,
finalCltvRejectDelta: finalCltvRejectDelta,
quit: make(chan struct{}),
}
}
@ -460,6 +521,35 @@ func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice,
return invoice, expiry, nil
}
// checkHtlcParameters is a callback used inside invoice db transactions to
// atomically check-and-update an invoice.
func (i *InvoiceRegistry) checkHtlcParameters(invoice *channeldb.Invoice,
amtPaid lnwire.MilliSatoshi, htlcExpiry uint32, currentHeight int32) error {
expiry, err := i.decodeFinalCltvExpiry(string(invoice.PaymentRequest))
if err != nil {
return err
}
if htlcExpiry < uint32(currentHeight+i.finalCltvRejectDelta) {
return ErrInvoiceExpiryTooSoon
}
if htlcExpiry < uint32(currentHeight)+expiry {
return ErrInvoiceExpiryTooSoon
}
// If an invoice amount is specified, check that enough is paid. This
// check is only performed for open invoices. Once a sufficiently large
// payment has been made and the invoice is in the accepted or settled
// state, any amount will be accepted on top of that.
if invoice.Terms.Value > 0 && amtPaid < invoice.Terms.Value {
return ErrInvoiceAmountTooLow
}
return nil
}
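To see how the two height checks compose, a hedged helper (not in the diff) that computes the lowest htlc expiry they would accept; with currentHeight=1000, finalCltvRejectDelta=3 and a decoded invoice expiry of 40, the result is 1040, so the invoice's own cltv expiry is the binding constraint whenever it exceeds the registry-wide reject delta:

	func minAcceptableExpiry(currentHeight, rejectDelta int32,
		invoiceExpiry uint32) uint32 {

		floor := uint32(currentHeight + rejectDelta)
		invoiceFloor := uint32(currentHeight) + invoiceExpiry
		if invoiceFloor > floor {
			return invoiceFloor
		}
		return floor
	}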
// NotifyExitHopHtlc attempts to mark an invoice as settled. If the invoice is a
// debug invoice, then this method is a noop as debug invoices are never fully
// settled. The return value describes how the htlc should be resolved.
@ -472,59 +562,112 @@ func (i *InvoiceRegistry) LookupInvoice(rHash lntypes.Hash) (channeldb.Invoice,
// the channel is either buffered or received on from another goroutine to
// prevent deadlock.
func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash,
amtPaid lnwire.MilliSatoshi, hodlChan chan<- interface{}) (
*HodlEvent, error) {
amtPaid lnwire.MilliSatoshi, expiry uint32, currentHeight int32,
hodlChan chan<- interface{}) (*HodlEvent, error) {
i.Lock()
defer i.Unlock()
log.Debugf("Invoice(%x): htlc accepted", rHash[:])
createEvent := func(preimage *lntypes.Preimage) *HodlEvent {
return &HodlEvent{
Hash: rHash,
Preimage: preimage,
debugLog := func(s string) {
log.Debugf("Invoice(%x): %v, amt=%v, expiry=%v",
rHash[:], s, amtPaid, expiry)
}
}
// First check the in-memory debug invoice index to see if this is an
// existing invoice added for debugging.
if invoice, ok := i.debugInvoices[rHash]; ok {
debugLog("payment to debug invoice accepted")
// Debug invoices are never fully settled, so we just settle the
// htlc in this case.
return createEvent(&invoice.Terms.PaymentPreimage), nil
return &HodlEvent{
Hash: rHash,
Preimage: &invoice.Terms.PaymentPreimage,
}, nil
}
// If this isn't a debug invoice, then we'll attempt to settle an
// invoice matching this rHash on disk (if one exists).
invoice, err := i.cdb.AcceptOrSettleInvoice(rHash, amtPaid)
invoice, err := i.cdb.AcceptOrSettleInvoice(
rHash, amtPaid,
func(inv *channeldb.Invoice) error {
return i.checkHtlcParameters(
inv, amtPaid, expiry, currentHeight,
)
},
)
switch err {
// If invoice is already settled, settle htlc. This means we accept more
// payments to the same invoice hash.
//
// NOTE: Though our recovery and forwarding logic is predominately
// batched, settling invoices happens iteratively. We may reject one of
// two payments for the same rhash at first, but then restart and reject
// both after seeing that the invoice has been settled. Without any
// record of which one settles first, it is ambiguous as to which one
// actually settled the invoice. Thus, by accepting all payments, we
// eliminate the race condition that can lead to this inconsistency.
//
// TODO(conner): track ownership of settlements to properly recover from
// failures? or add batch invoice settlement
case channeldb.ErrInvoiceAlreadySettled:
return createEvent(&invoice.Terms.PaymentPreimage), nil
debugLog("accepting duplicate payment to settled invoice")
return &HodlEvent{
Hash: rHash,
Preimage: &invoice.Terms.PaymentPreimage,
}, nil
// If invoice is already canceled, cancel htlc.
case channeldb.ErrInvoiceAlreadyCanceled:
return createEvent(nil), nil
debugLog("invoice already canceled")
return &HodlEvent{
Hash: rHash,
CancelReason: CancelInvoiceCanceled,
}, nil
// If invoice is already accepted, add this htlc to the list of
// subscribers.
case channeldb.ErrInvoiceAlreadyAccepted:
debugLog("accepting duplicate payment to accepted invoice")
i.hodlSubscribe(hodlChan, rHash)
return nil, nil
// If there are not enough blocks left, cancel the htlc.
case ErrInvoiceExpiryTooSoon:
debugLog("expiry too soon")
return &HodlEvent{
Hash: rHash,
CancelReason: CancelExpiryTooSoon,
}, nil
// If the amount paid is too low, cancel the htlc.
case ErrInvoiceAmountTooLow:
debugLog("amount too low")
return &HodlEvent{
Hash: rHash,
CancelReason: CancelAmountTooLow,
}, nil
// If this call settled the invoice, settle the htlc. Otherwise
// subscribe for a future hodl event.
case nil:
i.notifyClients(rHash, invoice, invoice.Terms.State)
switch invoice.Terms.State {
case channeldb.ContractSettled:
log.Debugf("Invoice(%x): settled", rHash[:])
debugLog("settled")
return createEvent(&invoice.Terms.PaymentPreimage), nil
return &HodlEvent{
Hash: rHash,
Preimage: &invoice.Terms.PaymentPreimage,
}, nil
case channeldb.ContractAccepted:
debugLog("accepted")
// Subscribe to updates to this invoice.
i.hodlSubscribe(hodlChan, rHash)
return nil, nil
@ -534,6 +677,8 @@ func (i *InvoiceRegistry) NotifyExitHopHtlc(rHash lntypes.Hash,
}
default:
debugLog(err.Error())
return nil, err
}
}
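The caller contract implied by the switch above, as a hedged sketch (resolveHodlEvent is the consumer sketch from earlier; the hodl channel is buffered, as the doc comment requires):

	func waitForResolution(registry *InvoiceRegistry, rHash lntypes.Hash,
		amtPaid lnwire.MilliSatoshi, htlcExpiry uint32,
		currentHeight int32) error {

		// Buffered so the registry never blocks on notification.
		hodlChan := make(chan interface{}, 1)
		event, err := registry.NotifyExitHopHtlc(
			rHash, amtPaid, htlcExpiry, currentHeight, hodlChan,
		)
		switch {
		case err != nil:
			// Unknown invoice or a database failure.
			return err
		case event != nil:
			// Immediate resolution: settle or cancel right away.
			resolveHodlEvent(event)
		default:
			// Hodl invoice: the resolution arrives later as a
			// HodlEvent value on hodlChan.
			e := (<-hodlChan).(HodlEvent)
			resolveHodlEvent(&e)
		}
		return nil
	}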
@ -585,6 +730,7 @@ func (i *InvoiceRegistry) CancelInvoice(payHash lntypes.Hash) error {
log.Debugf("Invoice(%v): canceled", payHash)
i.notifyHodlSubscribers(HodlEvent{
Hash: payHash,
CancelReason: CancelInvoiceCanceled,
})
i.notifyClients(payHash, invoice, channeldb.ContractCanceled)

@ -6,11 +6,9 @@ import (
"testing"
"time"
"github.com/btcsuite/btcd/chaincfg"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/zpay32"
)
var (
@ -23,18 +21,15 @@ var (
hash = preimage.Hash()
// testPayReq is a dummy payment request that does parse properly. It
// has no relation with the real invoice parameters and isn't asserted
// on in this test. LookupInvoice requires this to have a valid value.
testPayReq = "lnbc500u1pwywxzwpp5nd2u9xzq02t0tuf2654as7vma42lwkcjptx4yzfq0umq4swpa7cqdqqcqzysmlpc9ewnydr8rr8dnltyxphdyf6mcqrsd6dml8zajtyhwe6a45d807kxtmzayuf0hh2d9tn478ecxkecdg7c5g85pntupug5kakm7xcpn63zqk"
testInvoiceExpiry = uint32(3)
testCurrentHeight = int32(0)
testFinalCltvRejectDelta = int32(3)
)
func decodeExpiry(payReq string) (uint32, error) {
invoice, err := zpay32.Decode(payReq, &chaincfg.MainNetParams)
if err != nil {
return 0, err
}
return uint32(invoice.MinFinalCLTVExpiry()), nil
return uint32(testInvoiceExpiry), nil
}
var (
@ -43,7 +38,6 @@ var (
PaymentPreimage: preimage,
Value: lnwire.MilliSatoshi(100000),
},
PaymentRequest: []byte(testPayReq),
}
)
@ -54,7 +48,7 @@ func newTestContext(t *testing.T) (*InvoiceRegistry, func()) {
}
// Instantiate and start the invoice registry.
registry := NewRegistry(cdb, decodeExpiry)
registry := NewRegistry(cdb, decodeExpiry, testFinalCltvRejectDelta)
err = registry.Start()
if err != nil {
@ -121,7 +115,9 @@ func TestSettleInvoice(t *testing.T) {
// Settle invoice with a slightly higher amount.
amtPaid := lnwire.MilliSatoshi(100500)
_, err = registry.NotifyExitHopHtlc(hash, amtPaid, hodlChan)
_, err = registry.NotifyExitHopHtlc(
hash, amtPaid, testInvoiceExpiry, 0, hodlChan,
)
if err != nil {
t.Fatal(err)
}
@ -153,13 +149,18 @@ func TestSettleInvoice(t *testing.T) {
}
// Try to settle again.
_, err = registry.NotifyExitHopHtlc(hash, amtPaid, hodlChan)
_, err = registry.NotifyExitHopHtlc(
hash, amtPaid, testInvoiceExpiry, testCurrentHeight, hodlChan,
)
if err != nil {
t.Fatal("expected duplicate settle to succeed")
}
// Try to settle again with a different amount.
_, err = registry.NotifyExitHopHtlc(hash, amtPaid+600, hodlChan)
_, err = registry.NotifyExitHopHtlc(
hash, amtPaid+600, testInvoiceExpiry, testCurrentHeight,
hodlChan,
)
if err != nil {
t.Fatal("expected duplicate settle to succeed")
}
@ -274,7 +275,9 @@ func TestCancelInvoice(t *testing.T) {
// Notify arrival of a new htlc paying to this invoice. This should
// succeed.
hodlChan := make(chan interface{})
event, err := registry.NotifyExitHopHtlc(hash, amt, hodlChan)
event, err := registry.NotifyExitHopHtlc(
hash, amt, testInvoiceExpiry, testCurrentHeight, hodlChan,
)
if err != nil {
t.Fatal("expected settlement of a canceled invoice to succeed")
}
@ -292,7 +295,7 @@ func TestHoldInvoice(t *testing.T) {
defer cleanup()
// Instantiate and start the invoice registry.
registry := NewRegistry(cdb, decodeExpiry)
registry := NewRegistry(cdb, decodeExpiry, testFinalCltvRejectDelta)
err = registry.Start()
if err != nil {
@ -345,7 +348,9 @@ func TestHoldInvoice(t *testing.T) {
// NotifyExitHopHtlc without a preimage present in the invoice registry
// should be possible.
event, err := registry.NotifyExitHopHtlc(hash, amtPaid, hodlChan)
event, err := registry.NotifyExitHopHtlc(
hash, amtPaid, testInvoiceExpiry, testCurrentHeight, hodlChan,
)
if err != nil {
t.Fatalf("expected settle to succeed but got %v", err)
}
@ -354,7 +359,9 @@ func TestHoldInvoice(t *testing.T) {
}
// Test idempotency.
event, err = registry.NotifyExitHopHtlc(hash, amtPaid, hodlChan)
event, err = registry.NotifyExitHopHtlc(
hash, amtPaid, testInvoiceExpiry, testCurrentHeight, hodlChan,
)
if err != nil {
t.Fatalf("expected settle to succeed but got %v", err)
}
@ -434,3 +441,24 @@ func newDB() (*channeldb.DB, func(), error) {
return cdb, cleanUp, nil
}
// TestUnknownInvoice tests that invoice registry returns an error when the
// invoice is unknown. This is to guard against returning a cancel hodl event
// for forwarded htlcs. In the link, NotifyExitHopHtlc is only called if we are
// the exit hop, but in htlcIncomingContestResolver it is called with forwarded
// htlc hashes as well.
func TestUnknownInvoice(t *testing.T) {
registry, cleanup := newTestContext(t)
defer cleanup()
// Notify arrival of a new htlc paying to this invoice. This should
// succeed.
hodlChan := make(chan interface{})
amt := lnwire.MilliSatoshi(100000)
_, err := registry.NotifyExitHopHtlc(
hash, amt, testInvoiceExpiry, testCurrentHeight, hodlChan,
)
if err != channeldb.ErrInvoiceNotFound {
t.Fatal("expected invoice not found error")
}
}

@ -0,0 +1,421 @@
// +build rpctest
package lnd
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntypes"
)
// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
// we're forced to go to chain with an incoming HTLC, then when we find out the
// preimage via the witness beacon, we properly settle the HTLC on-chain in
// order to ensure we don't lose any funds.
func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, false,
)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With the network active, we'll now add a new hodl invoice at Carol's
// end. Make sure the cltv expiry delta is large enough, otherwise Bob
// won't send out the outgoing htlc.
const invoiceAmt = 100000
preimage := lntypes.Preimage{1, 2, 3}
payHash := preimage.Hash()
invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
Value: invoiceAmt,
CltvExpiry: 40,
Hash: payHash[:],
}
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
// hop logic.
waitForInvoiceAccepted(t, carol, payHash)
// At this point, Bob decides that he wants to exit the channel
// immediately, so he force closes his commitment transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
bobForceClose := closeChannelAndAssert(ctxt, t, net, net.Bob,
aliceChanPoint, true)
// Alice will sweep her output immediately.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find alice's sweep tx in miner mempool: %v",
err)
}
// Suspend Bob to force Carol to go to chain.
restartBob, err := net.SuspendNode(net.Bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
// this test, it is important to actually settle and not leave the
// invoice in the accepted state, because without a known preimage, the
// channel arbitrator won't go to chain.
ctx, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := uint32(invoiceReq.CltvExpiry -
defaultIncomingBroadcastDelta)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
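// For scale (assumed value, not taken from this diff):
// defaultIncomingBroadcastDelta is a constant elsewhere in the
// codebase; if it were 10, numBlocks above would be 40 - 10 = 30,
// meaning Carol goes to chain 10 blocks before the htlc's absolute
// expiry.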
// Carol's commitment transaction should now be in the mempool.
txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// The tx should be spending from the funding transaction.
commitHash := txids[0]
tx1, err := net.Miner.Node.GetRawTransaction(commitHash)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint {
t.Fatalf("commit transaction not spending fundingtx: %v",
spew.Sdump(tx1))
}
// Mine a block that should confirm the commit tx.
block := mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
assertTxInBlock(t, block, commitHash)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
// After the force close transaction is mined, Carol should broadcast
// her second level HTLC transaction. Bob will broadcast a sweep tx to
// sweep his output in the channel with Carol. He can do this
// immediately, as the output is not timelocked since Carol was the one
// force closing.
commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// Both Carol's second level transaction and Bob's sweep should be
// spending from the commitment transaction.
for _, txid := range commitSpends {
tx, err := net.Miner.Node.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("tx did not spend from commitment tx")
}
}
// Mine a block to confirm the two transactions (+ the coinbase).
block = mineBlocks(t, net, 1, 2)[0]
if len(block.Transactions) != 3 {
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
for _, txid := range commitSpends {
assertTxInBlock(t, block, txid)
}
// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a second level tx to claim
// the HTLC in his (already closed) channel with Alice.
bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose {
t.Fatalf("tx did not spend from bob's force close tx")
}
// At this point, Bob should have broadcast his second layer success
// transaction, and should have sent it to the nursery for incubation.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels {
if forceCloseChan.Channel.LocalBalance != 0 {
continue
}
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
stage := forceCloseChan.PendingHtlcs[0].Stage
if stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the first stage but was "+
"stage: %v", stage)
return false
}
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// We'll now mine a block which should confirm Bob's second layer
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
assertTxInBlock(t, block, bobSecondLvlTx)
// Keep track of Bob's second level maturity, and decrement our track
// of Carol's.
bobSecondLevelCSV := uint32(defaultCSV)
carolSecondLevelCSV--
// If we then mine 3 additional blocks, Carol's second level tx should
// mature, and she can pull the funds from it with a sweep tx.
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
bobSecondLevelCSV -= carolSecondLevelCSV
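// Tracing the bookkeeping (assuming defaultCSV is 4, consistent with
// the "3 additional blocks" comment above): carolSecondLevelCSV is 3
// at this point, so bobSecondLevelCSV becomes 4 - 3 = 1, which is why
// a single further block below is enough to mature Bob's second level
// tx.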
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
}
// After mining one additional block, Bob's second level tx is mature,
// and he can sweep the output.
block = mineBlocks(t, net, bobSecondLevelCSV, 1)[0]
assertTxInBlock(t, block, carolSweep)
bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
// Make sure it spends from the second level tx.
tx, err = net.Miner.Node.GetRawTransaction(bobSweep)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx {
t.Fatalf("tx did not spend from bob's second level tx")
}
// When we mine one additional block, that will confirm Bob's sweep.
// Bob should then have no pending channels anymore, as the
// confirmation of the sweep transaction resolves the last of them.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, bobSweep)
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := net.Bob.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
return false
}
if len(chanInfo.Channels) != 0 {
predErr = fmt.Errorf("Bob should have no open "+
"channels, instead he has %v",
len(chanInfo.Channels))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// Also Carol should have no channels left (open nor pending).
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob carol has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := carol.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
return false
}
if len(chanInfo.Channels) != 0 {
predErr = fmt.Errorf("carol should have no open "+
"channels, instead she has %v",
len(chanInfo.Channels))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
}
// waitForInvoiceAccepted waits until the specified invoice has been moved
// to the accepted state by the node.
func waitForInvoiceAccepted(t *harnessTest, node *lntest.HarnessNode,
payHash lntypes.Hash) {
ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
defer cancel()
invoiceUpdates, err := node.SubscribeSingleInvoice(ctx,
&invoicesrpc.SubscribeSingleInvoiceRequest{
RHash: payHash[:],
},
)
if err != nil {
t.Fatalf("subscribe single invoice: %v", err)
}
for {
update, err := invoiceUpdates.Recv()
if err != nil {
t.Fatalf("invoice update err: %v", err)
}
if update.State == lnrpc.Invoice_ACCEPTED {
break
}
}
}

@ -0,0 +1,303 @@
// +build rpctest
package lnd
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntypes"
)
// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC
// off-chain, then it goes on chain to claim the HTLC. In this scenario, the
// node that sent the outgoing HTLC should extract the preimage from the sweep
// transaction, and finish settling the HTLC backwards into the route.
func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, false,
)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With the network active, we'll now add a new hodl invoice at Carol's
// end. Make sure the cltv expiry delta is large enough, otherwise Bob
// won't send out the outgoing htlc.
const invoiceAmt = 100000
preimage := lntypes.Preimage{1, 2, 4}
payHash := preimage.Hash()
invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
Value: invoiceAmt,
CltvExpiry: 40,
Hash: payHash[:],
}
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
// hop logic.
waitForInvoiceAccepted(t, carol, payHash)
restartBob, err := net.SuspendNode(net.Bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
// this test, it is important to actually settle and not leave the
// invoice in the accepted state, because without a known preimage, the
// channel arbitrator won't go to chain.
ctx, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
// Now we'll mine enough blocks to prompt carol to actually go to the
// chain in order to sweep her HTLC since the value is high enough.
// TODO(roasbeef): modify once go to chain policy changes
numBlocks := uint32(
invoiceReq.CltvExpiry - defaultIncomingBroadcastDelta,
)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
// At this point, Carol should broadcast her active commitment
// transaction in order to go to the chain and sweep her HTLC.
txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout)
if err != nil {
t.Fatalf("expected transaction not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// The commitment transaction should be spending from the funding
// transaction.
commitHash := txids[0]
tx, err := net.Miner.Node.GetRawTransaction(commitHash)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
commitTx := tx.MsgTx()
if commitTx.TxIn[0].PreviousOutPoint != carolFundingPoint {
t.Fatalf("commit transaction not spending from expected "+
"outpoint: %v", spew.Sdump(commitTx))
}
// Confirm the commitment.
mineBlocks(t, net, 1, 1)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
// After the force close transaction is mined, Carol should broadcast
// her second level HTLC transaction. Bob will broadcast a sweep tx to
// sweep his output in the channel with Carol. When Bob notices Carol's
// second level transaction in the mempool, he will extract the
// preimage and settle the HTLC back off-chain.
secondLevelHashes, err := waitForNTxsInMempool(net.Miner.Node, 2,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// Carol's second level transaction should be spending from
// the commitment transaction.
var secondLevelHash *chainhash.Hash
for _, txid := range secondLevelHashes {
tx, err := net.Miner.Node.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash == *commitHash {
secondLevelHash = txid
}
}
if secondLevelHash == nil {
t.Fatalf("Carol's second level tx not found")
}
// We'll now mine an additional block which should confirm both the
// second layer transactions.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
time.Sleep(time.Second * 4)
// TODO(roasbeef): assert bob pending state as well
// Carol's pending channel report should now show two outputs under
// limbo: her commitment output, as well as the second-layer claim
// output.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("carol should have pending for close chan but doesn't")
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("carol should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
// The pending HTLC carol has should also now be in stage 2.
if len(forceCloseChan.PendingHtlcs) != 1 {
t.Fatalf("carol should have pending htlc but doesn't")
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
t.Fatalf("carol's htlc should have advanced to the second "+
"stage: %v", err)
}
// Once the second-level transaction confirmed, Bob should have
// extracted the preimage from the chain, and sent it back to Alice,
// clearing the HTLC off-chain.
nodes = []*lntest.HarnessNode{net.Alice}
err = lntest.WaitPredicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// If we mine 4 additional blocks, then both outputs should now be
// mature.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// We should have a new transaction in the mempool.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction: %v", err)
}
// Finally, if we mine an additional block to confirm these two sweep
// transactions, Carol should not show a pending channel in her report
// afterwards.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to mine block: %v", err)
}
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("carol still has pending channels: %v",
spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
if err != nil {
t.Fatalf("unable to retrieve invoices: %v", err)
}
if len(invoicesResp.Invoices) != 1 {
t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
}
invoice := invoicesResp.Invoices[0]
if invoice.State != lnrpc.Invoice_SETTLED {
t.Fatalf("expected invoice to be settled on chain")
}
if invoice.AmtPaidSat != invoiceAmt {
t.Fatalf("expected invoice to be settled with %d sat, got "+
"%d sat", invoiceAmt, invoice.AmtPaidSat)
}
// We'll close out the channel between Alice and Bob, then shutdown
// carol to conclude the test.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
}

@ -0,0 +1,333 @@
// +build rpctest
package lnd
import (
"context"
"fmt"
"time"
"github.com/btcsuite/btcd/wire"
"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntypes"
)
// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
// if the remote party goes to chain while we have an incoming HTLC, then when
// we find out the preimage via the witness beacon, we properly settle the
// HTLC on-chain in order to ensure that we don't lose any funds.
func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(
t, net, false,
)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With the network active, we'll now add a new hodl invoice at Carol's
// end. Make sure the cltv expiry delta is large enough, otherwise Bob
// won't send out the outgoing htlc.
const invoiceAmt = 100000
preimage := lntypes.Preimage{1, 2, 5}
payHash := preimage.Hash()
invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
Value: invoiceAmt,
CltvExpiry: 40,
Hash: payHash[:],
}
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to add invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, payHash[:])
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
// hop logic.
waitForInvoiceAccepted(t, carol, payHash)
// Next, Alice decides that she wants to exit the channel, so she'll
// immediately force close the channel by broadcasting her commitment
// transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
aliceForceClose := closeChannelAndAssert(ctxt, t, net, net.Alice,
aliceChanPoint, true)
// Wait for the channel to be marked pending force close.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForChannelPendingForceClose(ctxt, net.Alice, aliceChanPoint)
if err != nil {
t.Fatalf("channel not pending force close: %v", err)
}
// Mine enough blocks for Alice to sweep her funds from the force
// closed channel.
_, err = net.Miner.Node.Generate(defaultCSV)
if err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Alice should now sweep her funds.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find sweeping tx in mempool: %v", err)
}
// Suspend bob, so Carol is forced to go on chain.
restartBob, err := net.SuspendNode(net.Bob)
if err != nil {
t.Fatalf("unable to suspend bob: %v", err)
}
// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
// this test, it is important to actually settle and not leave the
// invoice in the accepted state, because without a known preimage, the
// channel arbitrator won't go to chain.
ctx, cancel = context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
if err != nil {
t.Fatalf("settle invoice: %v", err)
}
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := uint32(invoiceReq.CltvExpiry-
defaultIncomingBroadcastDelta) - defaultCSV
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
// Carol's commitment transaction should now be in the mempool.
txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// The transaction should be spending from the funding transaction.
commitHash := txids[0]
tx1, err := net.Miner.Node.GetRawTransaction(commitHash)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint {
t.Fatalf("commit transaction not spending fundingtx: %v",
spew.Sdump(tx1))
}
// Mine a block, which should contain the commitment.
block := mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
assertTxInBlock(t, block, commitHash)
// Restart bob again.
if err := restartBob(); err != nil {
t.Fatalf("unable to restart bob: %v", err)
}
// After the force close transaction is mined, Carol should broadcast
// her second level HTLC transaction. Bob will broadcast a sweep tx to
// sweep his output in the channel with Carol. He can do this
// immediately, as the output is not timelocked since Carol was the one
// force closing.
commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// Both Carol's second level transaction and Bob's sweep should be
// spending from the commitment transaction.
for _, txid := range commitSpends {
tx, err := net.Miner.Node.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("tx did not spend from commitment tx")
}
}
// Mine a block to confirm the two transactions (+ coinbase).
block = mineBlocks(t, net, 1, 2)[0]
if len(block.Transactions) != 3 {
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
for _, txid := range commitSpends {
assertTxInBlock(t, block, txid)
}
// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a sweep tx to directly claim
// the HTLC in his (already closed) channel with Alice.
bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose {
t.Fatalf("tx did not spend from alice's force close tx")
}
// We'll now mine a block which should confirm Bob's HTLC sweep
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
assertTxInBlock(t, block, bobHtlcSweep)
carolSecondLevelCSV--
// Now that the sweeping transaction has been confirmed, Bob should now
// recognize that all contracts have been fully resolved, and show no
// pending close channels.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// If we then mine 3 additional blocks, Carol's second level tx will
// mature, and she should pull the funds.
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
}
// When Carol's sweep gets confirmed, she should have no more pending
// channels.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, carolSweep)
pendingChansRequest = &lnrpc.PendingChannelsRequest{}
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("carol still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
if err != nil {
t.Fatalf("unable to retrieve invoices: %v", err)
}
if len(invoicesResp.Invoices) != 1 {
t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
}
invoice := invoicesResp.Invoices[0]
if invoice.State != lnrpc.Invoice_SETTLED {
t.Fatalf("expected invoice to be settled on chain")
}
if invoice.AmtPaidSat != invoiceAmt {
t.Fatalf("expected invoice to be settled with %d sat, got "+
"%d sat", invoiceAmt, invoice.AmtPaidSat)
}
}

@ -9315,8 +9315,10 @@ func assertSpendingTxInMempool(t *harnessTest, miner *rpcclient.Client,
}
}
func createThreeHopHodlNetwork(t *harnessTest,
net *lntest.NetworkHarness) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint, *lntest.HarnessNode) {
func createThreeHopNetwork(t *harnessTest, net *lntest.NetworkHarness,
carolHodl bool) (*lnrpc.ChannelPoint, *lnrpc.ChannelPoint,
*lntest.HarnessNode) {
ctxb := context.Background()
// We'll start the test by creating a channel between Alice and Bob,
@ -9342,10 +9344,14 @@ func createThreeHopHodlNetwork(t *harnessTest,
t.Fatalf("bob didn't report channel: %v", err)
}
// Next, we'll create a new node "carol" and have Bob connect to her.
// In this test, we'll make carol always hold onto the HTLC, this way
// it'll force Bob to go to chain to resolve the HTLC.
carol, err := net.NewNode("Carol", []string{"--debughtlc", "--hodl.exit-settle"})
// Next, we'll create a new node "carol" and have Bob connect to her. If
// the carolHodl flag is set, we'll make carol always hold onto the
// HTLC, this way it'll force Bob to go to chain to resolve the HTLC.
carolFlags := []string{"--debughtlc"}
if carolHodl {
carolFlags = append(carolFlags, "--hodl.exit-settle")
}
carol, err := net.NewNode("Carol", carolFlags)
if err != nil {
t.Fatalf("unable to create new node: %v", err)
}
@ -9393,7 +9399,8 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
aliceChanPoint, bobChanPoint, carol :=
createThreeHopNetwork(t, net, true)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
@ -9614,256 +9621,6 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest) {
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
}
// testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
// receiver of an HTLC knows the preimage, but wasn't able to settle the HTLC
// off-chain, then it goes on chain to claim the HTLC. In this scenario, the
// node that sent the outgoing HTLC should extract the preimage from the sweep
// transaction, and finish settling the HTLC backwards into the route.
func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With the network active, we'll now add a new invoice at Carol's end.
// Make sure the cltv expiry delta is large enough, otherwise Bob won't
// send out the outgoing htlc.
const invoiceAmt = 100000
invoiceReq := &lnrpc.Invoice{
Value: invoiceAmt,
CltvExpiry: 40,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, carolInvoice.RHash)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// Now we'll mine enough blocks to prompt carol to actually go to the
// chain in order to sweep her HTLC since the value is high enough.
// TODO(roasbeef): modify once go to chain policy changes
numBlocks := uint32(
invoiceReq.CltvExpiry - defaultIncomingBroadcastDelta,
)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
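// For concreteness: with CltvExpiry set to 40 above, and assuming a
// defaultIncomingBroadcastDelta of 10, Carol goes to chain once
// 40 - 10 = 30 blocks are mined, i.e. 10 blocks before the HTLC expires.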
// At this point, Carol should broadcast her active commitment
// transaction in order to go to the chain and sweep her HTLC.
txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout)
if err != nil {
t.Fatalf("expected transaction not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// The commitment transaction should be spending from the funding
// transaction.
commitHash := txids[0]
tx, err := net.Miner.Node.GetRawTransaction(commitHash)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
commitTx := tx.MsgTx()
if commitTx.TxIn[0].PreviousOutPoint != carolFundingPoint {
t.Fatalf("commit transaction not spending from expected "+
"outpoint: %v", spew.Sdump(commitTx))
}
// Confirm the commitment.
mineBlocks(t, net, 1, 1)
// After the force close transaction is mined, Carol should broadcast
// her second level HTLC transaction. Bob will broadcast a sweep tx to
// sweep his output in the channel with Carol. When Bob notices Carol's
// second level transaction in the mempool, he will extract the
// preimage and settle the HTLC back off-chain.
secondLevelHashes, err := waitForNTxsInMempool(net.Miner.Node, 2,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// Carol's second level transaction should be spending from
// the commitment transaction.
var secondLevelHash *chainhash.Hash
for _, txid := range secondLevelHashes {
tx, err := net.Miner.Node.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash == *commitHash {
secondLevelHash = txid
}
}
if secondLevelHash == nil {
t.Fatalf("Carol's second level tx not found")
}
// We'll now mine an additional block, which should confirm both of the
// second layer transactions.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
time.Sleep(time.Second * 4)
// TODO(roasbeef): assert bob pending state as well
// Carol's pending channel report should now show two outputs under
// limbo: her commitment output, as well as the second-layer claim
// output.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
t.Fatalf("unable to query for pending channels: %v", err)
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
t.Fatalf("carol should have pending for close chan but doesn't")
}
forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
if forceCloseChan.LimboBalance == 0 {
t.Fatalf("carol should have nonzero limbo balance instead "+
"has: %v", forceCloseChan.LimboBalance)
}
// Carol's pending HTLC should also now be in stage 2.
if len(forceCloseChan.PendingHtlcs) != 1 {
t.Fatalf("carol should have pending htlc but doesn't")
}
if forceCloseChan.PendingHtlcs[0].Stage != 2 {
	t.Fatalf("carol's htlc should have advanced to the second "+
		"stage: %v", forceCloseChan.PendingHtlcs[0].Stage)
}
// Once the second-level transaction confirmed, Bob should have
// extracted the preimage from the chain, and sent it back to Alice,
// clearing the HTLC off-chain.
nodes = []*lntest.HarnessNode{net.Alice}
err = lntest.WaitPredicate(func() bool {
predErr = assertNumActiveHtlcs(nodes, 0)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", predErr)
}
// If we mine 4 additional blocks, then both outputs should now be
// mature.
if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// We should have a new transaction in the mempool.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction: %v", err)
}
// Finally, if we mine an additional block to confirm these two sweep
// transactions, Carol should not show a pending channel in her report
// afterwards.
if _, err := net.Miner.Node.Generate(1); err != nil {
t.Fatalf("unable to mine block: %v", err)
}
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest)
if err != nil {
predErr = fmt.Errorf("unable to query for pending channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("carol still has pending channels: %v",
spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
if err != nil {
t.Fatalf("unable to retrieve invoices: %v", err)
}
if len(invoicesResp.Invoices) != 1 {
t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
}
invoice := invoicesResp.Invoices[0]
if invoice.State != lnrpc.Invoice_SETTLED {
t.Fatalf("expected invoice to be settled on chain")
}
if invoice.AmtPaidSat != invoiceAmt {
t.Fatalf("expected invoice to be settled with %d sat, got "+
"%d sat", invoiceAmt, invoice.AmtPaidSat)
}
// We'll close out the channel between Alice and Bob, then shutdown
// carol to conclude the test.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
}
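The WaitPredicate loop above that polls for pending force-close channels to drain appears in every test in this family. A hypothetical helper (not part of this change) capturing the pattern:

func assertNumPendingChannels(t *harnessTest, node *lntest.HarnessNode,
	expected int) {

	var predErr error
	err := lntest.WaitPredicate(func() bool {
		ctxt, _ := context.WithTimeout(
			context.Background(), defaultTimeout,
		)
		resp, err := node.PendingChannels(
			ctxt, &lnrpc.PendingChannelsRequest{},
		)
		if err != nil {
			predErr = fmt.Errorf("unable to query for pending "+
				"channels: %v", err)
			return false
		}
		n := len(resp.PendingForceClosingChannels)
		if n != expected {
			predErr = fmt.Errorf("expected %d pending force "+
				"close channels, got %d", expected, n)
			return false
		}
		return true
	}, time.Second*15)
	if err != nil {
		t.Fatalf("%v", predErr)
	}
}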
// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
// scenario, if the node that extended the HTLC to the final node closes their
// commitment on-chain early, then it eventually recognizes this HTLC as one
@ -9876,7 +9633,8 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
aliceChanPoint, bobChanPoint, carol :=
createThreeHopNetwork(t, net, true)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
@ -10137,7 +9895,8 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
aliceChanPoint, bobChanPoint, carol :=
createThreeHopNetwork(t, net, true)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
@ -10344,623 +10103,6 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
closeChannelAndAssert(ctxt, t, net, net.Alice, aliceChanPoint, false)
}
// testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
// we're forced to go to chain with an incoming HTLC, then when we find out the
// preimage via the witness beacon, we properly settle the HTLC on-chain in
// order to ensure we don't lose any funds.
func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With the network active, we'll now add a new invoice at Carol's end.
invoiceReq := &lnrpc.Invoice{
Value: 100000,
CltvExpiry: 40,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// We'll now wait until all 3 nodes have fully locked in the HTLC we
// just sent.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, carolInvoice.RHash)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// At this point, Bob decides that he wants to exit the channel
// immediately, so he force closes his commitment transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
bobForceClose := closeChannelAndAssert(ctxt, t, net, net.Bob,
aliceChanPoint, true)
// Alice will sweep her output immediately.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find alice's sweep tx in miner mempool: %v",
err)
}
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := uint32(invoiceReq.CltvExpiry -
defaultIncomingBroadcastDelta)
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
// Carol's commitment transaction should now be in the mempool.
txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// The tx should be spending from the funding transaction.
commitHash := txids[0]
tx1, err := net.Miner.Node.GetRawTransaction(commitHash)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint {
t.Fatalf("commit transaction not spending fundingtx: %v",
spew.Sdump(tx1))
}
// Mine a block that should confirm the commit tx.
block := mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
assertTxInBlock(t, block, commitHash)
// After the force close transaction is mined, Carol should broadcast
// her second level HTLC transaction. Bob will broadcast a sweep tx to
// sweep his output in the channel with Carol. He can do this
// immediately, as the output is not timelocked since Carol was the one
// force closing.
commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// Both Carol's second level transaction and Bob's sweep should be
// spending from the commitment transaction.
for _, txid := range commitSpends {
tx, err := net.Miner.Node.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("tx did not spend from commitment tx")
}
}
// Mine a block to confirm the two transactions (+ the coinbase).
block = mineBlocks(t, net, 1, 2)[0]
if len(block.Transactions) != 3 {
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
for _, txid := range commitSpends {
assertTxInBlock(t, block, txid)
}
// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a second level tx to claim
// the HTLC in his (already closed) channel with Alice.
bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose {
t.Fatalf("tx did not spend from bob's force close tx")
}
// At this point, Bob should have broadcast his second layer success
// transaction, and should have sent it to the nursery for incubation.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) == 0 {
predErr = fmt.Errorf("bob should have pending for " +
"close chan but doesn't")
return false
}
for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels {
if forceCloseChan.Channel.LocalBalance != 0 {
continue
}
if len(forceCloseChan.PendingHtlcs) != 1 {
predErr = fmt.Errorf("bob should have pending htlc " +
"but doesn't")
return false
}
stage := forceCloseChan.PendingHtlcs[0].Stage
if stage != 1 {
predErr = fmt.Errorf("bob's htlc should have "+
"advanced to the first stage but was "+
"stage: %v", stage)
return false
}
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
}
// We'll now mine a block which should confirm Bob's second layer
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
assertTxInBlock(t, block, bobSecondLvlTx)
// Keep track of Bob's second level maturity, and decrement our track
// of Carol's.
bobSecondLevelCSV := uint32(defaultCSV)
carolSecondLevelCSV--
// If we then mine 3 additional blocks, Carol's second level tx should
// mature, and she can pull the funds from it with a sweep tx.
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
bobSecondLevelCSV -= carolSecondLevelCSV
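// Worked numbers, using defaultCSV = 4 (the "mine 4 additional blocks"
// comment in the receiver chain claim test above implies this value):
// Carol's countdown started at 4 and was decremented once when Bob's
// second level tx confirmed, so Generate(3) matures her output. Bob's
// countdown then drops to 4 - 3 = 1, matching the single block mined
// for his second level tx below.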
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
}
// Mining one additional block, Bob's second level tx is mature, and he
// can sweep the output.
block = mineBlocks(t, net, bobSecondLevelCSV, 1)[0]
assertTxInBlock(t, block, carolSweep)
bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find bob's sweeping transaction")
}
// Make sure it spends from the second level tx.
tx, err = net.Miner.Node.GetRawTransaction(bobSweep)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx {
t.Fatalf("tx did not spend from bob's second level tx")
}
// When we mine one additional block, that will confirm Bob's sweep.
// Bob should then have no pending channels anymore, as the
// confirmation of the sweep transaction fully resolved them.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, bobSweep)
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := net.Bob.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
return false
}
if len(chanInfo.Channels) != 0 {
predErr = fmt.Errorf("Bob should have no open "+
"channels, instead he has %v",
len(chanInfo.Channels))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// Also, Carol should have no channels left (open or pending).
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob carol has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
req := &lnrpc.ListChannelsRequest{}
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
chanInfo, err := carol.ListChannels(ctxt, req)
if err != nil {
predErr = fmt.Errorf("unable to query for open "+
"channels: %v", err)
return false
}
if len(chanInfo.Channels) != 0 {
predErr = fmt.Errorf("carol should have no open "+
"channels, instead she has %v",
len(chanInfo.Channels))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
}
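Several steps in this test and the next fetch a raw transaction and check that its first input spends a particular txid. A hypothetical helper for that recurring assertion:

func assertTxSpendsFrom(t *harnessTest, miner *rpcclient.Client,
	txid, spendFrom *chainhash.Hash) {

	tx, err := miner.GetRawTransaction(txid)
	if err != nil {
		t.Fatalf("unable to get txn: %v", err)
	}
	prevHash := tx.MsgTx().TxIn[0].PreviousOutPoint.Hash
	if prevHash != *spendFrom {
		t.Fatalf("tx %v spends from %v, expected %v", txid,
			prevHash, spendFrom)
	}
}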
// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
// if the remote party goes to chain while we have an incoming HTLC, then when
// we find out the preimage via the witness beacon, we properly settle the
// HTLC on-chain in order to ensure that we don't lose any funds.
func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest) {
ctxb := context.Background()
// First, we'll create a three hop network: Alice -> Bob -> Carol, with
// Carol refusing to actually settle or directly cancel any HTLCs
// herself.
aliceChanPoint, bobChanPoint, carol := createThreeHopHodlNetwork(t, net)
// Clean up carol's node when the test finishes.
defer shutdownAndAssert(net, t, carol)
// With the network active, we'll now add a new invoice at Carol's end.
const invoiceAmt = 100000
invoiceReq := &lnrpc.Invoice{
Value: invoiceAmt,
CltvExpiry: 40,
}
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
carolInvoice, err := carol.AddInvoice(ctxt, invoiceReq)
if err != nil {
t.Fatalf("unable to generate carol invoice: %v", err)
}
// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
// will not immediately settle the payment.
ctx, cancel := context.WithCancel(ctxb)
defer cancel()
alicePayStream, err := net.Alice.SendPayment(ctx)
if err != nil {
t.Fatalf("unable to create payment stream for alice: %v", err)
}
err = alicePayStream.Send(&lnrpc.SendRequest{
PaymentRequest: carolInvoice.PaymentRequest,
})
if err != nil {
t.Fatalf("unable to send payment: %v", err)
}
// We'll now wait until all 3 nodes have fully locked in the HTLC we
// just sent.
var predErr error
nodes := []*lntest.HarnessNode{net.Alice, net.Bob, carol}
err = lntest.WaitPredicate(func() bool {
predErr = assertActiveHtlcs(nodes, carolInvoice.RHash)
if predErr != nil {
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf("htlc mismatch: %v", err)
}
// Next, Alice decides that she wants to exit the channel, so she'll
// immediately force close the channel by broadcasting her commitment
// transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
aliceForceClose := closeChannelAndAssert(ctxt, t, net, net.Alice,
aliceChanPoint, true)
// Wait for the channel to be marked pending force close.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForChannelPendingForceClose(ctxt, net.Alice, aliceChanPoint)
if err != nil {
t.Fatalf("channel not pending force close: %v", err)
}
// Mine enough blocks for Alice to sweep her funds from the force
// closed channel.
_, err = net.Miner.Node.Generate(defaultCSV)
if err != nil {
t.Fatalf("unable to generate blocks: %v", err)
}
// Alice should now sweep her funds.
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find sweeping tx in mempool: %v", err)
}
// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
numBlocks := uint32(invoiceReq.CltvExpiry -
	defaultIncomingBroadcastDelta) - defaultCSV
if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
t.Fatalf("unable to generate blocks")
}
// Carol's commitment transaction should now be in the mempool.
txids, err := waitForNTxsInMempool(net.Miner.Node, 1, minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
bobFundingTxid, err := getChanPointFundingTxid(bobChanPoint)
if err != nil {
t.Fatalf("unable to get txid: %v", err)
}
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
}
// The transaction should be spending from the funding transaction
commitHash := txids[0]
tx1, err := net.Miner.Node.GetRawTransaction(commitHash)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx1.MsgTx().TxIn[0].PreviousOutPoint != carolFundingPoint {
t.Fatalf("commit transaction not spending fundingtx: %v",
spew.Sdump(tx1))
}
// Mine a block, which should contain the commitment.
block := mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
assertTxInBlock(t, block, commitHash)
// After the force close transaction is mined, Carol should broadcast
// her second level HTLC transaction. Bob will broadcast a sweep tx to
// sweep his output in the channel with Carol. He can do this
// immediately, as the output is not timelocked since Carol was the one
// force closing.
commitSpends, err := waitForNTxsInMempool(net.Miner.Node, 2,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// Both Carol's second level transaction and Bob's sweep should be
// spending from the commitment transaction.
for _, txid := range commitSpends {
tx, err := net.Miner.Node.GetRawTransaction(txid)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *commitHash {
t.Fatalf("tx did not spend from commitment tx")
}
}
// Mine a block to confirm the two transactions (+ coinbase).
block = mineBlocks(t, net, 1, 2)[0]
if len(block.Transactions) != 3 {
t.Fatalf("expected 3 transactions in block, got %v",
len(block.Transactions))
}
for _, txid := range commitSpends {
assertTxInBlock(t, block, txid)
}
// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a sweep tx to directly claim
// the HTLC in his (already closed) channel with Alice.
bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node,
minerMempoolTimeout)
if err != nil {
t.Fatalf("transactions not found in mempool: %v", err)
}
// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep)
if err != nil {
t.Fatalf("unable to get txn: %v", err)
}
if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose {
t.Fatalf("tx did not spend from alice's force close tx")
}
// We'll now mine a block which should confirm Bob's HTLC sweep
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
if len(block.Transactions) != 2 {
t.Fatalf("expected 2 transactions in block, got %v",
len(block.Transactions))
}
assertTxInBlock(t, block, bobHtlcSweep)
carolSecondLevelCSV--
// Now that the sweeping transaction has been confirmed, Bob should
// recognize that all contracts have been fully resolved, and show no
// pending close channels.
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := net.Bob.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("bob still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// If we then mine 3 additional blocks, Carol's second level tx will
// mature, and she should pull the funds.
if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
t.Fatalf("unable to generate block: %v", err)
}
carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
if err != nil {
t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
}
// When Carol's sweep gets confirmed, she should have no more pending
// channels.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, carolSweep)
pendingChansRequest = &lnrpc.PendingChannelsRequest{}
err = lntest.WaitPredicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
pendingChanResp, err := carol.PendingChannels(
ctxt, pendingChansRequest,
)
if err != nil {
predErr = fmt.Errorf("unable to query for pending "+
"channels: %v", err)
return false
}
if len(pendingChanResp.PendingForceClosingChannels) != 0 {
predErr = fmt.Errorf("carol still has pending channels "+
"but shouldn't: %v", spew.Sdump(pendingChanResp))
return false
}
return true
}, time.Second*15)
if err != nil {
t.Fatalf(predErr.Error())
}
// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
if err != nil {
t.Fatalf("unable to retrieve invoices: %v", err)
}
if len(invoicesResp.Invoices) != 1 {
t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
}
invoice := invoicesResp.Invoices[0]
if invoice.State != lnrpc.Invoice_SETTLED {
t.Fatalf("expected invoice to be settled on chain")
}
if invoice.AmtPaidSat != invoiceAmt {
t.Fatalf("expected invoice to be settled with %d sat, got "+
"%d sat", invoiceAmt, invoice.AmtPaidSat)
}
}
// testSwitchCircuitPersistence creates a multihop network to ensure the sender
// and intermediaries are persisting their open payment circuits. After
// forwarding a packet via an outgoing link, all are restarted, and expected to

@ -27,6 +27,7 @@ import (
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/chanbackup"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/macaroons"
)
@ -240,11 +241,14 @@ type HarnessNode struct {
lnrpc.LightningClient
lnrpc.WalletUnlockerClient
invoicesrpc.InvoicesClient
}
// Assert *HarnessNode implements the lnrpc.LightningClient,
// lnrpc.WalletUnlockerClient and invoicesrpc.InvoicesClient interfaces.
var _ lnrpc.LightningClient = (*HarnessNode)(nil)
var _ lnrpc.WalletUnlockerClient = (*HarnessNode)(nil)
var _ invoicesrpc.InvoicesClient = (*HarnessNode)(nil)
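With invoicesrpc.InvoicesClient embedded, a test can drive hold invoices directly on a HarnessNode. An illustrative (hypothetical) call, assuming a preimage variable of type lntypes.Preimage:

ctxt, _ := context.WithTimeout(context.Background(), defaultTimeout)
_, err := carol.SettleInvoice(ctxt, &invoicesrpc.SettleInvoiceMsg{
	Preimage: preimage[:],
})
if err != nil {
	t.Fatalf("unable to settle hold invoice: %v", err)
}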
// newNode creates a new test lightning node instance from the passed config.
func newNode(cfg nodeConfig) (*HarnessNode, error) {
@ -487,6 +491,7 @@ func (hn *HarnessNode) initLightningClient(conn *grpc.ClientConn) error {
// Construct the LightningClient that will allow us to use the
// HarnessNode directly for normal rpc operations.
hn.LightningClient = lnrpc.NewLightningClient(conn)
hn.InvoicesClient = invoicesrpc.NewInvoicesClient(conn)
// Set the harness node's pubkey to what the node claims in GetInfo.
err := hn.FetchNodeInfo()

@ -1309,13 +1309,6 @@ type LightningChannel struct {
// signatures, of which there may be hundreds.
sigPool *SigPool
// pCache is the global preimage cache shared across all other
// LightningChannel instance. We'll use this cache either when we force
// close, or we detect that the remote party has force closed. If the
// preimage for an incoming HTLC is found in the cache, then we'll try
// to claim it on chain.
pCache PreimageCache
// Capacity is the total capacity of this channel.
Capacity btcutil.Amount
@ -1368,7 +1361,7 @@ type LightningChannel struct {
// settled channel state. Throughout state transitions, the channel will
// automatically persist pertinent state to the database in an efficient
// manner.
func NewLightningChannel(signer input.Signer, pCache PreimageCache,
func NewLightningChannel(signer input.Signer,
state *channeldb.OpenChannel,
sigPool *SigPool) (*LightningChannel, error) {
@ -1387,7 +1380,6 @@ func NewLightningChannel(signer input.Signer, pCache PreimageCache,
lc := &LightningChannel{
Signer: signer,
sigPool: sigPool,
pCache: pCache,
currentHeight: localCommit.CommitHeight,
remoteCommitChain: newCommitmentChain(),
localCommitChain: newCommitmentChain(),
@ -5096,7 +5088,7 @@ type UnilateralCloseSummary struct {
// which case we will attempt to sweep the non-HTLC output using the passed
// commitPoint.
func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Signer,
pCache PreimageCache, commitSpend *chainntnfs.SpendDetail,
commitSpend *chainntnfs.SpendDetail,
remoteCommit channeldb.ChannelCommitment,
commitPoint *btcec.PublicKey) (*UnilateralCloseSummary, error) {
@ -5113,7 +5105,6 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
SatPerKWeight(remoteCommit.FeePerKw), false, signer,
remoteCommit.Htlcs, keyRing, &chanState.LocalChanCfg,
&chanState.RemoteChanCfg, *commitSpend.SpenderTxHash,
pCache,
)
if err != nil {
return nil, fmt.Errorf("unable to create htlc "+
@ -5211,11 +5202,11 @@ func NewUnilateralCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
// using this struct if we need to go on-chain for any reason, or if we detect
// that the remote party broadcasts their commitment transaction.
type IncomingHtlcResolution struct {
// Preimage is the preimage that will be used to satisfy the contract
// of the HTLC.
// Preimage is the preimage that will be used to satisfy the contract of
// the HTLC.
//
// NOTE: This field will only be populated if we know the preimage at
// the time a unilateral or force close occurs.
// NOTE: This field will only be populated in the incoming contest
// resolver.
Preimage [32]byte
// SignedSuccessTx is the fully signed HTLC success transaction. This
@ -5448,7 +5439,7 @@ func newOutgoingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan
func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.ChannelConfig,
commitHash chainhash.Hash, htlc *channeldb.HTLC, keyRing *CommitmentKeyRing,
feePerKw SatPerKWeight, dustLimit btcutil.Amount, csvDelay uint32,
localCommit bool, preimage [32]byte) (*IncomingHtlcResolution, error) {
localCommit bool) (*IncomingHtlcResolution, error) {
op := wire.OutPoint{
Hash: commitHash,
@ -5475,7 +5466,6 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan
// With the script generated, we can completely populate the
// SignDescriptor needed to sweep the output.
return &IncomingHtlcResolution{
Preimage: preimage,
ClaimOutpoint: op,
CsvDelay: csvDelay,
SweepSignDesc: input.SignDescriptor{
@ -5526,10 +5516,12 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan
InputIndex: 0,
}
// Next, we'll construct the full witness needed to satisfy the input
// of the success transaction.
// Next, we'll construct the full witness needed to satisfy the input of
// the success transaction. Don't specify the preimage yet. The preimage
// will be supplied by the contract resolver, either directly or when it
// becomes known.
successWitness, err := input.ReceiverHtlcSpendRedeem(
htlc.Signature, preimage[:], signer, &successSignDesc, successTx,
htlc.Signature, nil, signer, &successSignDesc, successTx,
)
if err != nil {
return nil, err
@ -5554,7 +5546,6 @@ func newIncomingHtlcResolution(signer input.Signer, localChanCfg *channeldb.Chan
keyRing.CommitPoint, localChanCfg.DelayBasePoint.PubKey,
)
return &IncomingHtlcResolution{
Preimage: preimage,
SignedSuccessTx: successTx,
CsvDelay: csvDelay,
ClaimOutpoint: wire.OutPoint{
@ -5604,7 +5595,7 @@ func (r *OutgoingHtlcResolution) HtlcPoint() wire.OutPoint {
func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool,
signer input.Signer, htlcs []channeldb.HTLC, keyRing *CommitmentKeyRing,
localChanCfg, remoteChanCfg *channeldb.ChannelConfig,
commitHash chainhash.Hash, pCache PreimageCache) (*HtlcResolutions, error) {
commitHash chainhash.Hash) (*HtlcResolutions, error) {
// TODO(roasbeef): don't need to swap csv delay?
dustLimit := remoteChanCfg.DustLimit
@ -5628,19 +5619,11 @@ func extractHtlcResolutions(feePerKw SatPerKWeight, ourCommit bool,
// If the HTLC is incoming, then we'll attempt to see if we
// know the pre-image to the HTLC.
if htlc.Incoming {
// We'll now query the preimage cache for the preimage
// for this HTLC. If it's present then we can fully
// populate this resolution.
preimage, _ := pCache.LookupPreimage(htlc.RHash)
// Otherwise, we'll create an incoming HTLC resolution
// as we can satisfy the contract.
var pre [32]byte
copy(pre[:], preimage[:])
ihr, err := newIncomingHtlcResolution(
signer, localChanCfg, commitHash, &htlc, keyRing,
feePerKw, dustLimit, uint32(csvDelay), ourCommit,
pre,
)
if err != nil {
return nil, err
@ -5730,7 +5713,7 @@ func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) {
localCommitment := lc.channelState.LocalCommitment
summary, err := NewLocalForceCloseSummary(
lc.channelState, lc.Signer, lc.pCache, commitTx,
lc.channelState, lc.Signer, commitTx,
localCommitment,
)
if err != nil {
@ -5748,8 +5731,8 @@ func (lc *LightningChannel) ForceClose() (*LocalForceCloseSummary, error) {
// channel state. The passed commitTx must be a fully signed commitment
// transaction corresponding to localCommit.
func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Signer,
pCache PreimageCache, commitTx *wire.MsgTx,
localCommit channeldb.ChannelCommitment) (*LocalForceCloseSummary, error) {
commitTx *wire.MsgTx, localCommit channeldb.ChannelCommitment) (
*LocalForceCloseSummary, error) {
// Re-derive the original pkScript for the to-self output within the
// commitment transaction. We'll need this to find the corresponding
@ -5830,7 +5813,8 @@ func NewLocalForceCloseSummary(chanState *channeldb.OpenChannel, signer input.Si
htlcResolutions, err := extractHtlcResolutions(
SatPerKWeight(localCommit.FeePerKw), true, signer,
localCommit.Htlcs, keyRing, &chanState.LocalChanCfg,
&chanState.RemoteChanCfg, txHash, pCache)
&chanState.RemoteChanCfg, txHash,
)
if err != nil {
return nil, err
}

@ -19,7 +19,6 @@ import (
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire"
)
@ -544,10 +543,6 @@ func TestForceClose(t *testing.T) {
t.Fatalf("Can't update the channel state: %v", err)
}
// Before we force close Alice's channel, we'll add the pre-image of
// Bob's HTLC to her preimage cache.
aliceChannel.pCache.AddPreimages(lntypes.Preimage(preimageBob))
// We'll now attempt the force close initiated by Alice.
closeSummary, err := aliceChannel.ForceClose()
@ -681,8 +676,11 @@ func TestForceClose(t *testing.T) {
receiverHtlcScript := closeSummary.CloseTx.TxOut[inHtlcIndex].PkScript
// With the original pkscript located, we'll now verify that the second
// level transaction can spend from the multi-sig out.
// level transaction can spend from the multi-sig out. Supply the
// preimage manually. This is usually done by the contract resolver
// before publication.
successTx := inHtlcResolution.SignedSuccessTx
successTx.TxIn[0].Witness[3] = preimageBob[:]
vm, err = txscript.NewEngine(receiverHtlcScript,
successTx, 0, txscript.StandardVerifyFlags, nil,
nil, int64(htlcAmount.ToSatoshis()))
@ -778,10 +776,6 @@ func TestForceClose(t *testing.T) {
"htlcs, got %v htlcs",
1, len(closeSummary.HtlcResolutions.IncomingHTLCs))
}
var zeroHash [32]byte
if closeSummary.HtlcResolutions.IncomingHTLCs[0].Preimage != zeroHash {
t.Fatalf("bob shouldn't know preimage but does")
}
}
// TestForceCloseDustOutput tests that if either side force closes with an
@ -1459,16 +1453,14 @@ func TestStateUpdatePersistence(t *testing.T) {
}
aliceChannelNew, err := NewLightningChannel(
aliceChannel.Signer, nil, aliceChannels[0],
aliceChannel.sigPool,
aliceChannel.Signer, aliceChannels[0], aliceChannel.sigPool,
)
if err != nil {
t.Fatalf("unable to create new channel: %v", err)
}
bobChannelNew, err := NewLightningChannel(
bobChannel.Signer, nil, bobChannels[0],
bobChannel.sigPool,
bobChannel.Signer, bobChannels[0], bobChannel.sigPool,
)
if err != nil {
t.Fatalf("unable to create new channel: %v", err)
@ -2645,13 +2637,13 @@ func TestChanSyncFullySynced(t *testing.T) {
}
aliceChannelNew, err := NewLightningChannel(
aliceChannel.Signer, nil, aliceChannels[0], aliceChannel.sigPool,
aliceChannel.Signer, aliceChannels[0], aliceChannel.sigPool,
)
if err != nil {
t.Fatalf("unable to create new channel: %v", err)
}
bobChannelNew, err := NewLightningChannel(
bobChannel.Signer, nil, bobChannels[0], bobChannel.sigPool,
bobChannel.Signer, bobChannels[0], bobChannel.sigPool,
)
if err != nil {
t.Fatalf("unable to create new channel: %v", err)
@ -2673,7 +2665,7 @@ func restartChannel(channelOld *LightningChannel) (*LightningChannel, error) {
}
channelNew, err := NewLightningChannel(
channelOld.Signer, channelOld.pCache, nodeChannels[0],
channelOld.Signer, nodeChannels[0],
channelOld.sigPool,
)
if err != nil {
@ -4943,11 +4935,6 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) {
t.Fatalf("unable to close: %v", err)
}
// Now that Bob has force closed, we'll modify Alice's pre image cache
// such that she now gains the ability to also settle the incoming HTLC
// from Bob.
aliceChannel.pCache.AddPreimages(lntypes.Preimage(preimageBob))
// We'll then use Bob's transaction to trigger a spend notification for
// Alice.
closeTx := bobForceClose.CloseTx
@ -4958,7 +4945,7 @@ func TestChannelUnilateralCloseHtlcResolution(t *testing.T) {
}
aliceCloseSummary, err := NewUnilateralCloseSummary(
aliceChannel.channelState, aliceChannel.Signer,
aliceChannel.pCache, spendDetail,
spendDetail,
aliceChannel.channelState.RemoteCommitment,
aliceChannel.channelState.RemoteCurrentRevocation,
)
@ -5108,7 +5095,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) {
// our output wasn't picked up.
aliceWrongCloseSummary, err := NewUnilateralCloseSummary(
aliceChannel.channelState, aliceChannel.Signer,
aliceChannel.pCache, spendDetail,
spendDetail,
aliceChannel.channelState.RemoteCommitment,
aliceChannel.channelState.RemoteCurrentRevocation,
)
@ -5129,7 +5116,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) {
}
aliceCloseSummary, err := NewUnilateralCloseSummary(
aliceChannel.channelState, aliceChannel.Signer,
aliceChannel.pCache, spendDetail,
spendDetail,
aliceRemoteChainTip.Commitment,
aliceChannel.channelState.RemoteNextRevocation,
)
@ -5926,7 +5913,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
// We now re-create the channels, mimicking a restart. This should sync
// the update logs up to the correct state set up above.
newAliceChannel, err := NewLightningChannel(
aliceChannel.Signer, nil, aliceChannel.channelState,
aliceChannel.Signer, aliceChannel.channelState,
aliceChannel.sigPool,
)
if err != nil {
@ -5934,7 +5921,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
}
newBobChannel, err := NewLightningChannel(
bobChannel.Signer, nil, bobChannel.channelState,
bobChannel.Signer, bobChannel.channelState,
bobChannel.sigPool,
)
if err != nil {
@ -6008,7 +5995,7 @@ func restoreAndAssert(t *testing.T, channel *LightningChannel, numAddsLocal,
numFailsLocal, numAddsRemote, numFailsRemote int) {
newChannel, err := NewLightningChannel(
channel.Signer, nil, channel.channelState,
channel.Signer, channel.channelState,
channel.sigPool,
)
if err != nil {
@ -6318,8 +6305,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
expLocal, expRemote uint64) *LightningChannel {
newChannel, err := NewLightningChannel(
channel.Signer, nil, channel.channelState,
channel.sigPool,
channel.Signer, channel.channelState, channel.sigPool,
)
if err != nil {
t.Fatalf("unable to create new channel: %v", err)

@ -10,7 +10,6 @@ import (
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcwallet/wallet/txauthor"
"github.com/lightningnetwork/lnd/lntypes"
)
// AddressType is an enum-like type which denotes the possible address types
@ -294,21 +293,6 @@ type MessageSigner interface {
SignMessage(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error)
}
// PreimageCache is an interface that represents a global cache for preimages.
// We'll utilize this cache to communicate the discovery of new preimages
// across sub-systems.
type PreimageCache interface {
// LookupPreimage attempts to look up a preimage according to its hash.
// If found, the preimage is returned along with true for the second
// argument. Otherwise, it'll return false.
LookupPreimage(hash lntypes.Hash) (lntypes.Preimage, bool)
// AddPreimages adds a batch of newly discovered preimages to the global
// cache, and also signals any subscribers of the newly discovered
// witness.
AddPreimages(preimages ...lntypes.Preimage) error
}
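With PreimageCache removed from lnwallet, the remaining cross-subsystem preimage interface is contractcourt.WitnessBeacon, implemented by the preimage beacon at the end of this diff. Roughly (method set inferred from its use here; the exact definition, including a subscription method, lives in contractcourt):

type WitnessBeacon interface {
	// LookupPreimage attempts to look up a preimage by its payment hash.
	LookupPreimage(payHash lntypes.Hash) (lntypes.Preimage, bool)

	// AddPreimages adds a batch of newly discovered preimages and
	// notifies any subscribers.
	AddPreimages(preimages ...lntypes.Preimage) error
}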
// WalletDriver represents a "driver" for a particular concrete
// WalletController implementation. A driver is identified by a globally unique
// string identifier along with a 'New()' method which is responsible for

@ -301,13 +301,11 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error)
aliceSigner := &input.MockSigner{Privkeys: aliceKeys}
bobSigner := &input.MockSigner{Privkeys: bobKeys}
pCache := newMockPreimageCache()
// TODO(roasbeef): make mock version of pre-image store
alicePool := NewSigPool(1, aliceSigner)
channelAlice, err := NewLightningChannel(
aliceSigner, pCache, aliceChannelState, alicePool,
aliceSigner, aliceChannelState, alicePool,
)
if err != nil {
return nil, nil, nil, err
@ -316,7 +314,7 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error)
bobPool := NewSigPool(1, bobSigner)
channelBob, err := NewLightningChannel(
bobSigner, pCache, bobChannelState, bobPool,
bobSigner, bobChannelState, bobPool,
)
if err != nil {
return nil, nil, nil, err

@ -780,8 +780,6 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) {
},
}
pCache := newMockPreimageCache()
for i, test := range testCases {
expectedCommitmentTx, err := txFromHex(test.expectedCommitmentTxHex)
if err != nil {
@ -848,7 +846,7 @@ func TestCommitmentAndHTLCTransactions(t *testing.T) {
htlcResolutions, err := extractHtlcResolutions(
SatPerKWeight(test.commitment.FeePerKw), true, signer,
htlcs, keys, channel.localChanCfg, channel.remoteChanCfg,
commitTx.TxHash(), pCache,
commitTx.TxHash(),
)
if err != nil {
t.Errorf("Case %d: Failed to extract HTLC resolutions: %v", i, err)


@ -196,10 +196,6 @@ type peer struct {
// remote node.
localFeatures *lnwire.RawFeatureVector
// finalCltvRejectDelta defines the number of blocks before the expiry
// of the htlc where we no longer settle it as an exit hop.
finalCltvRejectDelta uint32
// outgoingCltvRejectDelta defines the number of blocks before expiry of
// an htlc where we don't offer an htlc anymore.
outgoingCltvRejectDelta uint32
@ -242,7 +238,7 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
addr *lnwire.NetAddress, inbound bool,
localFeatures *lnwire.RawFeatureVector,
chanActiveTimeout time.Duration,
finalCltvRejectDelta, outgoingCltvRejectDelta uint32) (
outgoingCltvRejectDelta uint32) (
*peer, error) {
nodePub := addr.IdentityKey
@ -258,7 +254,6 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
localFeatures: localFeatures,
finalCltvRejectDelta: finalCltvRejectDelta,
outgoingCltvRejectDelta: outgoingCltvRejectDelta,
sendQueue: make(chan outgoingMsg),
@ -429,8 +424,7 @@ func (p *peer) QuitSignal() <-chan struct{} {
func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
for _, dbChan := range chans {
lnChan, err := lnwallet.NewLightningChannel(
p.server.cc.signer, p.server.witnessBeacon, dbChan,
p.server.sigPool,
p.server.cc.signer, dbChan, p.server.sigPool,
)
if err != nil {
return err
@ -599,7 +593,6 @@ func (p *peer) addLink(chanPoint *wire.OutPoint,
UnsafeReplay: cfg.UnsafeReplay,
MinFeeUpdateTimeout: htlcswitch.DefaultMinLinkFeeUpdateTimeout,
MaxFeeUpdateTimeout: htlcswitch.DefaultMaxLinkFeeUpdateTimeout,
FinalCltvRejectDelta: p.finalCltvRejectDelta,
OutgoingCltvRejectDelta: p.outgoingCltvRejectDelta,
}
@ -1789,8 +1782,7 @@ out:
// set of active channels, so we can look it up later
// easily according to its channel ID.
lnChan, err := lnwallet.NewLightningChannel(
p.server.cc.signer, p.server.witnessBeacon,
newChan, p.server.sigPool,
p.server.cc.signer, newChan, p.server.sigPool,
)
if err != nil {
p.activeChanMtx.Unlock()

@ -1917,7 +1917,7 @@ func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) (
// we create a fully populated channel state machine which
// uses the db channel as backing storage.
return lnwallet.NewLightningChannel(
r.server.cc.wallet.Cfg.Signer, nil, dbChan, nil,
r.server.cc.wallet.Cfg.Signer, dbChan, nil,
)
}

@ -342,7 +342,10 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
readPool: readPool,
chansToRestore: chansToRestore,
invoices: invoices.NewRegistry(chanDB, decodeFinalCltvExpiry),
invoices: invoices.NewRegistry(
chanDB, decodeFinalCltvExpiry,
defaultFinalCltvRejectDelta,
),
channelNotifier: channelnotifier.New(chanDB),
@ -374,7 +377,6 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
}
s.witnessBeacon = &preimageBeacon{
invoices: s.invoices,
wCache: chanDB.NewWitnessCache(),
subscribers: make(map[uint64]*preimageSubscriber),
}
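The reject delta now passed to invoices.NewRegistry above lets the registry perform the final CLTV sanity check itself, instead of each link doing it. A minimal sketch of that check (names and exact comparisons hypothetical):

// acceptableFinalCltv reports whether an exit-hop HTLC leaves enough
// blocks until expiry: it must clear the registry-wide reject delta as
// well as the final CLTV delta required by the invoice itself.
func acceptableFinalCltv(htlcExpiry, currentHeight,
	invoiceCltvDelta, finalCltvRejectDelta uint32) bool {

	if htlcExpiry < currentHeight+finalCltvRejectDelta {
		return false
	}
	return htlcExpiry >= currentHeight+invoiceCltvDelta
}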
@ -2557,7 +2559,6 @@ func (s *server) peerConnected(conn net.Conn, connReq *connmgr.ConnReq,
p, err := newPeer(
conn, connReq, s, peerAddr, inbound, localFeatures,
cfg.ChanEnableTimeout,
defaultFinalCltvRejectDelta,
defaultOutgoingCltvRejectDelta,
)
if err != nil {

@ -303,7 +303,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
alicePool := lnwallet.NewSigPool(1, aliceSigner)
channelAlice, err := lnwallet.NewLightningChannel(
aliceSigner, nil, aliceChannelState, alicePool,
aliceSigner, aliceChannelState, alicePool,
)
if err != nil {
return nil, nil, nil, nil, err
@ -312,7 +312,7 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
bobPool := lnwallet.NewSigPool(1, bobSigner)
channelBob, err := lnwallet.NewLightningChannel(
bobSigner, nil, bobChannelState, bobPool,
bobSigner, bobChannelState, bobPool,
)
if err != nil {
return nil, nil, nil, nil, err

@ -5,9 +5,7 @@ import (
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/contractcourt"
"github.com/lightningnetwork/lnd/invoices"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwallet"
)
// preimageSubscriber represents an active subscription to be notified once the
@ -24,8 +22,6 @@ type preimageSubscriber struct {
type preimageBeacon struct {
sync.RWMutex
invoices *invoices.InvoiceRegistry
wCache *channeldb.WitnessCache
clientCounter uint64
@ -72,25 +68,6 @@ func (p *preimageBeacon) LookupPreimage(
p.RLock()
defer p.RUnlock()
// First, we'll check the invoice registry to see if we already know of
// the preimage, as it's one that we created ourselves.
invoice, _, err := p.invoices.LookupInvoice(payHash)
switch {
case err == channeldb.ErrInvoiceNotFound:
// If we get this error, then it simply means that this invoice
// wasn't found, so we don't treat it as a critical error.
case err != nil:
return lntypes.Preimage{}, false
}
// If we've found the invoice, then we can return the preimage
// directly.
if err != channeldb.ErrInvoiceNotFound &&
invoice.Terms.PaymentPreimage != channeldb.UnknownPreimage {
return invoice.Terms.PaymentPreimage, true
}
// We'll perform the lookup using the witness cache.
preimage, err := p.wCache.LookupSha256Witness(payHash)
if err != nil {
@ -144,4 +121,3 @@ func (p *preimageBeacon) AddPreimages(preimages ...lntypes.Preimage) error {
}
var _ contractcourt.WitnessBeacon = (*preimageBeacon)(nil)
var _ lnwallet.PreimageCache = (*preimageBeacon)(nil)
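After the registry lookup above is dropped, the beacon's LookupPreimage reduces to roughly the following sketch; the witness cache is the sole remaining source here, with the invoice registry consulted by the contest resolver instead:

func (p *preimageBeacon) LookupPreimage(
	payHash lntypes.Hash) (lntypes.Preimage, bool) {

	p.RLock()
	defer p.RUnlock()

	// Only the witness cache is checked now; invoice preimages are
	// resolved through the invoice registry by the contest resolver.
	preimage, err := p.wCache.LookupSha256Witness(payHash)
	if err != nil {
		return lntypes.Preimage{}, false
	}
	return preimage, true
}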