Merge pull request #4721 from guggero/itest-flake-hunt
itest: cleanup and flake fix
commit 8b32285f48
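Most of the diff below is a mechanical conversion of the integration tests from the older wait.Predicate style (a bool predicate, an error captured in a side variable, and a manual t.Fatalf afterwards) to wait.NoError combined with testify's require package, plus a switch from hard-coded 15 second waits to the shared defaultTimeout constant. A minimal, self-contained sketch of the two styles, assuming a stand-in condition instead of the real assertActiveHtlcs-style helpers and a plain *testing.T in place of the harness wrapper's t.t:

package itest

import (
    "errors"
    "testing"
    "time"

    "github.com/lightningnetwork/lnd/lntest/wait"
    "github.com/stretchr/testify/require"
)

// TestWaitStyles contrasts the two polling styles touched by this change.
func TestWaitStyles(t *testing.T) {
    deadline := time.Now().Add(100 * time.Millisecond)
    check := func() error {
        if time.Now().Before(deadline) {
            return errors.New("condition not met yet")
        }
        return nil
    }

    // Old style: bool predicate, error captured on the side, manual Fatalf.
    var predErr error
    err := wait.Predicate(func() bool {
        predErr = check()
        return predErr == nil
    }, time.Second*15)
    if err != nil {
        t.Fatalf("condition never held: %v", predErr)
    }

    // New style: the closure returns the error and require reports it.
    err = wait.NoError(check, time.Second*15)
    require.NoError(t, err)
}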
@@ -57,9 +57,6 @@ type NetworkHarness struct {
 Alice *HarnessNode
 Bob *HarnessNode

-seenTxns chan *chainhash.Hash
-bitcoinWatchRequests chan *txWatchRequest
-
 // Channel for transmitting stderr output from failed lightning node
 // to main process.
 lndErrorChan chan error
@@ -85,8 +82,6 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) (
 n := NetworkHarness{
 activeNodes: make(map[int]*HarnessNode),
 nodesByPub: make(map[string]*HarnessNode),
-seenTxns: make(chan *chainhash.Hash),
-bitcoinWatchRequests: make(chan *txWatchRequest),
 lndErrorChan: make(chan error),
 netParams: r.ActiveNet,
 Miner: r,
@@ -95,7 +90,6 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) (
 quit: make(chan struct{}),
 lndBinary: lndBinary,
 }
-go n.networkWatcher()
 return &n, nil
 }

@@ -746,81 +740,12 @@ func saveProfilesPage(node *HarnessNode) error {
 return nil
 }

-// TODO(roasbeef): add a WithChannel higher-order function?
-// * python-like context manager w.r.t using a channel within a test
-// * possibly adds more funds to the target wallet if the funds are not
-// enough
-
-// txWatchRequest encapsulates a request to the harness' Bitcoin network
-// watcher to dispatch a notification once a transaction with the target txid
-// is seen within the test network.
-type txWatchRequest struct {
-txid chainhash.Hash
-eventChan chan struct{}
-}
-
-// networkWatcher is a goroutine which accepts async notification
-// requests for the broadcast of a target transaction, and then dispatches the
-// transaction once its seen on the Bitcoin network.
-func (n *NetworkHarness) networkWatcher() {
-seenTxns := make(map[chainhash.Hash]struct{})
-clients := make(map[chainhash.Hash][]chan struct{})
-
-for {
-
-select {
-case <-n.quit:
-return
-
-case req := <-n.bitcoinWatchRequests:
-// If we've already seen this transaction, then
-// immediately dispatch the request. Otherwise, append
-// to the list of clients who are watching for the
-// broadcast of this transaction.
-if _, ok := seenTxns[req.txid]; ok {
-close(req.eventChan)
-} else {
-clients[req.txid] = append(clients[req.txid], req.eventChan)
-}
-case txid := <-n.seenTxns:
-// Add this txid to our set of "seen" transactions. So
-// we're able to dispatch any notifications for this
-// txid which arrive *after* it's seen within the
-// network.
-seenTxns[*txid] = struct{}{}
-
-// If there isn't a registered notification for this
-// transaction then ignore it.
-txClients, ok := clients[*txid]
-if !ok {
-continue
-}
-
-// Otherwise, dispatch the notification to all clients,
-// cleaning up the now un-needed state.
-for _, client := range txClients {
-close(client)
-}
-delete(clients, *txid)
-}
-}
-}
-
-// OnTxAccepted is a callback to be called each time a new transaction has been
-// broadcast on the network.
-func (n *NetworkHarness) OnTxAccepted(hash *chainhash.Hash) {
-select {
-case n.seenTxns <- hash:
-case <-n.quit:
-return
-}
-}
-
-// WaitForTxBroadcast blocks until the target txid is seen on the network. If
+// WaitForTxInMempool blocks until the target txid is seen in the mempool. If
 // the transaction isn't seen within the network before the passed timeout,
 // then an error is returned.
-// TODO(roasbeef): add another method which creates queue of all seen transactions
-func (n *NetworkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.Hash) error {
+func (n *NetworkHarness) WaitForTxInMempool(ctx context.Context,
+txid chainhash.Hash) error {
+
 // Return immediately if harness has been torn down.
 select {
 case <-n.quit:
@@ -828,20 +753,29 @@ func (n *NetworkHarness) WaitForTxBroadcast(ctx context.Context, txid chainhash.
 default:
 }

-eventChan := make(chan struct{})
+ticker := time.NewTicker(50 * time.Millisecond)
+defer ticker.Stop()

-n.bitcoinWatchRequests <- &txWatchRequest{
-txid: txid,
-eventChan: eventChan,
+var mempool []*chainhash.Hash
+for {
+select {
+case <-ctx.Done():
+return fmt.Errorf("wanted %v, found %v txs "+
+"in mempool: %v", txid, len(mempool), mempool)
+
+case <-ticker.C:
+var err error
+mempool, err = n.Miner.Node.GetRawMempool()
+if err != nil {
+return err
 }

-select {
-case <-eventChan:
+for _, mempoolTx := range mempool {
+if *mempoolTx == txid {
 return nil
-case <-n.quit:
-return fmt.Errorf("NetworkHarness has been torn down")
-case <-ctx.Done():
-return fmt.Errorf("tx not seen before context timeout")
+}
+}
+}
 }
 }

@@ -1163,7 +1097,7 @@ func (n *NetworkHarness) CloseChannel(ctx context.Context,
 "%v", err)
 return
 }
-if err := n.WaitForTxBroadcast(ctx, *closeTxid); err != nil {
+if err := n.WaitForTxInMempool(ctx, *closeTxid); err != nil {
 errChan <- fmt.Errorf("error while waiting for "+
 "broadcast tx: %v", err)
 return
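The notification-based WaitForTxBroadcast/networkWatcher machinery removed above is replaced by straight polling. The sketch below mirrors the shape of the new WaitForTxInMempool, with a stand-in getMempool function in place of n.Miner.Node.GetRawMempool so it runs outside the harness; polling a fresh mempool snapshot on every tick means a test can no longer hang on a missed one-shot notification.

package main

import (
    "context"
    "fmt"
    "time"
)

// pollForTx checks a mempool snapshot on a short ticker until the wanted
// txid shows up or the context expires.
func pollForTx(ctx context.Context, txid string,
    getMempool func() ([]string, error)) error {

    ticker := time.NewTicker(50 * time.Millisecond)
    defer ticker.Stop()

    var mempool []string
    for {
        select {
        case <-ctx.Done():
            return fmt.Errorf("wanted %v, found %v txs in mempool: %v",
                txid, len(mempool), mempool)

        case <-ticker.C:
            var err error
            mempool, err = getMempool()
            if err != nil {
                return err
            }

            for _, mempoolTx := range mempool {
                if mempoolTx == txid {
                    return nil
                }
            }
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    defer cancel()

    // A fake mempool that already contains the transaction we wait for.
    err := pollForTx(ctx, "deadbeef", func() ([]string, error) {
        return []string{"deadbeef"}, nil
    })
    fmt.Println("result:", err) // result: <nil>
}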
@@ -3,10 +3,8 @@ package itest
 import (
 "context"
 "fmt"
-"time"

 "github.com/btcsuite/btcd/wire"
-"github.com/davecgh/go-spew/spew"
 "github.com/lightningnetwork/lnd"
 "github.com/lightningnetwork/lnd/lncfg"
 "github.com/lightningnetwork/lnd/lnrpc"
@@ -15,6 +13,7 @@ import (
 "github.com/lightningnetwork/lnd/lntest"
 "github.com/lightningnetwork/lnd/lntest/wait"
 "github.com/lightningnetwork/lnd/lntypes"
+"github.com/stretchr/testify/require"
 )

 // testMultiHopHtlcLocalChainClaim tests that in a multi-hop HTLC scenario, if
@@ -51,9 +50,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
 defer cancel()
 carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
-if err != nil {
-t.Fatalf("unable to add invoice: %v", err)
-}
+require.NoError(t.t, err)

 // Now that we've created the invoice, we'll send a single payment from
 // Alice to Carol. We won't wait for the response however, as Carol
@@ -62,32 +59,21 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 defer cancel()

 _, err = alice.RouterClient.SendPaymentV2(
-ctx,
-&routerrpc.SendPaymentRequest{
+ctx, &routerrpc.SendPaymentRequest{
 PaymentRequest: carolInvoice.PaymentRequest,
 TimeoutSeconds: 60,
 FeeLimitMsat: noFeeLimitMsat,
 },
 )
-if err != nil {
-t.Fatalf("unable to send payment: %v", err)
-}
+require.NoError(t.t, err)

 // At this point, all 3 nodes should now have an active channel with
 // the created HTLC pending on all of them.
-var predErr error
 nodes := []*lntest.HarnessNode{alice, bob, carol}
-err = wait.Predicate(func() bool {
-predErr = assertActiveHtlcs(nodes, payHash[:])
-if predErr != nil {
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf("htlc mismatch: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertActiveHtlcs(nodes, payHash[:])
+}, defaultTimeout)
+require.NoError(t.t, err)

 // Wait for carol to mark invoice as accepted. There is a small gap to
 // bridge between adding the htlc to the channel and executing the exit
@@ -101,8 +87,9 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 // At this point, Bob decides that he wants to exit the channel
 // immediately, so he force closes his commitment transaction.
 ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
-bobForceClose := closeChannelAndAssertType(ctxt, t, net, bob,
-aliceChanPoint, c == commitTypeAnchors, true)
+bobForceClose := closeChannelAndAssertType(
+ctxt, t, net, bob, aliceChanPoint, c == commitTypeAnchors, true,
+)

 // Alice will sweep her commitment output immediately. If there are
 // anchors, Alice will also sweep hers.
@@ -113,16 +100,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 _, err = waitForNTxsInMempool(
 net.Miner.Node, expectedTxes, minerMempoolTimeout,
 )
-if err != nil {
-t.Fatalf("unable to find alice's sweep tx in miner mempool: %v",
-err)
-}
+require.NoError(t.t, err)

 // Suspend Bob to force Carol to go to chain.
 restartBob, err := net.SuspendNode(bob)
-if err != nil {
-t.Fatalf("unable to suspend bob: %v", err)
-}
+require.NoError(t.t, err)

 // Settle invoice. This will just mark the invoice as settled, as there
 // is no link anymore to remove the htlc from the commitment tx. For
@@ -134,31 +116,24 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 _, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
 Preimage: preimage[:],
 })
-if err != nil {
-t.Fatalf("settle invoice: %v", err)
-}
+require.NoError(t.t, err)

 // We'll now mine enough blocks so Carol decides that she needs to go
 // on-chain to claim the HTLC as Bob has been inactive.
 numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry -
 lncfg.DefaultIncomingBroadcastDelta))

-if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-t.Fatalf("unable to generate blocks")
-}
+_, err = net.Miner.Node.Generate(numBlocks)
+require.NoError(t.t, err)

 // Carol's commitment transaction should now be in the mempool. If there
 // is an anchor, Carol will sweep that too.
 _, err = waitForNTxsInMempool(
 net.Miner.Node, expectedTxes, minerMempoolTimeout,
 )
-if err != nil {
-t.Fatalf("transactions not found in mempool: %v", err)
-}
+require.NoError(t.t, err)
 bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
-if err != nil {
-t.Fatalf("unable to get txid: %v", err)
-}
+require.NoError(t.t, err)
 carolFundingPoint := wire.OutPoint{
 Hash: *bobFundingTxid,
 Index: bobChanPoint.OutputIndex,
@@ -174,16 +149,12 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 // Mine a block that should confirm the commit tx, the anchor if present
 // and the coinbase.
 block := mineBlocks(t, net, 1, expectedTxes)[0]
-if len(block.Transactions) != expectedTxes+1 {
-t.Fatalf("expected %v transactions in block, got %v",
-expectedTxes+1, len(block.Transactions))
-}
+require.Len(t.t, block.Transactions, expectedTxes+1)
 assertTxInBlock(t, block, &closingTxid)

 // Restart bob again.
-if err := restartBob(); err != nil {
-t.Fatalf("unable to restart bob: %v", err)
-}
+err = restartBob()
+require.NoError(t.t, err)

 // After the force close transacion is mined, Carol should broadcast her
 // second level HTLC transacion. Bob will broadcast a sweep tx to sweep
@@ -198,9 +169,7 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 txes, err := getNTxsFromMempool(
 net.Miner.Node, expectedTxes, minerMempoolTimeout,
 )
-if err != nil {
-t.Fatalf("transactions not found in mempool: %v", err)
-}
+require.NoError(t.t, err)

 // Both Carol's second level transaction and Bob's sweep should be
 // spending from the commitment transaction.
@@ -209,16 +178,11 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 // At this point we suspend Alice to make sure she'll handle the
 // on-chain settle after a restart.
 restartAlice, err := net.SuspendNode(alice)
-if err != nil {
-t.Fatalf("unable to suspend alice: %v", err)
-}
+require.NoError(t.t, err)

 // Mine a block to confirm the two transactions (+ the coinbase).
 block = mineBlocks(t, net, 1, expectedTxes)[0]
-if len(block.Transactions) != expectedTxes+1 {
-t.Fatalf("expected 3 transactions in block, got %v",
-len(block.Transactions))
-}
+require.Len(t.t, block.Transactions, expectedTxes+1)

 // Keep track of the second level tx maturity.
 carolSecondLevelCSV := uint32(defaultCSV)
@@ -226,73 +190,48 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 // When Bob notices Carol's second level transaction in the block, he
 // will extract the preimage and broadcast a second level tx to claim
 // the HTLC in his (already closed) channel with Alice.
-bobSecondLvlTx, err := waitForTxInMempool(net.Miner.Node,
-minerMempoolTimeout)
-if err != nil {
-t.Fatalf("transactions not found in mempool: %v", err)
-}
+bobSecondLvlTx, err := waitForTxInMempool(
+net.Miner.Node, minerMempoolTimeout,
+)
+require.NoError(t.t, err)

 // It should spend from the commitment in the channel with Alice.
 tx, err := net.Miner.Node.GetRawTransaction(bobSecondLvlTx)
-if err != nil {
-t.Fatalf("unable to get txn: %v", err)
-}
+require.NoError(t.t, err)

-if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobForceClose {
-t.Fatalf("tx did not spend from bob's force close tx")
-}
+require.Equal(
+t.t, *bobForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
+)

 // At this point, Bob should have broadcast his second layer success
 // transaction, and should have sent it to the nursery for incubation.
-pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-err = wait.Predicate(func() bool {
 ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
+err = waitForNumChannelPendingForceClose(
+ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+if c.Channel.LocalBalance != 0 {
+return nil
 }

-if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-predErr = fmt.Errorf("bob should have pending for " +
-"close chan but doesn't")
-return false
+if len(c.PendingHtlcs) != 1 {
+return fmt.Errorf("bob should have pending " +
+"htlc but doesn't")
 }

-for _, forceCloseChan := range pendingChanResp.PendingForceClosingChannels {
-if forceCloseChan.Channel.LocalBalance != 0 {
-continue
-}
-
-if len(forceCloseChan.PendingHtlcs) != 1 {
-predErr = fmt.Errorf("bob should have pending htlc " +
-"but doesn't")
-return false
-}
-stage := forceCloseChan.PendingHtlcs[0].Stage
-if stage != 1 {
-predErr = fmt.Errorf("bob's htlc should have "+
+if c.PendingHtlcs[0].Stage != 1 {
+return fmt.Errorf("bob's htlc should have "+
 "advanced to the first stage but was "+
-"stage: %v", stage)
-return false
-}
-}
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
+"stage: %v", c.PendingHtlcs[0].Stage)
 }

+return nil
+},
+)
+require.NoError(t.t, err)
+
 // We'll now mine a block which should confirm Bob's second layer
 // transaction.
 block = mineBlocks(t, net, 1, 1)[0]
-if len(block.Transactions) != 2 {
-t.Fatalf("expected 2 transactions in block, got %v",
-len(block.Transactions))
-}
+require.Len(t.t, block.Transactions, 2)
 assertTxInBlock(t, block, bobSecondLvlTx)

 // Keep track of Bob's second level maturity, and decrement our track
@@ -302,21 +241,17 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,

 // Now that the preimage from Bob has hit the chain, restart Alice to
 // ensure she'll pick it up.
-if err := restartAlice(); err != nil {
-t.Fatalf("unable to restart alice: %v", err)
-}
+err = restartAlice()
+require.NoError(t.t, err)

 // If we then mine 3 additional blocks, Carol's second level tx should
 // mature, and she can pull the funds from it with a sweep tx.
-if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
-t.Fatalf("unable to generate block: %v", err)
-}
+_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
+require.NoError(t.t, err)
 bobSecondLevelCSV -= carolSecondLevelCSV

 carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
-}
+require.NoError(t.t, err)

 // Mining one additional block, Bob's second level tx is mature, and he
 // can sweep the output.
@@ -324,18 +259,14 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 assertTxInBlock(t, block, carolSweep)

 bobSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find bob's sweeping transaction")
-}
+require.NoError(t.t, err)

 // Make sure it spends from the second level tx.
 tx, err = net.Miner.Node.GetRawTransaction(bobSweep)
-if err != nil {
-t.Fatalf("unable to get txn: %v", err)
-}
-if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *bobSecondLvlTx {
-t.Fatalf("tx did not spend from bob's second level tx")
-}
+require.NoError(t.t, err)
+require.Equal(
+t.t, *bobSecondLvlTx, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
+)

 // When we mine one additional block, that will confirm Bob's sweep.
 // Now Bob should have no pending channels anymore, as this just
@@ -343,77 +274,15 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 block = mineBlocks(t, net, 1, 1)[0]
 assertTxInBlock(t, block, bobSweep)

-err = wait.Predicate(func() bool {
 ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-predErr = fmt.Errorf("bob still has pending channels "+
-"but shouldn't: %v", spew.Sdump(pendingChanResp))
-return false
-}
-req := &lnrpc.ListChannelsRequest{}
-ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-chanInfo, err := bob.ListChannels(ctxt, req)
-if err != nil {
-predErr = fmt.Errorf("unable to query for open "+
-"channels: %v", err)
-return false
-}
-if len(chanInfo.Channels) != 0 {
-predErr = fmt.Errorf("Bob should have no open "+
-"channels, instead he has %v",
-len(chanInfo.Channels))
-return false
-}
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+require.NoError(t.t, err)
+assertNodeNumChannels(t, bob, 0)

 // Also Carol should have no channels left (open nor pending).
-err = wait.Predicate(func() bool {
-ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := carol.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-predErr = fmt.Errorf("bob carol has pending channels "+
-"but shouldn't: %v", spew.Sdump(pendingChanResp))
-return false
-}
-
-req := &lnrpc.ListChannelsRequest{}
-ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-chanInfo, err := carol.ListChannels(ctxt, req)
-if err != nil {
-predErr = fmt.Errorf("unable to query for open "+
-"channels: %v", err)
-return false
-}
-if len(chanInfo.Channels) != 0 {
-predErr = fmt.Errorf("carol should have no open "+
-"channels, instead she has %v",
-len(chanInfo.Channels))
-return false
-}
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+require.NoError(t.t, err)
+assertNodeNumChannels(t, carol, 0)

 // Finally, check that the Alice's payment is correctly marked
 // succeeded.
@@ -421,7 +290,5 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 err = checkPaymentStatus(
 ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
 )
-if err != nil {
-t.Fatalf(err.Error())
-}
+require.NoError(t.t, err)
 }
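The same conversion repeats in the test files below: every if err != nil { t.Fatalf(...) } block and hand-rolled length or value check collapses into a single testify assertion. For reference, the require forms the new code relies on, exercised against throwaway values with a plain *testing.T standing in for the harness's t.t:

package itest

import (
    "testing"

    "github.com/stretchr/testify/require"
)

// TestRequireForms shows the assertion shapes used throughout the new code.
func TestRequireForms(t *testing.T) {
    var err error
    txids := []string{"a", "b", "c"}

    require.NoError(t, err)                // replaces if err != nil { t.Fatalf(...) }
    require.Len(t, txids, 3)               // replaces manual len(...) != n checks
    require.Equal(t, uint32(2), uint32(2)) // replaces stage and state comparisons
    require.NotZero(t, len(txids))         // replaces "should have pending ..." checks
    require.NotNil(t, &txids)              // replaces nil checks on found transactions
}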
@@ -2,19 +2,18 @@ package itest

 import (
 "context"
-"fmt"
 "time"

 "github.com/btcsuite/btcd/chaincfg/chainhash"
 "github.com/btcsuite/btcd/wire"
 "github.com/btcsuite/btcutil"
-"github.com/davecgh/go-spew/spew"
 "github.com/lightningnetwork/lnd"
 "github.com/lightningnetwork/lnd/lncfg"
 "github.com/lightningnetwork/lnd/lnrpc"
 "github.com/lightningnetwork/lnd/lnrpc/routerrpc"
 "github.com/lightningnetwork/lnd/lntest"
 "github.com/lightningnetwork/lnd/lntest/wait"
+"github.com/stretchr/testify/require"
 )

 // testMultiHopHtlcLocalTimeout tests that in a multi-hop HTLC scenario, if the
@@ -58,8 +57,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 payHash := makeFakePayHash(t)

 _, err := alice.RouterClient.SendPaymentV2(
-ctx,
-&routerrpc.SendPaymentRequest{
+ctx, &routerrpc.SendPaymentRequest{
 Dest: carolPubKey,
 Amt: int64(dustHtlcAmt),
 PaymentHash: dustPayHash,
@@ -68,13 +66,10 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 FeeLimitMsat: noFeeLimitMsat,
 },
 )
-if err != nil {
-t.Fatalf("unable to send alice htlc: %v", err)
-}
+require.NoError(t.t, err)

 _, err = alice.RouterClient.SendPaymentV2(
-ctx,
-&routerrpc.SendPaymentRequest{
+ctx, &routerrpc.SendPaymentRequest{
 Dest: carolPubKey,
 Amt: int64(htlcAmt),
 PaymentHash: payHash,
@@ -83,21 +78,15 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 FeeLimitMsat: noFeeLimitMsat,
 },
 )
-if err != nil {
-t.Fatalf("unable to send alice htlc: %v", err)
-}
+require.NoError(t.t, err)

 // Verify that all nodes in the path now have two HTLC's with the
 // proper parameters.
-var predErr error
 nodes := []*lntest.HarnessNode{alice, bob, carol}
-err = wait.Predicate(func() bool {
-predErr = assertActiveHtlcs(nodes, dustPayHash, payHash)
-return predErr == nil
-}, time.Second*15)
-if err != nil {
-t.Fatalf("htlc mismatch: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertActiveHtlcs(nodes, dustPayHash, payHash)
+}, defaultTimeout)
+require.NoError(t.t, err)

 // Increase the fee estimate so that the following force close tx will
 // be cpfp'ed.
@@ -110,9 +99,8 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 numBlocks := padCLTV(
 uint32(finalCltvDelta - lncfg.DefaultOutgoingBroadcastDelta),
 )
-if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-t.Fatalf("unable to generate blocks: %v", err)
-}
+_, err = net.Miner.Node.Generate(numBlocks)
+require.NoError(t.t, err)

 // Bob's force close transaction should now be found in the mempool. If
 // there are anchors, we also expect Bob's anchor sweep.
@@ -122,15 +110,11 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 }

 bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
-if err != nil {
-t.Fatalf("unable to get txid: %v", err)
-}
+require.NoError(t.t, err)
 _, err = waitForNTxsInMempool(
 net.Miner.Node, expectedTxes, minerMempoolTimeout,
 )
-if err != nil {
-t.Fatalf("unable to find closing txid: %v", err)
-}
+require.NoError(t.t, err)
 closeTx := getSpendingTxInMempool(
 t, net.Miner.Node, minerMempoolTimeout, wire.OutPoint{
 Hash: *bobFundingTxid,
@@ -146,30 +130,25 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 // that we sent earlier. This means Alice should now only have a single
 // HTLC on her channel.
 nodes = []*lntest.HarnessNode{alice}
-err = wait.Predicate(func() bool {
-predErr = assertActiveHtlcs(nodes, payHash)
-return predErr == nil
-}, time.Second*15)
-if err != nil {
-t.Fatalf("htlc mismatch: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertActiveHtlcs(nodes, payHash)
+}, defaultTimeout)
+require.NoError(t.t, err)

 // With the closing transaction confirmed, we should expect Bob's HTLC
 // timeout transaction to be broadcast due to the expiry being reached.
 // If there are anchors, we also expect Carol's anchor sweep now.
-txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
-}
+txes, err := getNTxsFromMempool(
+net.Miner.Node, expectedTxes, minerMempoolTimeout,
+)
+require.NoError(t.t, err)

 // Lookup the timeout transaction that is expected to spend from the
 // closing tx. We distinguish it from a possibly anchor sweep by value.
 var htlcTimeout *chainhash.Hash
 for _, tx := range txes {
 prevOp := tx.TxIn[0].PreviousOutPoint
-if prevOp.Hash != closeTxid {
-t.Fatalf("tx not spending from closing tx")
-}
+require.Equal(t.t, closeTxid, prevOp.Hash)

 // Assume that the timeout tx doesn't spend an output of exactly
 // the size of the anchor.
@@ -178,9 +157,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 htlcTimeout = &hash
 }
 }
-if htlcTimeout == nil {
-t.Fatalf("htlc timeout tx not found in mempool")
-}
+require.NotNil(t.t, htlcTimeout)

 // We'll mine the remaining blocks in order to generate the sweep
 // transaction of Bob's commitment output.
@@ -188,9 +165,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,

 // Check that the sweep spends from the mined commitment.
 txes, err = getNTxsFromMempool(net.Miner.Node, 1, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("sweep not found: %v", err)
-}
+require.NoError(t.t, err)
 assertAllTxesSpendFrom(t, txes, closeTxid)

 // Bob's pending channel report should show that he has a commitment
@@ -199,21 +174,12 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 pendingChansRequest := &lnrpc.PendingChannelsRequest{}
 ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
 pendingChanResp, err := bob.PendingChannels(ctxt, pendingChansRequest)
-if err != nil {
-t.Fatalf("unable to query for pending channels: %v", err)
-}
+require.NoError(t.t, err)

-if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-t.Fatalf("bob should have pending for close chan but doesn't")
-}
+require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
 forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-if forceCloseChan.LimboBalance == 0 {
-t.Fatalf("bob should have nonzero limbo balance instead "+
-"has: %v", forceCloseChan.LimboBalance)
-}
-if len(forceCloseChan.PendingHtlcs) == 0 {
-t.Fatalf("bob should have pending htlc but doesn't")
-}
+require.NotZero(t.t, forceCloseChan.LimboBalance)
+require.NotZero(t.t, len(forceCloseChan.PendingHtlcs))

 // Now we'll mine an additional block, which should confirm Bob's commit
 // sweep. This block should also prompt Bob to broadcast their second
@@ -230,60 +196,33 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest,
 // Therefore, at this point, there should be no active HTLC's on the
 // commitment transaction from Alice -> Bob.
 nodes = []*lntest.HarnessNode{alice}
-err = wait.Predicate(func() bool {
-predErr = assertNumActiveHtlcs(nodes, 0)
-return predErr == nil
-}, time.Second*15)
-if err != nil {
-t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertNumActiveHtlcs(nodes, 0)
+}, defaultTimeout)
+require.NoError(t.t, err)

 // At this point, Bob should show that the pending HTLC has advanced to
 // the second stage and is to be swept.
 ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
-if err != nil {
-t.Fatalf("unable to query for pending channels: %v", err)
-}
+require.NoError(t.t, err)
 forceCloseChan = pendingChanResp.PendingForceClosingChannels[0]
-if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-t.Fatalf("bob's htlc should have advanced to the second stage: %v", err)
-}
+require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)

 // Next, we'll mine a final block that should confirm the second-layer
 // sweeping transaction.
-if _, err := net.Miner.Node.Generate(1); err != nil {
-t.Fatalf("unable to generate blocks: %v", err)
-}
+_, err = net.Miner.Node.Generate(1)
+require.NoError(t.t, err)

 // Once this transaction has been confirmed, Bob should detect that he
 // no longer has any pending channels.
-err = wait.Predicate(func() bool {
 ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err = bob.PendingChannels(ctxt, pendingChansRequest)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-predErr = fmt.Errorf("bob still has pending "+
-"channels but shouldn't: %v",
-spew.Sdump(pendingChanResp))
-return false
-}
-
-return true
-
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+require.NoError(t.t, err)

 // Coop close channel, expect no anchors.
 ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
 closeChannelAndAssertType(
-ctxt, t, net, alice, aliceChanPoint, false,
-false,
+ctxt, t, net, alice, aliceChanPoint, false, false,
 )
 }

@@ -2,11 +2,9 @@ package itest

 import (
 "context"
-"fmt"
 "time"

 "github.com/btcsuite/btcd/wire"
-"github.com/davecgh/go-spew/spew"
 "github.com/lightningnetwork/lnd"
 "github.com/lightningnetwork/lnd/lncfg"
 "github.com/lightningnetwork/lnd/lnrpc"
@@ -15,6 +13,7 @@ import (
 "github.com/lightningnetwork/lnd/lntest"
 "github.com/lightningnetwork/lnd/lntest/wait"
 "github.com/lightningnetwork/lnd/lntypes"
+"github.com/stretchr/testify/require"
 )

 // testMultiHopReceiverChainClaim tests that in the multi-hop setting, if the
@@ -53,9 +52,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
 defer cancel()
 carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
-if err != nil {
-t.Fatalf("unable to add invoice: %v", err)
-}
+require.NoError(t.t, err)

 // Now that we've created the invoice, we'll send a single payment from
 // Alice to Carol. We won't wait for the response however, as Carol
@@ -64,32 +61,21 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 defer cancel()

 _, err = alice.RouterClient.SendPaymentV2(
-ctx,
-&routerrpc.SendPaymentRequest{
+ctx, &routerrpc.SendPaymentRequest{
 PaymentRequest: carolInvoice.PaymentRequest,
 TimeoutSeconds: 60,
 FeeLimitMsat: noFeeLimitMsat,
 },
 )
-if err != nil {
-t.Fatalf("unable to send payment: %v", err)
-}
+require.NoError(t.t, err)

 // At this point, all 3 nodes should now have an active channel with
 // the created HTLC pending on all of them.
-var predErr error
 nodes := []*lntest.HarnessNode{alice, bob, carol}
-err = wait.Predicate(func() bool {
-predErr = assertActiveHtlcs(nodes, payHash[:])
-if predErr != nil {
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf("htlc mismatch: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertActiveHtlcs(nodes, payHash[:])
+}, defaultTimeout)
+require.NoError(t.t, err)

 // Wait for carol to mark invoice as accepted. There is a small gap to
 // bridge between adding the htlc to the channel and executing the exit
@@ -97,9 +83,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 waitForInvoiceAccepted(t, carol, payHash)

 restartBob, err := net.SuspendNode(bob)
-if err != nil {
-t.Fatalf("unable to suspend bob: %v", err)
-}
+require.NoError(t.t, err)

 // Settle invoice. This will just mark the invoice as settled, as there
 // is no link anymore to remove the htlc from the commitment tx. For
@@ -111,9 +95,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 _, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
 Preimage: preimage[:],
 })
-if err != nil {
-t.Fatalf("settle invoice: %v", err)
-}
+require.NoError(t.t, err)

 // Increase the fee estimate so that the following force close tx will
 // be cpfp'ed.
@@ -125,9 +107,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 numBlocks := padCLTV(uint32(
 invoiceReq.CltvExpiry - lncfg.DefaultIncomingBroadcastDelta,
 ))
-if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-t.Fatalf("unable to generate blocks")
-}
+_, err = net.Miner.Node.Generate(numBlocks)
+require.NoError(t.t, err)

 // At this point, Carol should broadcast her active commitment
 // transaction in order to go to the chain and sweep her HTLC. If there
@@ -139,14 +120,10 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 _, err = getNTxsFromMempool(
 net.Miner.Node, expectedTxes, minerMempoolTimeout,
 )
-if err != nil {
-t.Fatalf("expected transaction not found in mempool: %v", err)
-}
+require.NoError(t.t, err)

 bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
-if err != nil {
-t.Fatalf("unable to get txid: %v", err)
-}
+require.NoError(t.t, err)

 carolFundingPoint := wire.OutPoint{
 Hash: *bobFundingTxid,
@@ -164,9 +141,8 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 mineBlocks(t, net, 1, expectedTxes)

 // Restart bob again.
-if err := restartBob(); err != nil {
-t.Fatalf("unable to restart bob: %v", err)
-}
+err = restartBob()
+require.NoError(t.t, err)

 // After the force close transaction is mined, Carol should broadcast
 // her second level HTLC transaction. Bob will broadcast a sweep tx to
@@ -178,20 +154,18 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 if c == commitTypeAnchors {
 expectedTxes = 3
 }
-txes, err := getNTxsFromMempool(net.Miner.Node,
-expectedTxes, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("transactions not found in mempool: %v", err)
-}
+txes, err := getNTxsFromMempool(
+net.Miner.Node, expectedTxes, minerMempoolTimeout,
+)
+require.NoError(t.t, err)

 // All transactions should be spending from the commitment transaction.
 assertAllTxesSpendFrom(t, txes, closingTxid)

 // We'll now mine an additional block which should confirm both the
 // second layer transactions.
-if _, err := net.Miner.Node.Generate(1); err != nil {
-t.Fatalf("unable to generate block: %v", err)
-}
+_, err = net.Miner.Node.Generate(1)
+require.NoError(t.t, err)

 time.Sleep(time.Second * 4)

@@ -203,98 +177,52 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
 pendingChansRequest := &lnrpc.PendingChannelsRequest{}
 ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
 pendingChanResp, err := carol.PendingChannels(ctxt, pendingChansRequest)
-if err != nil {
-t.Fatalf("unable to query for pending channels: %v", err)
-}
+require.NoError(t.t, err)

-if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-t.Fatalf("carol should have pending for close chan but doesn't")
-}
+require.NotZero(t.t, len(pendingChanResp.PendingForceClosingChannels))
 forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-if forceCloseChan.LimboBalance == 0 {
-t.Fatalf("carol should have nonzero limbo balance instead "+
-"has: %v", forceCloseChan.LimboBalance)
-}
+require.NotZero(t.t, forceCloseChan.LimboBalance)

 // The pending HTLC carol has should also now be in stage 2.
-if len(forceCloseChan.PendingHtlcs) != 1 {
-t.Fatalf("carol should have pending htlc but doesn't")
-}
-if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-t.Fatalf("carol's htlc should have advanced to the second "+
-"stage: %v", err)
-}
+require.Len(t.t, forceCloseChan.PendingHtlcs, 1)
+require.Equal(t.t, uint32(2), forceCloseChan.PendingHtlcs[0].Stage)

 // Once the second-level transaction confirmed, Bob should have
 // extracted the preimage from the chain, and sent it back to Alice,
 // clearing the HTLC off-chain.
 nodes = []*lntest.HarnessNode{alice}
-err = wait.Predicate(func() bool {
-predErr = assertNumActiveHtlcs(nodes, 0)
-if predErr != nil {
-return false
-}
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf("htlc mismatch: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertNumActiveHtlcs(nodes, 0)
+}, defaultTimeout)
+require.NoError(t.t, err)

 // If we mine 4 additional blocks, then both outputs should now be
 // mature.
-if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
-t.Fatalf("unable to generate blocks: %v", err)
-}
+_, err = net.Miner.Node.Generate(defaultCSV)
+require.NoError(t.t, err)

 // We should have a new transaction in the mempool.
 _, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find bob's sweeping transaction: %v", err)
-}
+require.NoError(t.t, err)

 // Finally, if we mine an additional block to confirm these two sweep
 // transactions, Carol should not show a pending channel in her report
 // afterwards.
-if _, err := net.Miner.Node.Generate(1); err != nil {
-t.Fatalf("unable to mine block: %v", err)
-}
-err = wait.Predicate(func() bool {
+_, err = net.Miner.Node.Generate(1)
+require.NoError(t.t, err)
 ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err = carol.PendingChannels(ctxt, pendingChansRequest)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-predErr = fmt.Errorf("carol still has pending channels: %v",
-spew.Sdump(pendingChanResp))
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+require.NoError(t.t, err)

 // The invoice should show as settled for Carol, indicating that it was
 // swept on-chain.
 invoicesReq := &lnrpc.ListInvoiceRequest{}
 invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
-if err != nil {
-t.Fatalf("unable to retrieve invoices: %v", err)
-}
-if len(invoicesResp.Invoices) != 1 {
-t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
-}
+require.NoError(t.t, err)
+require.Len(t.t, invoicesResp.Invoices, 1)
 invoice := invoicesResp.Invoices[0]
-if invoice.State != lnrpc.Invoice_SETTLED {
-t.Fatalf("expected invoice to be settled on chain")
-}
-if invoice.AmtPaidSat != invoiceAmt {
-t.Fatalf("expected invoice to be settled with %d sat, got "+
-"%d sat", invoiceAmt, invoice.AmtPaidSat)
-}
+require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
+require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)

 // Finally, check that the Alice's payment is correctly marked
 // succeeded.
|
||||||
@ -302,9 +230,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest,
|
|||||||
err = checkPaymentStatus(
|
err = checkPaymentStatus(
|
||||||
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
|
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
|
||||||
)
|
)
|
||||||
if err != nil {
|
require.NoError(t.t, err)
|
||||||
t.Fatalf(err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
// We'll close out the channel between Alice and Bob, then shutdown
|
// We'll close out the channel between Alice and Bob, then shutdown
|
||||||
// carol to conclude the test.
|
// carol to conclude the test.
|
||||||
|
@@ -2,11 +2,8 @@ package itest

import (
"context"
-"fmt"
-"time"

"github.com/btcsuite/btcd/wire"
-"github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd"
"github.com/lightningnetwork/lnd/lncfg"
"github.com/lightningnetwork/lnd/lnrpc"
@@ -15,6 +12,7 @@ import (
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
+"github.com/stretchr/testify/require"
)

// testMultiHopHtlcRemoteChainClaim tests that in the multi-hop HTLC scenario,
@@ -51,9 +49,7 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout)
defer cancel()
carolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)
-if err != nil {
-t.Fatalf("unable to add invoice: %v", err)
-}
+require.NoError(t.t, err)

// Now that we've created the invoice, we'll send a single payment from
// Alice to Carol. We won't wait for the response however, as Carol
@@ -62,32 +58,21 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
defer cancel()

_, err = alice.RouterClient.SendPaymentV2(
-ctx,
-&routerrpc.SendPaymentRequest{
+ctx, &routerrpc.SendPaymentRequest{
PaymentRequest: carolInvoice.PaymentRequest,
TimeoutSeconds: 60,
FeeLimitMsat: noFeeLimitMsat,
},
)
-if err != nil {
-t.Fatalf("unable to send payment: %v", err)
-}
+require.NoError(t.t, err)

// At this point, all 3 nodes should now have an active channel with
// the created HTLC pending on all of them.
-var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
-err = wait.Predicate(func() bool {
-predErr = assertActiveHtlcs(nodes, payHash[:])
-if predErr != nil {
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf("htlc mismatch: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertActiveHtlcs(nodes, payHash[:])
+}, defaultTimeout)
+require.NoError(t.t, err)

// Wait for carol to mark invoice as accepted. There is a small gap to
// bridge between adding the htlc to the channel and executing the exit
@@ -102,22 +87,20 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// immediately force close the channel by broadcast her commitment
// transaction.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
-aliceForceClose := closeChannelAndAssertType(ctxt, t, net, alice,
-aliceChanPoint, c == commitTypeAnchors, true)
+aliceForceClose := closeChannelAndAssertType(
+ctxt, t, net, alice, aliceChanPoint, c == commitTypeAnchors,
+true,
+)

// Wait for the channel to be marked pending force close.
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
err = waitForChannelPendingForceClose(ctxt, alice, aliceChanPoint)
-if err != nil {
-t.Fatalf("channel not pending force close: %v", err)
-}
+require.NoError(t.t, err)

// Mine enough blocks for Alice to sweep her funds from the force
// closed channel.
_, err = net.Miner.Node.Generate(defaultCSV)
-if err != nil {
-t.Fatalf("unable to generate blocks: %v", err)
-}
+require.NoError(t.t, err)

// Alice should now sweep her funds. If there are anchors, Alice should
// also sweep hers.
@@ -125,16 +108,14 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if c == commitTypeAnchors {
expectedTxes = 2
}
-_, err = waitForNTxsInMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find sweeping tx in mempool: %v", err)
-}
+_, err = waitForNTxsInMempool(
+net.Miner.Node, expectedTxes, minerMempoolTimeout,
+)
+require.NoError(t.t, err)

// Suspend bob, so Carol is forced to go on chain.
restartBob, err := net.SuspendNode(bob)
-if err != nil {
-t.Fatalf("unable to suspend bob: %v", err)
-}
+require.NoError(t.t, err)

// Settle invoice. This will just mark the invoice as settled, as there
// is no link anymore to remove the htlc from the commitment tx. For
@@ -146,31 +127,25 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
_, err = carol.SettleInvoice(ctx, &invoicesrpc.SettleInvoiceMsg{
Preimage: preimage[:],
})
-if err != nil {
-t.Fatalf("settle invoice: %v", err)
-}
+require.NoError(t.t, err)

// We'll now mine enough blocks so Carol decides that she needs to go
// on-chain to claim the HTLC as Bob has been inactive.
-numBlocks := padCLTV(uint32(invoiceReq.CltvExpiry-
-lncfg.DefaultIncomingBroadcastDelta) - defaultCSV)
+numBlocks := padCLTV(uint32(
+invoiceReq.CltvExpiry-lncfg.DefaultIncomingBroadcastDelta,
+) - defaultCSV)

-if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-t.Fatalf("unable to generate blocks")
-}
+_, err = net.Miner.Node.Generate(numBlocks)
+require.NoError(t.t, err)

// Carol's commitment transaction should now be in the mempool. If there
// are anchors, Carol also sweeps her anchor.
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
-if err != nil {
-t.Fatalf("unable to find carol's txes: %v", err)
-}
+require.NoError(t.t, err)
bobFundingTxid, err := lnd.GetChanPointFundingTxid(bobChanPoint)
-if err != nil {
-t.Fatalf("unable to get txid: %v", err)
-}
+require.NoError(t.t, err)
carolFundingPoint := wire.OutPoint{
Hash: *bobFundingTxid,
Index: bobChanPoint.OutputIndex,
@@ -186,16 +161,12 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// Mine a block, which should contain: the commitment, possibly an
// anchor sweep and the coinbase tx.
block := mineBlocks(t, net, 1, expectedTxes)[0]
-if len(block.Transactions) != expectedTxes+1 {
-t.Fatalf("expected %v transactions in block, got %v",
-expectedTxes, len(block.Transactions))
-}
+require.Len(t.t, block.Transactions, expectedTxes+1)
assertTxInBlock(t, block, &closingTxid)

// Restart bob again.
-if err := restartBob(); err != nil {
-t.Fatalf("unable to restart bob: %v", err)
-}
+err = restartBob()
+require.NoError(t.t, err)

// After the force close transacion is mined, Carol should broadcast her
// second level HTLC transacion. Bob will broadcast a sweep tx to sweep
@@ -206,21 +177,17 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
if c == commitTypeAnchors {
expectedTxes = 3
}
-txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes,
-minerMempoolTimeout)
-if err != nil {
-t.Fatalf("transactions not found in mempool: %v", err)
-}
+txes, err := getNTxsFromMempool(
+net.Miner.Node, expectedTxes, minerMempoolTimeout,
+)
+require.NoError(t.t, err)

// All transactions should be pending from the commitment transaction.
assertAllTxesSpendFrom(t, txes, closingTxid)

// Mine a block to confirm the two transactions (+ coinbase).
block = mineBlocks(t, net, 1, expectedTxes)[0]
-if len(block.Transactions) != expectedTxes+1 {
-t.Fatalf("expected 3 transactions in block, got %v",
-len(block.Transactions))
-}
+require.Len(t.t, block.Transactions, expectedTxes+1)

// Keep track of the second level tx maturity.
carolSecondLevelCSV := uint32(defaultCSV)
@@ -228,114 +195,60 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
// When Bob notices Carol's second level transaction in the block, he
// will extract the preimage and broadcast a sweep tx to directly claim
// the HTLC in his (already closed) channel with Alice.
-bobHtlcSweep, err := waitForTxInMempool(net.Miner.Node,
-minerMempoolTimeout)
-if err != nil {
-t.Fatalf("transactions not found in mempool: %v", err)
-}
+bobHtlcSweep, err := waitForTxInMempool(
+net.Miner.Node, minerMempoolTimeout,
+)
+require.NoError(t.t, err)

// It should spend from the commitment in the channel with Alice.
tx, err := net.Miner.Node.GetRawTransaction(bobHtlcSweep)
-if err != nil {
-t.Fatalf("unable to get txn: %v", err)
-}
-if tx.MsgTx().TxIn[0].PreviousOutPoint.Hash != *aliceForceClose {
-t.Fatalf("tx did not spend from alice's force close tx")
-}
+require.NoError(t.t, err)
+require.Equal(
+t.t, *aliceForceClose, tx.MsgTx().TxIn[0].PreviousOutPoint.Hash,
+)

// We'll now mine a block which should confirm Bob's HTLC sweep
// transaction.
block = mineBlocks(t, net, 1, 1)[0]
-if len(block.Transactions) != 2 {
-t.Fatalf("expected 2 transactions in block, got %v",
-len(block.Transactions))
-}
+require.Len(t.t, block.Transactions, 2)
assertTxInBlock(t, block, bobHtlcSweep)
carolSecondLevelCSV--

// Now that the sweeping transaction has been confirmed, Bob should now
// recognize that all contracts have been fully resolved, and show no
// pending close channels.
-pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-predErr = fmt.Errorf("bob still has pending channels "+
-"but shouldn't: %v", spew.Sdump(pendingChanResp))
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+require.NoError(t.t, err)

// If we then mine 3 additional blocks, Carol's second level tx will
// mature, and she should pull the funds.
-if _, err := net.Miner.Node.Generate(carolSecondLevelCSV); err != nil {
-t.Fatalf("unable to generate block: %v", err)
-}
+_, err = net.Miner.Node.Generate(carolSecondLevelCSV)
+require.NoError(t.t, err)

-carolSweep, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find Carol's sweeping transaction: %v", err)
-}
+carolSweep, err := waitForTxInMempool(
+net.Miner.Node, minerMempoolTimeout,
+)
+require.NoError(t.t, err)

// When Carol's sweep gets confirmed, she should have no more pending
// channels.
block = mineBlocks(t, net, 1, 1)[0]
assertTxInBlock(t, block, carolSweep)

-pendingChansRequest = &lnrpc.PendingChannelsRequest{}
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := carol.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-predErr = fmt.Errorf("carol still has pending channels "+
-"but shouldn't: %v", spew.Sdump(pendingChanResp))
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, carol, 0, nil)
+require.NoError(t.t, err)

// The invoice should show as settled for Carol, indicating that it was
// swept on-chain.
invoicesReq := &lnrpc.ListInvoiceRequest{}
invoicesResp, err := carol.ListInvoices(ctxb, invoicesReq)
-if err != nil {
-t.Fatalf("unable to retrieve invoices: %v", err)
-}
-if len(invoicesResp.Invoices) != 1 {
-t.Fatalf("expected 1 invoice, got %d", len(invoicesResp.Invoices))
-}
+require.NoError(t.t, err)
+require.Len(t.t, invoicesResp.Invoices, 1)
invoice := invoicesResp.Invoices[0]
-if invoice.State != lnrpc.Invoice_SETTLED {
-t.Fatalf("expected invoice to be settled on chain")
-}
-if invoice.AmtPaidSat != invoiceAmt {
-t.Fatalf("expected invoice to be settled with %d sat, got "+
-"%d sat", invoiceAmt, invoice.AmtPaidSat)
-}
+require.Equal(t.t, lnrpc.Invoice_SETTLED, invoice.State)
+require.Equal(t.t, int64(invoiceAmt), invoice.AmtPaidSat)

// Finally, check that the Alice's payment is correctly marked
// succeeded.
@@ -343,7 +256,5 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest
err = checkPaymentStatus(
ctxt, alice, preimage, lnrpc.Payment_SUCCEEDED,
)
-if err != nil {
-t.Fatalf(err.Error())
-}
+require.NoError(t.t, err)
}
@@ -3,14 +3,12 @@ package itest
import (
"context"
"fmt"
-"time"

"github.com/btcsuite/btcutil"
-"github.com/davecgh/go-spew/spew"
-"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
+"github.com/stretchr/testify/require"
)

// testMultiHopLocalForceCloseOnChainHtlcTimeout tests that in a multi-hop HTLC
@@ -47,8 +45,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
carolPubKey := carol.PubKey[:]
payHash := makeFakePayHash(t)
_, err := alice.RouterClient.SendPaymentV2(
-ctx,
-&routerrpc.SendPaymentRequest{
+ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
@@ -57,21 +54,15 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
FeeLimitMsat: noFeeLimitMsat,
},
)
-if err != nil {
-t.Fatalf("unable to send alice htlc: %v", err)
-}
+require.NoError(t.t, err)

// Once the HTLC has cleared, all channels in our mini network should
// have the it locked in.
-var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
-err = wait.Predicate(func() bool {
-predErr = assertActiveHtlcs(nodes, payHash)
-return predErr == nil
-}, time.Second*15)
-if err != nil {
-t.Fatalf("htlc mismatch: %v", err)
-}
+err = wait.NoError(func() error {
+return assertActiveHtlcs(nodes, payHash)
+}, defaultTimeout)
+require.NoError(t.t, err)

// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@@ -87,105 +78,64 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,

// At this point, Bob should have a pending force close channel as he
// just went to chain.
-pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(ctxt,
-pendingChansRequest)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-predErr = fmt.Errorf("bob should have pending for " +
-"close chan but doesn't")
-return false
+err = waitForNumChannelPendingForceClose(
+ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+if c.LimboBalance == 0 {
+return fmt.Errorf("bob should have nonzero "+
+"limbo balance instead has: %v",
+c.LimboBalance)
}

-forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-if forceCloseChan.LimboBalance == 0 {
-predErr = fmt.Errorf("bob should have nonzero limbo "+
-"balance instead has: %v",
-forceCloseChan.LimboBalance)
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+return nil
+},
+)
+require.NoError(t.t, err)

// We'll mine defaultCSV blocks in order to generate the sweep
// transaction of Bob's funding output. If there are anchors, mine
// Carol's anchor sweep too.
if c == commitTypeAnchors {
_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find carol's anchor sweep tx: %v", err)
-}
+require.NoError(t.t, err)
}

-if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
-t.Fatalf("unable to generate blocks: %v", err)
-}
+_, err = net.Miner.Node.Generate(defaultCSV)
+require.NoError(t.t, err)

_, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find bob's funding output sweep tx: %v", err)
-}
+require.NoError(t.t, err)

// We'll now mine enough blocks for the HTLC to expire. After this, Bob
// should hand off the now expired HTLC output to the utxo nursery.
numBlocks := padCLTV(uint32(finalCltvDelta - defaultCSV - 1))
-if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-t.Fatalf("unable to generate blocks: %v", err)
-}
+_, err = net.Miner.Node.Generate(numBlocks)
+require.NoError(t.t, err)

// Bob's pending channel report should show that he has a single HTLC
// that's now in stage one.
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
+err = waitForNumChannelPendingForceClose(
+ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+if len(c.PendingHtlcs) != 1 {
+return fmt.Errorf("bob should have pending " +
+"htlc but doesn't")
}

-if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-predErr = fmt.Errorf("bob should have pending force " +
-"close chan but doesn't")
-return false
-}
-
-forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-if len(forceCloseChan.PendingHtlcs) != 1 {
-predErr = fmt.Errorf("bob should have pending htlc " +
-"but doesn't")
-return false
-}
-if forceCloseChan.PendingHtlcs[0].Stage != 1 {
-predErr = fmt.Errorf("bob's htlc should have "+
+if c.PendingHtlcs[0].Stage != 1 {
+return fmt.Errorf("bob's htlc should have "+
"advanced to the first stage: %v", err)
-return false
}

-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-}
+return nil
+},
+)
+require.NoError(t.t, err)

// We should also now find a transaction in the mempool, as Bob should
// have broadcast his second layer timeout transaction.
timeoutTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find bob's htlc timeout tx: %v", err)
-}
+require.NoError(t.t, err)

// Next, we'll mine an additional block. This should serve to confirm
// the second layer timeout transaction.
@@ -195,62 +145,39 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// With the second layer timeout transaction confirmed, Bob should have
// canceled backwards the HTLC that carol sent.
nodes = []*lntest.HarnessNode{alice}
-err = wait.Predicate(func() bool {
-predErr = assertNumActiveHtlcs(nodes, 0)
-return predErr == nil
-}, time.Second*15)
-if err != nil {
-t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertNumActiveHtlcs(nodes, 0)
+}, defaultTimeout)
+require.NoError(t.t, err)

// Additionally, Bob should now show that HTLC as being advanced to the
// second stage.
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
+err = waitForNumChannelPendingForceClose(
+ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+if len(c.PendingHtlcs) != 1 {
+return fmt.Errorf("bob should have pending " +
+"htlc but doesn't")
}

-if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-predErr = fmt.Errorf("bob should have pending for " +
-"close chan but doesn't")
-return false
-}
-
-forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-if len(forceCloseChan.PendingHtlcs) != 1 {
-predErr = fmt.Errorf("bob should have pending htlc " +
-"but doesn't")
-return false
-}
-if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-predErr = fmt.Errorf("bob's htlc should have "+
+if c.PendingHtlcs[0].Stage != 2 {
+return fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
-return false
}

-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-}
+return nil
+},
+)
+require.NoError(t.t, err)

// We'll now mine 4 additional blocks. This should be enough for Bob's
// CSV timelock to expire and the sweeping transaction of the HTLC to be
// broadcast.
-if _, err := net.Miner.Node.Generate(defaultCSV); err != nil {
-t.Fatalf("unable to mine blocks: %v", err)
-}
+_, err = net.Miner.Node.Generate(defaultCSV)
+require.NoError(t.t, err)

sweepTx, err := waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find bob's htlc sweep tx: %v", err)
-}
+require.NoError(t.t, err)

// We'll then mine a final block which should confirm this second layer
// sweep transaction.
@@ -259,27 +186,9 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,

// At this point, Bob should no longer show any channels as pending
// close.
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-predErr = fmt.Errorf("bob still has pending channels "+
-"but shouldn't: %v", spew.Sdump(pendingChanResp))
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+require.NoError(t.t, err)

// Coop close, no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
@@ -3,14 +3,12 @@ package itest
import (
"context"
"fmt"
-"time"

"github.com/btcsuite/btcutil"
-"github.com/davecgh/go-spew/spew"
-"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
+"github.com/stretchr/testify/require"
)

// testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a
@@ -48,8 +46,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
carolPubKey := carol.PubKey[:]
payHash := makeFakePayHash(t)
_, err := alice.RouterClient.SendPaymentV2(
-ctx,
-&routerrpc.SendPaymentRequest{
+ctx, &routerrpc.SendPaymentRequest{
Dest: carolPubKey,
Amt: int64(htlcAmt),
PaymentHash: payHash,
@@ -58,21 +55,15 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
FeeLimitMsat: noFeeLimitMsat,
},
)
-if err != nil {
-t.Fatalf("unable to send alice htlc: %v", err)
-}
+require.NoError(t.t, err)

// Once the HTLC has cleared, all the nodes in our mini network should
// show that the HTLC has been locked in.
-var predErr error
nodes := []*lntest.HarnessNode{alice, bob, carol}
-err = wait.Predicate(func() bool {
-predErr = assertActiveHtlcs(nodes, payHash)
-return predErr == nil
-}, time.Second*15)
-if err != nil {
-t.Fatalf("htlc mismatch: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertActiveHtlcs(nodes, payHash)
+}, defaultTimeout)
+require.NoError(t.t, err)

// Increase the fee estimate so that the following force close tx will
// be cpfp'ed.
@@ -90,28 +81,9 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,

// At this point, Bob should have a pending force close channel as
// Carol has gone directly to chain.
-pendingChansRequest := &lnrpc.PendingChannelsRequest{}
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for "+
-"pending channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-predErr = fmt.Errorf("bob should have pending " +
-"force close channels but doesn't")
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil)
+require.NoError(t.t, err)

// Bob can sweep his output immediately. If there is an anchor, Bob will
// sweep that as well.
@@ -123,55 +95,39 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
_, err = waitForNTxsInMempool(
net.Miner.Node, expectedTxes, minerMempoolTimeout,
)
-if err != nil {
-t.Fatalf("failed to find txes in miner mempool: %v", err)
-}
+require.NoError(t.t, err)

// Next, we'll mine enough blocks for the HTLC to expire. At this
// point, Bob should hand off the output to his internal utxo nursery,
// which will broadcast a sweep transaction.
numBlocks := padCLTV(finalCltvDelta - 1)
-if _, err := net.Miner.Node.Generate(numBlocks); err != nil {
-t.Fatalf("unable to generate blocks: %v", err)
-}
+_, err = net.Miner.Node.Generate(numBlocks)
+require.NoError(t.t, err)

// If we check Bob's pending channel report, it should show that he has
// a single HTLC that's now in the second stage, as skip the initial
// first stage since this is a direct HTLC.
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
+err = waitForNumChannelPendingForceClose(
+ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
+if len(c.PendingHtlcs) != 1 {
+return fmt.Errorf("bob should have pending " +
+"htlc but doesn't")
}

-if len(pendingChanResp.PendingForceClosingChannels) == 0 {
-predErr = fmt.Errorf("bob should have pending for " +
-"close chan but doesn't")
-return false
-}
-
-forceCloseChan := pendingChanResp.PendingForceClosingChannels[0]
-if len(forceCloseChan.PendingHtlcs) != 1 {
-predErr = fmt.Errorf("bob should have pending htlc " +
-"but doesn't")
-return false
-}
-if forceCloseChan.PendingHtlcs[0].Stage != 2 {
-predErr = fmt.Errorf("bob's htlc should have "+
+if c.PendingHtlcs[0].Stage != 2 {
+return fmt.Errorf("bob's htlc should have "+
"advanced to the second stage: %v", err)
-return false
}

-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf("bob didn't hand off time-locked HTLC: %v", predErr)
-}
+return nil
+},
+)
+require.NoError(t.t, err)
+
+// We need to generate an additional block to trigger the sweep.
+_, err = net.Miner.Node.Generate(1)
+require.NoError(t.t, err)

// Bob's sweeping transaction should now be found in the mempool at
// this point.
@@ -185,14 +141,10 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// we'll fail.
// TODO(halseth): can we use waitForChannelPendingForceClose to
// avoid this hack?
-if _, err := net.Miner.Node.Generate(1); err != nil {
-t.Fatalf("unable to generate block: %v", err)
-}
+_, err = net.Miner.Node.Generate(1)
+require.NoError(t.t, err)
sweepTx, err = waitForTxInMempool(net.Miner.Node, minerMempoolTimeout)
-if err != nil {
-t.Fatalf("unable to find bob's sweeping transaction: "+
-"%v", err)
-}
+require.NoError(t.t, err)
}

// If we mine an additional block, then this should confirm Bob's
@@ -204,45 +156,23 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,
// cancel back that HTLC. As a result, Alice should not know of any
// active HTLC's.
nodes = []*lntest.HarnessNode{alice}
-err = wait.Predicate(func() bool {
-predErr = assertNumActiveHtlcs(nodes, 0)
-return predErr == nil
-}, time.Second*15)
-if err != nil {
-t.Fatalf("alice's channel still has active htlc's: %v", predErr)
-}
+err = wait.NoError(func() error {
+return assertNumActiveHtlcs(nodes, 0)
+}, defaultTimeout)
+require.NoError(t.t, err)

// Now we'll check Bob's pending channel report. Since this was Carol's
// commitment, he doesn't have to wait for any CSV delays. As a result,
// he should show no additional pending transactions.
-err = wait.Predicate(func() bool {
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
-pendingChanResp, err := bob.PendingChannels(
-ctxt, pendingChansRequest,
-)
-if err != nil {
-predErr = fmt.Errorf("unable to query for pending "+
-"channels: %v", err)
-return false
-}
-if len(pendingChanResp.PendingForceClosingChannels) != 0 {
-predErr = fmt.Errorf("bob still has pending channels "+
-"but shouldn't: %v", spew.Sdump(pendingChanResp))
-return false
-}
-
-return true
-}, time.Second*15)
-if err != nil {
-t.Fatalf(predErr.Error())
-}
+err = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)
+require.NoError(t.t, err)

// We'll close out the test by closing the channel from Alice to Bob,
// and then shutting down the new node we created as its no longer
// needed. Coop close, no anchors.
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)
closeChannelAndAssertType(
-ctxt, t, net, alice, aliceChanPoint, false,
-false,
+ctxt, t, net, alice, aliceChanPoint, false, false,
)
}
@@ -330,37 +330,68 @@ func waitForChannelPendingForceClose(ctx context.Context,
Index: fundingChanPoint.OutputIndex,
}

-var predErr error
-err = wait.Predicate(func() bool {
+return wait.NoError(func() error {
pendingChansRequest := &lnrpc.PendingChannelsRequest{}
pendingChanResp, err := node.PendingChannels(
ctx, pendingChansRequest,
)
if err != nil {
-predErr = fmt.Errorf("unable to get pending "+
-"channels: %v", err)
-return false
+return fmt.Errorf("unable to get pending channels: %v",
+err)
}

forceClose, err := findForceClosedChannel(pendingChanResp, &op)
if err != nil {
-predErr = err
-return false
+return err
}

// We must wait until the UTXO nursery has received the channel
// and is aware of its maturity height.
if forceClose.MaturityHeight == 0 {
-predErr = fmt.Errorf("channel had maturity height of 0")
-return false
-}
-return true
-}, time.Second*15)
-if err != nil {
-return predErr
+return fmt.Errorf("channel had maturity height of 0")
}

return nil
+}, defaultTimeout)
+}
+
+// lnrpcForceCloseChannel is a short type alias for a ridiculously long type
+// name in the lnrpc package.
+type lnrpcForceCloseChannel = lnrpc.PendingChannelsResponse_ForceClosedChannel
+
+// waitForNumChannelPendingForceClose waits for the node to report a certain
+// number of channels in state pending force close.
+func waitForNumChannelPendingForceClose(ctx context.Context,
+node *lntest.HarnessNode, expectedNum int,
+perChanCheck func(channel *lnrpcForceCloseChannel) error) error {
+
+return wait.NoError(func() error {
+resp, err := node.PendingChannels(
+ctx, &lnrpc.PendingChannelsRequest{},
+)
+if err != nil {
+return fmt.Errorf("unable to get pending channels: %v",
+err)
+}
+
+forceCloseChans := resp.PendingForceClosingChannels
+if len(forceCloseChans) != expectedNum {
+return fmt.Errorf("bob should have %d pending "+
+"force close channels but has %d", expectedNum,
+len(forceCloseChans))
+}
+
+if perChanCheck != nil {
+for _, forceCloseChan := range forceCloseChans {
+err := perChanCheck(forceCloseChan)
+if err != nil {
+return err
+}
+}
+}
+
+return nil
+}, defaultTimeout)
}

// cleanupForceClose mines a force close commitment found in the mempool and
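For orientation only, a minimal sketch of how the new waitForNumChannelPendingForceClose helper (defined in the hunk above) is intended to be called, mirroring the call sites shown elsewhere in this diff; it assumes the surrounding itest scaffolding (t, bob, ctxb, defaultTimeout) from these test files.

// Sketch only: wait until Bob reports exactly one pending force-closed
// channel, then run an optional per-channel check against it.
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
err := waitForNumChannelPendingForceClose(
	ctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {
		if c.LimboBalance == 0 {
			return fmt.Errorf("expected nonzero limbo balance")
		}
		return nil
	},
)
require.NoError(t.t, err)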
@ -1518,7 +1549,7 @@ func testUnconfirmedChannelFunding(net *lntest.NetworkHarness, t *harnessTest) {
|
|||||||
|
|
||||||
// testPaymentFollowingChannelOpen tests that the channel transition from
|
// testPaymentFollowingChannelOpen tests that the channel transition from
|
||||||
// 'pending' to 'open' state does not cause any inconsistencies within other
|
// 'pending' to 'open' state does not cause any inconsistencies within other
|
||||||
// subsystems trying to udpate the channel state in the db. We follow this
|
// subsystems trying to update the channel state in the db. We follow this
|
||||||
// transition with a payment that updates the commitment state and verify that
|
// transition with a payment that updates the commitment state and verify that
|
||||||
// the pending state is up to date.
|
// the pending state is up to date.
|
||||||
func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) {
|
func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) {
|
||||||
@ -1550,7 +1581,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
|
|||||||
t.Fatalf("Bob restart failed: %v", err)
|
t.Fatalf("Bob restart failed: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// We ensure that Bob reconnets to Alice.
|
// We ensure that Bob reconnects to Alice.
|
||||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||||
if err := net.EnsureConnected(ctxt, net.Bob, net.Alice); err != nil {
|
if err := net.EnsureConnected(ctxt, net.Bob, net.Alice); err != nil {
|
||||||
t.Fatalf("peers unable to reconnect after restart: %v", err)
|
t.Fatalf("peers unable to reconnect after restart: %v", err)
|
||||||
@ -1559,7 +1590,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
|
|||||||
// We mine one block for the channel to be confirmed.
|
// We mine one block for the channel to be confirmed.
|
||||||
_ = mineBlocks(t, net, 6, 1)[0]
|
_ = mineBlocks(t, net, 6, 1)[0]
|
||||||
|
|
||||||
// We verify that the chanel is open from both nodes point of view.
|
// We verify that the channel is open from both nodes point of view.
|
||||||
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0)
|
assertNumOpenChannelsPending(ctxt, t, net.Alice, net.Bob, 0)
|
||||||
@ -1575,14 +1606,11 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest)
|
|||||||
|
|
||||||
// Send payment to Bob so that a channel update to disk will be
|
// Send payment to Bob so that a channel update to disk will be
|
||||||
// executed.
|
// executed.
|
||||||
sendAndAssertSuccess(
|
sendAndAssertSuccess(t, net.Alice, &routerrpc.SendPaymentRequest{
|
||||||
t, net.Alice,
|
|
||||||
&routerrpc.SendPaymentRequest{
|
|
||||||
PaymentRequest: bobPayReqs[0],
|
PaymentRequest: bobPayReqs[0],
|
||||||
TimeoutSeconds: 60,
|
TimeoutSeconds: 60,
|
||||||
FeeLimitSat: 1000000,
|
FeeLimitSat: 1000000,
|
||||||
},
|
})
|
||||||
)
|
|
||||||
|
|
||||||
// At this point we want to make sure the channel is opened and not
|
// At this point we want to make sure the channel is opened and not
|
||||||
// pending.
|
// pending.
|
||||||
@ -13987,20 +14015,27 @@ func sendAndAssertSuccess(t *harnessTest, node *lntest.HarnessNode,
|
|||||||
ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
|
ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
|
var result *lnrpc.Payment
|
||||||
|
err := wait.NoError(func() error {
|
||||||
stream, err := node.RouterClient.SendPaymentV2(ctx, req)
|
stream, err := node.RouterClient.SendPaymentV2(ctx, req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unable to send payment: %v", err)
|
return fmt.Errorf("unable to send payment: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
result, err := getPaymentResult(stream)
|
result, err = getPaymentResult(stream)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unable to get payment result: %v", err)
|
return fmt.Errorf("unable to get payment result: %v",
|
||||||
|
err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if result.Status != lnrpc.Payment_SUCCEEDED {
|
if result.Status != lnrpc.Payment_SUCCEEDED {
|
||||||
t.Fatalf("payment failed: %v", result.Status)
|
return fmt.Errorf("payment failed: %v", result.Status)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}, defaultTimeout)
|
||||||
|
require.NoError(t.t, err)
|
||||||
|
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
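The hunk above moves the payment send and its result check inside wait.NoError, so a transient failure is retried until defaultTimeout expires instead of aborting the test on the first error. A minimal, self-contained sketch of that retry idiom, assuming the lntest/wait import path shown below (the closure body is a stand-in, not the payment code):

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/lightningnetwork/lnd/lntest/wait"
)

func main() {
	start := time.Now()

	// wait.NoError re-runs the closure until it returns nil or the
	// timeout elapses; only then does the error propagate to the caller.
	err := wait.NoError(func() error {
		if time.Since(start) < 2*time.Second {
			// Simulated transient failure: returning an error
			// simply schedules another attempt.
			return errors.New("not ready yet")
		}
		return nil
	}, 10*time.Second)

	fmt.Println("final result:", err)
}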
@ -14054,7 +14089,7 @@ func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) (
|
|||||||
// TestLightningNetworkDaemon performs a series of integration tests amongst a
|
// TestLightningNetworkDaemon performs a series of integration tests amongst a
|
||||||
// programmatically driven network of lnd nodes.
|
// programmatically driven network of lnd nodes.
|
||||||
func TestLightningNetworkDaemon(t *testing.T) {
|
func TestLightningNetworkDaemon(t *testing.T) {
|
||||||
// If no tests are regsitered, then we can exit early.
|
// If no tests are registered, then we can exit early.
|
||||||
if len(testsCases) == 0 {
|
if len(testsCases) == 0 {
|
||||||
t.Skip("integration tests not selected with flag 'rpctest'")
|
t.Skip("integration tests not selected with flag 'rpctest'")
|
||||||
}
|
}
|
||||||
@ -14075,14 +14110,9 @@ func TestLightningNetworkDaemon(t *testing.T) {
|
|||||||
//
|
//
|
||||||
// We will also connect it to our chain backend.
|
// We will also connect it to our chain backend.
|
||||||
minerLogDir := "./.minerlogs"
|
minerLogDir := "./.minerlogs"
|
||||||
handlers := &rpcclient.NotificationHandlers{
|
|
||||||
OnTxAccepted: func(hash *chainhash.Hash, amt btcutil.Amount) {
|
|
||||||
lndHarness.OnTxAccepted(hash)
|
|
||||||
},
|
|
||||||
}
|
|
||||||
miner, minerCleanUp, err := lntest.NewMiner(
|
miner, minerCleanUp, err := lntest.NewMiner(
|
||||||
minerLogDir, "output_btcd_miner.log",
|
minerLogDir, "output_btcd_miner.log",
|
||||||
harnessNetParams, handlers,
|
harnessNetParams, &rpcclient.NotificationHandlers{},
|
||||||
)
|
)
|
||||||
require.NoError(t, err, "failed to create new miner")
|
require.NoError(t, err, "failed to create new miner")
|
||||||
defer func() {
|
defer func() {
|
||||||
|
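With the OnTxAccepted handler gone, the miner is created with empty rpcclient.NotificationHandlers, so mempool membership has to be discovered by polling rather than by push notifications. A hedged sketch of such a poll, assuming btcd's rpcclient.GetRawMempool and the same wait helper; pollMempoolForTx is an illustrative stand-in, not the harness implementation:

package harnesssketch

import (
	"fmt"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/rpcclient"
	"github.com/lightningnetwork/lnd/lntest/wait"
)

// pollMempoolForTx is an illustrative stand-in (not the harness code): it
// polls the miner's mempool until the target txid shows up or the timeout
// expires, replacing the removed push-style OnTxAccepted callback.
func pollMempoolForTx(client *rpcclient.Client, txid chainhash.Hash,
	timeout time.Duration) error {

	return wait.NoError(func() error {
		mempool, err := client.GetRawMempool()
		if err != nil {
			return fmt.Errorf("unable to fetch mempool: %v", err)
		}
		for _, memTx := range mempool {
			if *memTx == txid {
				// Target transaction found in the mempool.
				return nil
			}
		}
		return fmt.Errorf("txid %v not yet in mempool", txid)
	}, timeout)
}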
@ -2,12 +2,12 @@
|
|||||||
<time> [ERR] BRAR: Unable to register for conf for txid(<hex>): TxNotifier is exiting
|
<time> [ERR] BRAR: Unable to register for conf for txid(<hex>): TxNotifier is exiting
|
||||||
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: replacement transaction <hex> has an insufficient absolute fee: needs <amt>, has <amt>
|
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: replacement transaction <hex> has an insufficient absolute fee: needs <amt>, has <amt>
|
||||||
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: replacement transaction <hex> has an insufficient fee rate: needs more than <amt>, has <amt>
|
<time> [ERR] BTCN: Broadcast attempt failed: rejected by <ip>: replacement transaction <hex> has an insufficient fee rate: needs more than <amt>, has <amt>
|
||||||
|
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: chacha20poly1305: message authentication failed
|
||||||
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: EOF
|
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: EOF
|
||||||
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: read tcp <ip>-><ip>: i/o timeout
|
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: read tcp <ip>-><ip>: i/o timeout
|
||||||
<time> [ERR] BTCN: Unable to process block connected (height=<height>, hash=<hex>): out of order block <hex>: expected PrevBlock <hex>, got <hex>
|
<time> [ERR] BTCN: Unable to process block connected (height=<height>, hash=<hex>): out of order block <hex>: expected PrevBlock <hex>, got <hex>
|
||||||
<time> [ERR] BTCN: Unknown connid=<id>
|
<time> [ERR] BTCN: Unknown connid=<id>
|
||||||
<time> [ERR] CHFT: Close channel <chan_point> unknown to store
|
<time> [ERR] CHFT: Close channel <chan_point> unknown to store
|
||||||
<time> [ERR] CNCT: *contractcourt.commitSweepResolver(<chan_point>): unable to sweep input: remote party swept utxo
|
|
||||||
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to advance state: channel not found
|
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to advance state: channel not found
|
||||||
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to broadcast close tx: Transaction rejected: output already spent
|
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to broadcast close tx: Transaction rejected: output already spent
|
||||||
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to force close: channel not found
|
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to force close: channel not found
|
||||||
@ -20,6 +20,7 @@
|
|||||||
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcSuccessResolver: Transaction rejected: output already spent
|
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcSuccessResolver: Transaction rejected: output already spent
|
||||||
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: htlcswitch shutting down
|
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: htlcswitch shutting down
|
||||||
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: TxNotifier is exiting
|
<time> [ERR] CNCT: ChannelArbitrator(<chan_point>): unable to progress *contractcourt.htlcTimeoutResolver: TxNotifier is exiting
|
||||||
|
<time> [ERR] CNCT: *contractcourt.commitSweepResolver(<chan_point>): unable to sweep input: remote party swept utxo
|
||||||
<time> [ERR] CNCT: Unable to advance state: channel not found
|
<time> [ERR] CNCT: Unable to advance state: channel not found
|
||||||
<time> [ERR] CNCT: unable to hand breached contract off to breachArbiter: server is shutting down
|
<time> [ERR] CNCT: unable to hand breached contract off to breachArbiter: server is shutting down
|
||||||
<time> [ERR] CNCT: unable to handle channel breach for chan_point=<chan_point>: server is shutting down
|
<time> [ERR] CNCT: unable to handle channel breach for chan_point=<chan_point>: server is shutting down
|
||||||
@ -35,6 +36,7 @@
|
|||||||
<time> [ERR] CRTR: Payment with hash <hex> failed: insufficient_balance
|
<time> [ERR] CRTR: Payment with hash <hex> failed: insufficient_balance
|
||||||
<time> [ERR] CRTR: Payment with hash <hex> failed: no_route
|
<time> [ERR] CRTR: Payment with hash <hex> failed: no_route
|
||||||
<time> [ERR] CRTR: Payment with hash <hex> failed: router shutting down
|
<time> [ERR] CRTR: Payment with hash <hex> failed: router shutting down
|
||||||
|
<time> [ERR] CRTR: Payment with hash <hex> failed: timeout
|
||||||
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: error.
|
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: error.
|
||||||
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: incorrect_payment_details.
|
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: incorrect_payment_details.
|
||||||
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: no_route.
|
<time> [ERR] CRTR: Resuming payment with hash <hex> failed: no_route.
|
||||||
@ -47,19 +49,26 @@
|
|||||||
<time> [ERR] DISC: Unable to reply to peer query: set tcp <ip>: use of closed network connection
|
<time> [ERR] DISC: Unable to reply to peer query: set tcp <ip>: use of closed network connection
|
||||||
<time> [ERR] DISC: Unable to reply to peer query: write tcp <ip>-><ip>: use of closed network connection
|
<time> [ERR] DISC: Unable to reply to peer query: write tcp <ip>-><ip>: use of closed network connection
|
||||||
<time> [ERR] DISC: Unable to reply to peer query: write tcp <ip>-><ip>: write: broken pipe
|
<time> [ERR] DISC: Unable to reply to peer query: write tcp <ip>-><ip>: write: broken pipe
|
||||||
|
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=channel too large
|
||||||
|
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
|
||||||
|
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
|
||||||
|
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
|
||||||
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
|
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
|
||||||
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
|
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
|
||||||
|
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: canceled adding new channel
|
||||||
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: peer exiting
|
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: peer exiting
|
||||||
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: unable to get best block: the client has been shutdown
|
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: unable to get best block: the client has been shutdown
|
||||||
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): epoch client shutting down
|
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): epoch client shutting down
|
||||||
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): funding manager shutting down
|
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): funding manager shutting down
|
||||||
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): waiting for fundingconfirmation failed
|
<time> [ERR] FNDG: Unable to advance pending state of ChannelPoint(<chan_point>): error waiting for funding confirmation for ChannelPoint(<chan_point>): waiting for fundingconfirmation failed
|
||||||
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: ChainNotifier shutting down, cannot complete funding flow for ChannelPoint(<chan_point>)
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: channel announcement proof for short_chan_id=<cid> isn't valid: can't verify first bitcoin signature
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: channel announcement proof for short_chan_id=<cid> isn't valid: can't verify first bitcoin signature
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: funding manager shutting down
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: funding manager shutting down
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: gossiper is shutting down
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: gossiper is shutting down
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: router shutting down
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: router shutting down
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: unable add proof to the channel chanID=<hex>: edge marked as zombie
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: unable add proof to the channel chanID=<hex>: edge marked as zombie
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: unable add proof to the channel chanID=<hex>: edge not found
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): error sending channel announcement: channel announcement failed: unable add proof to the channel chanID=<hex>: edge not found
|
||||||
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: gossiper is shutting down
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: router shutting down
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: router shutting down
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: funding manager shutting down
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: funding manager shutting down
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed sending fundingLocked: funding manager shutting down
|
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed sending fundingLocked: funding manager shutting down
|
||||||
@ -72,34 +81,30 @@
|
|||||||
<time> [ERR] FNDG: Unable to send node announcement: gossiper is shutting down
|
<time> [ERR] FNDG: Unable to send node announcement: gossiper is shutting down
|
||||||
<time> [ERR] FNDG: Unable to send node announcement: router shutting down
|
<time> [ERR] FNDG: Unable to send node announcement: router shutting down
|
||||||
<time> [ERR] HSWC: AmountBelowMinimum(amt=<amt>, update=(lnwire.ChannelUpdate) {
|
<time> [ERR] HSWC: AmountBelowMinimum(amt=<amt>, update=(lnwire.ChannelUpdate) {
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=sync error with error: remote error
|
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=invalid update with error: remote error
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=invalid update with error: remote error
|
||||||
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=sync error with error: remote error
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=unable to resume channel, recovery required with error: remote error
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: ChannelPoint(<chan_point>): received error from peer: chan_id=<hex>, err=unable to resume channel, recovery required with error: remote error
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to handle upstream settle HTLC: Invalid payment preimage <hex> for hash <hex> with error: invalid update
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to handle upstream settle HTLC: Invalid payment preimage <hex> for hash <hex> with error: invalid update
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: ChannelPoint(<chan_point>) with CommitPoint(<hex>) had possible local commitment state data loss with error: unable to resume channel, recovery required
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: ChannelPoint(<chan_point>) with CommitPoint(<hex>) had possible local commitment state data loss with error: unable to resume channel, recovery required
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: possible remote commitment state data loss with error: sync error
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: possible remote commitment state data loss with error: sync error
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): peer exiting with error: unable to resume channel, recovery required
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): peer exiting with error: unable to resume channel, recovery required
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): set tcp <ip>: use of closed network connection with error: unable to resume channel, recovery required
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): set tcp <ip>: use of closed network connection with error: unable to resume channel, recovery required
|
||||||
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): set tcp <ip>: use of closed network connection with error: unable to resume channel, recovery required
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: use of closed network connection with error: unable to resume channel, recovery required
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: use of closed network connection with error: unable to resume channel, recovery required
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: write: broken pipe with error: unable to resume channel, recovery required
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: Unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: write: broken pipe with error: unable to resume channel, recovery required
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: write: connection reset by peer with error: unable to resume channel, recovery required
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): write tcp <ip>-><ip>: write: connection reset by peer with error: unable to resume channel, recovery required
|
||||||
|
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to update commitment: link shutting down with error: internal error
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): link failed, exiting htlcManager
|
<time> [ERR] HSWC: ChannelLink(<chan>): link failed, exiting htlcManager
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): outgoing htlc(<hex>) has insufficient fee: expected 575000, got 1075
|
<time> [ERR] HSWC: ChannelLink(<chan>): outgoing htlc(<hex>) has insufficient fee: expected 575000, got 1075
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): outgoing htlc(<hex>) is too small: min_htlc=<amt>, htlc_value=<amt>
|
<time> [ERR] HSWC: ChannelLink(<chan>): outgoing htlc(<hex>) is too small: min_htlc=<amt>, htlc_value=<amt>
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): unable to cancel incoming HTLC for circuit-key=(Chan ID=<chan>, HTLC ID=0): HTLC with ID 0 has already been failed
|
<time> [ERR] HSWC: ChannelLink(<chan>): unable to cancel incoming HTLC for circuit-key=(Chan ID=<chan>, HTLC ID=0): HTLC with ID 0 has already been failed
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): unable to decode onion hop iterator: TemporaryChannelFailure
|
<time> [ERR] HSWC: ChannelLink(<chan>): unable to decode onion hop iterator: TemporaryChannelFailure
|
||||||
|
<time> [ERR] HSWC: ChannelLink(<chan>): unable to update signals
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: AmountBelowMinimum(amt=4000 mSAT, update=(lnwire.ChannelUpdate) {
|
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: AmountBelowMinimum(amt=4000 mSAT, update=(lnwire.ChannelUpdate) {
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: circuit has already been closed
|
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: circuit has already been closed
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: insufficient bandwidth to route htlc
|
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: insufficient bandwidth to route htlc
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: node configured to disallow forwards
|
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: node configured to disallow forwards
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: UnknownNextPeer
|
<time> [ERR] HSWC: ChannelLink(<chan>): unhandled error while forwarding htlc packet over htlcswitch: UnknownNextPeer
|
||||||
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: AmountBelowMinimum(amt=<amt>, update=(lnwire.ChannelUpdate) {
|
|
||||||
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: circuit has already been closed
|
|
||||||
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: FeeInsufficient(htlc_amt==<amt>, update=(lnwire.ChannelUpdate) {
|
|
||||||
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: insufficient bandwidth to route htlc
|
|
||||||
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: node configured to disallow forwards
|
|
||||||
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: UnknownNextPeer
|
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to synchronize channel states: unable to send chan sync message for ChannelPoint(<chan_point>): set tcp <ip>: use of closed network connection with error: unable to resume channel, recovery required
|
|
||||||
<time> [ERR] HSWC: FeeInsufficient(htlc_amt==<amt>, update=(lnwire.ChannelUpdate) {
|
<time> [ERR] HSWC: FeeInsufficient(htlc_amt==<amt>, update=(lnwire.ChannelUpdate) {
|
||||||
<time> [ERR] HSWC: insufficient bandwidth to route htlc
|
<time> [ERR] HSWC: insufficient bandwidth to route htlc
|
||||||
<time> [ERR] HSWC: Link <chan> not found
|
<time> [ERR] HSWC: Link <chan> not found
|
||||||
@ -109,10 +114,16 @@
|
|||||||
<time> [ERR] HSWC: unable to find target channel for HTLC fail: channel ID = <chan>, HTLC ID = <id>
|
<time> [ERR] HSWC: unable to find target channel for HTLC fail: channel ID = <chan>, HTLC ID = <id>
|
||||||
<time> [ERR] HSWC: Unable to forward resolution msg: unable to find target channel for HTLC fail: channel ID = <chan>, HTLC ID = <id>
|
<time> [ERR] HSWC: Unable to forward resolution msg: unable to find target channel for HTLC fail: channel ID = <chan>, HTLC ID = <id>
|
||||||
<time> [ERR] HSWC: unable to process onion packet: sphinx packet replay attempted
|
<time> [ERR] HSWC: unable to process onion packet: sphinx packet replay attempted
|
||||||
|
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: AmountBelowMinimum(amt=<amt>, update=(lnwire.ChannelUpdate) {
|
||||||
|
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: circuit has already been closed
|
||||||
|
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: FeeInsufficient(htlc_amt==<amt>, update=(lnwire.ChannelUpdate) {
|
||||||
|
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: insufficient bandwidth to route htlc
|
||||||
|
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: node configured to disallow forwards
|
||||||
|
<time> [ERR] HSWC: Unhandled error while reforwarding htlc settle/fail over htlcswitch: UnknownNextPeer
|
||||||
<time> [ERR] HSWC: UnknownNextPeer
|
<time> [ERR] HSWC: UnknownNextPeer
|
||||||
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed with local data loss: remote believes our tail height is <height>, while we have <height>!
|
|
||||||
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed: remote believes our tail height is <height>, while we have <height>!
|
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed: remote believes our tail height is <height>, while we have <height>!
|
||||||
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed: remote's next commit height is <height>, while we believe it is <height>!
|
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed: remote's next commit height is <height>, while we believe it is <height>!
|
||||||
|
<time> [ERR] LNWL: ChannelPoint(<chan_point>): sync failed with local data loss: remote believes our tail height is <height>, while we have <height>!
|
||||||
<time> [ERR] LNWL: Neutrino rescan ended with error: rescan exited
|
<time> [ERR] LNWL: Neutrino rescan ended with error: rescan exited
|
||||||
<time> [ERR] LNWL: Notifying unmined tx notification (<hex>) while creating notification for blocks
|
<time> [ERR] LNWL: Notifying unmined tx notification (<hex>) while creating notification for blocks
|
||||||
<time> [ERR] LNWL: Rescan for <num> addresses failed: the client has been shutdown
|
<time> [ERR] LNWL: Rescan for <num> addresses failed: the client has been shutdown
|
||||||
@ -120,14 +131,20 @@
|
|||||||
<time> [ERR] NANN: Unable to retrieve chan status for Channel(<chan_point>): edge not found
|
<time> [ERR] NANN: Unable to retrieve chan status for Channel(<chan_point>): edge not found
|
||||||
<time> [ERR] NANN: Unable to retrieve chan status for Channel(<chan_point>): unable to extract ChannelUpdate for channel <chan_point>
|
<time> [ERR] NANN: Unable to retrieve chan status for Channel(<chan_point>): unable to extract ChannelUpdate for channel <chan_point>
|
||||||
<time> [ERR] NANN: Unable to sign update disabling channel(<chan_point>): edge not found
|
<time> [ERR] NANN: Unable to sign update disabling channel(<chan_point>): edge not found
|
||||||
|
<time> [ERR] NTFN: chain notifier shutting down
|
||||||
<time> [ERR] NTFN: Error during rescan: rescan exited
|
<time> [ERR] NTFN: Error during rescan: rescan exited
|
||||||
|
<time> [ERR] NTFN: Failed getting UTXO: get utxo request cancelled
|
||||||
<time> [ERR] NTFN: Rescan to determine the spend details of <chan_point> failed: the client has been shutdown
|
<time> [ERR] NTFN: Rescan to determine the spend details of <chan_point> failed: the client has been shutdown
|
||||||
<time> [ERR] NTFN: Unable to fetch block header: the client has been shutdown
|
<time> [ERR] NTFN: Unable to fetch block header: the client has been shutdown
|
||||||
<time> [ERR] NTFN: unable to find blockhash for height=<height>: -1: Block number out of range
|
<time> [ERR] NTFN: unable to find blockhash for height=<height>: -1: Block number out of range
|
||||||
<time> [ERR] NTFN: unable to get block: the client has been shutdown
|
<time> [ERR] NTFN: unable to get block: the client has been shutdown
|
||||||
|
<time> [ERR] NTFN: unable to get hash from block with height 790
|
||||||
<time> [ERR] NTFN: unable to get missed blocks: starting height <height> is greater than ending height <height>
|
<time> [ERR] NTFN: unable to get missed blocks: starting height <height> is greater than ending height <height>
|
||||||
<time> [ERR] NTFN: Unable to rewind chain from height <height> to height <height>: unable to find blockhash for disconnected height=<height>: -1: Block number out of range
|
<time> [ERR] NTFN: Unable to rewind chain from height <height> to height <height>: unable to find blockhash for disconnected height=<height>: -1: Block number out of range
|
||||||
|
<time> [ERR] NTNF: unable to get hash from block with height <height>
|
||||||
|
<time> [ERR] PEER: Allowed test error from <ip> (inbound): ReadMessage: unhandled command [sendaddrv2]
|
||||||
<time> [ERR] PEER: resend failed: unable to fetch channel sync messages for peer <hex>@<ip>: unable to find closed channel summary
|
<time> [ERR] PEER: resend failed: unable to fetch channel sync messages for peer <hex>@<ip>: unable to find closed channel summary
|
||||||
|
<time> [ERR] PEER: unable to close channel, ChannelID(<hex>) is unknown
|
||||||
<time> [ERR] PEER: unable to force close link(<chan>): ChainArbitrator exiting
|
<time> [ERR] PEER: unable to force close link(<chan>): ChainArbitrator exiting
|
||||||
<time> [ERR] PEER: unable to force close link(<chan>): channel not found
|
<time> [ERR] PEER: unable to force close link(<chan>): channel not found
|
||||||
<time> [ERR] PEER: unable to force close link(<chan>): unable to find arbitrator
|
<time> [ERR] PEER: unable to force close link(<chan>): unable to find arbitrator
|
||||||
@ -137,17 +154,38 @@
|
|||||||
<time> [ERR] PEER: unable to send msg to remote peer: write tcp <ip>-><ip>: write: connection reset by peer
|
<time> [ERR] PEER: unable to send msg to remote peer: write tcp <ip>-><ip>: write: connection reset by peer
|
||||||
<time> [ERR] RPCS: [/chainrpc.ChainNotifier/RegisterBlockEpochNtfn]: chain notifier shutting down
|
<time> [ERR] RPCS: [/chainrpc.ChainNotifier/RegisterBlockEpochNtfn]: chain notifier shutting down
|
||||||
<time> [ERR] RPCS: [/chainrpc.ChainNotifier/RegisterBlockEpochNtfn]: context canceled
|
<time> [ERR] RPCS: [/chainrpc.ChainNotifier/RegisterBlockEpochNtfn]: context canceled
|
||||||
|
<time> [ERR] RPCS: [closechannel] unable to close ChannelPoint(<chan_point>): chain notifier shutting down
|
||||||
|
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: already connected to peer: <hex>@<ip>
|
||||||
|
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: dial tcp <ip>: i/o timeout
|
||||||
|
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: dial tcp <ip>: i/o timeout
|
||||||
|
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: read tcp <ip>-><ip>: i/o timeout
|
||||||
|
<time> [ERR] RPCS: Failed receiving from stream: rpc error: code = Canceled desc = context canceled
|
||||||
|
<time> [ERR] RPCS: Failed receiving from stream: rpc error: code = DeadlineExceeded desc = context deadline exceeded
|
||||||
|
<time> [ERR] RPCS: Failed sending error response: rpc error: code = Canceled desc = context canceled
|
||||||
|
<time> [ERR] RPCS: Failed sending error response: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
|
||||||
|
<time> [ERR] RPCS: Failed sending response: rpc error: code = Canceled desc = context canceled
|
||||||
|
<time> [ERR] RPCS: Failed sending response: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
|
||||||
<time> [ERR] RPCS: [/invoicesrpc.Invoices/SubscribeSingleInvoice]: rpc error: code = Canceled desc = context canceled
|
<time> [ERR] RPCS: [/invoicesrpc.Invoices/SubscribeSingleInvoice]: rpc error: code = Canceled desc = context canceled
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: invalid permission action. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: invalid permission entity. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: permission list cannot be empty. specify at least one action/entity pair. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot close channel with state: ChanStatusRestored
|
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot close channel with state: ChanStatusRestored
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot co-op close frozen channel as initiator until height=<height>, (current_height=<height>)
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot co-op close frozen channel as initiator until height=3059, (current_height=3055)
|
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot co-op close frozen channel as initiator until height=3059, (current_height=3055)
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: cannot co-op close frozen channel as initiator until height=<height>, (current_height=<height>)
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: chain notifier shutting down
|
<time> [ERR] RPCS: [/lnrpc.Lightning/CloseChannel]: chain notifier shutting down
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: already connected to peer: <hex>@<ip>
|
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: already connected to peer: <hex>@<ip>
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: dial tcp <ip>: i/o timeout
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: dial tcp <ip>: i/o timeout
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: read tcp <ip>-><ip>: i/o timeout
|
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: read tcp <ip>-><ip>: i/o timeout
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: server is still in the process of starting
|
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: server is still in the process of starting
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/DeleteMacaroonID]: the specified ID cannot be deleted
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/FundingStateStep]: pendingChanID(<hex>) already has intent registered
|
<time> [ERR] RPCS: [/lnrpc.Lightning/FundingStateStep]: pendingChanID(<hex>) already has intent registered
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/GetChanInfo]: edge marked as zombie
|
<time> [ERR] RPCS: [/lnrpc.Lightning/GetChanInfo]: edge marked as zombie
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: channels cannot be created before the wallet is fully synced
|
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: channels cannot be created before the wallet is fully synced
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=channel too large
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
|
||||||
|
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
|
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
|
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/PendingChannels]: unable to find arbitrator
|
<time> [ERR] RPCS: [/lnrpc.Lightning/PendingChannels]: unable to find arbitrator
|
||||||
@ -168,69 +206,33 @@
|
|||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeChannelGraph]: rpc error: code = DeadlineExceeded desc = context deadline exceeded
|
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeChannelGraph]: rpc error: code = DeadlineExceeded desc = context deadline exceeded
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeChannelGraph]: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
|
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeChannelGraph]: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeInvoices]: rpc error: code = Canceled desc = context canceled
|
<time> [ERR] RPCS: [/lnrpc.Lightning/SubscribeInvoices]: rpc error: code = Canceled desc = context canceled
|
||||||
|
<time> [ERR] RPCS: [/routerrpc.Router/HtlcInterceptor]: rpc error: code = Canceled desc = context canceled
|
||||||
<time> [ERR] RPCS: [/routerrpc.Router/SendPayment]: routerrpc server shutting down
|
<time> [ERR] RPCS: [/routerrpc.Router/SendPayment]: routerrpc server shutting down
|
||||||
<time> [ERR] RPCS: [/routerrpc.Router/SendPaymentV2]: context canceled
|
<time> [ERR] RPCS: [/routerrpc.Router/SendPaymentV2]: context canceled
|
||||||
<time> [ERR] RPCS: [/routerrpc.Router/SendPaymentV2]: routerrpc server shutting down
|
<time> [ERR] RPCS: [/routerrpc.Router/SendPaymentV2]: routerrpc server shutting down
|
||||||
<time> [ERR] RPCS: [/routerrpc.Router/SubscribeHtlcEvents]: context canceled
|
<time> [ERR] RPCS: [/routerrpc.Router/SubscribeHtlcEvents]: context canceled
|
||||||
<time> [ERR] RPCS: [/routerrpc.Router/SubscribeHtlcEvents]: htlc event subscription terminated
|
<time> [ERR] RPCS: [/routerrpc.Router/SubscribeHtlcEvents]: htlc event subscription terminated
|
||||||
<time> [ERR] RPCS: [closechannel] unable to close ChannelPoint(<chan_point>): chain notifier shutting down
|
<time> [ERR] RPCS: [/routerrpc.Route<time> [INF] LTND: Listening on the p2p interface is disabled!
|
||||||
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: already connected to peer: <hex>@<ip>
|
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: must provide ephemeral pubkey
|
||||||
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: read tcp <ip>-><ip>: i/o timeout
|
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: use either key_desc or key_loc
|
||||||
<time> [ERR] RPCS: Failed receiving from stream: rpc error: code = Canceled desc = context canceled
|
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: use either raw_key_bytes or key_index
|
||||||
<time> [ERR] RPCS: Failed receiving from stream: rpc error: code = DeadlineExceeded desc = context deadline exceeded
|
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: when setting key_desc the field key_desc.key_loc must also be set
|
||||||
<time> [ERR] RPCS: Failed sending error response: rpc error: code = Canceled desc = context canceled
|
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=channel too large
|
||||||
<time> [ERR] RPCS: Failed sending error response: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
|
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
|
||||||
<time> [ERR] RPCS: Failed sending response: rpc error: code = Canceled desc = context canceled
|
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
|
||||||
<time> [ERR] RPCS: Failed sending response: rpc error: code = Internal desc = transport: transport: the stream is done or WriteHeader was already called
|
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
|
||||||
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
|
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=Number of pending channels exceed maximum
|
||||||
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
|
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=Synchronizing blockchain
|
||||||
|
<time> [ERR] RPCS: [/walletrpc.WalletKit/LabelTransaction]: cannot label transaction with empty label
|
||||||
|
<time> [ERR] RPCS: [/walletrpc.WalletKit/LabelTransaction]: transaction already labelled
|
||||||
<time> [ERR] RPCS: Websocket receive error from <ip>: read tcp4 <ip>-><ip>: use of closed network connection
|
<time> [ERR] RPCS: Websocket receive error from <ip>: read tcp4 <ip>-><ip>: use of closed network connection
|
||||||
<time> [ERR] RPCS: Websocket receive error from <ip>: websocket: close 1006 unexpected EOF
|
<time> [ERR] RPCS: Websocket receive error from <ip>: websocket: close 1006 unexpected EOF
|
||||||
|
<time> [ERR] RPCS: WS: error closing upgraded conn: write tcp4 <ip>-><ip>: write: connection reset by peer
|
||||||
|
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: dial tcp <ip>: i/o timeout
|
||||||
|
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: dial tcp <ip>: i/o timeout
|
||||||
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: read tcp <ip>-><ip>: i/o timeout
|
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: read tcp <ip>-><ip>: i/o timeout
|
||||||
<time> [ERR] SRVR: Unable to retrieve advertised address for node <hex>: no advertised addresses found
|
<time> [ERR] SRVR: Unable to retrieve advertised address for node <hex>: no advertised addresses found
|
||||||
<time> [ERR] SRVR: Unable to retrieve advertised address for node <hex>: unable to find node
|
<time> [ERR] SRVR: Unable to retrieve advertised address for node <hex>: unable to find node
|
||||||
<time> [ERR] UTXN: error while graduating class at height=<height>: TxNotifier is exiting
|
<time> [ERR] UTXN: error while graduating class at height=<height>: TxNotifier is exiting
|
||||||
<time> [ERR] UTXN: Failed to sweep first-stage HTLC (CLTV-delayed) output <chan_point>
|
<time> [ERR] UTXN: Failed to sweep first-stage HTLC (CLTV-delayed) output <chan_point>
|
||||||
<time> [ERR] UTXN: Notification chan closed, can't advance output <chan_point>
|
<time> [ERR] UTXN: Notification chan closed, can't advance output <chan_point>
|
||||||
<time> [ERR] RPCS: [/walletrpc.WalletKit/LabelTransaction]: cannot label transaction with empty label
|
|
||||||
<time> [ERR] RPCS: [/walletrpc.WalletKit/LabelTransaction]: transaction already labelled
|
|
||||||
<time> [ERR] NTFN: unable to get hash from block with height 790
|
|
||||||
<time> [ERR] CRTR: Payment with hash <hex> failed: timeout
|
|
||||||
<time> [ERR] RPCS: [/routerrpc.Route<time> [INF] LTND: Listening on the p2p interface is disabled!
|
|
||||||
<time> [ERR] FNDG: Unable to advance state(<chan_point>): failed adding to router graph: error sending channel announcement: gossiper is shutting down
|
|
||||||
<time> [ERR] PEER: unable to close channel, ChannelID(<hex>) is unknown
|
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): unable to update signals
|
|
||||||
<time> [ERR] RPCS: [/routerrpc.Router/HtlcInterceptor]: rpc error: code = Canceled desc = context canceled
|
|
||||||
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=channel too large
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=channel too large
|
|
||||||
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=channel too large
|
|
||||||
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
|
|
||||||
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 0.16777216 BTC exceeds maximum chan size of 0.16777215 BTC
|
|
||||||
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
|
|
||||||
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 10 BTC
|
|
||||||
<time> [ERR] FNDG: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/OpenChannel]: received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
|
|
||||||
<time> [ERR] RPCS: unable to open channel to NodeKey(<hex>): received funding error from <hex>: chan_id=<hex>, err=chan size of 10.00000001 BTC exceeds maximum chan size of 0.16777215 BTC
|
|
||||||
<time> [ERR] NTNF: unable to get hash from block with height <height>
|
|
||||||
<time> [ERR] FNDG: Unable to add new channel <chan_point> with peer <hex>: canceled adding new channel
|
|
||||||
<time> [ERR] RPCS: WS: error closing upgraded conn: write tcp4 <ip>-><ip>: write: connection reset by peer
|
|
||||||
<time> [ERR] NTFN: chain notifier shutting down
|
|
||||||
<time> [ERR] NTFN: Failed getting UTXO: get utxo request cancelled
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: invalid permission action. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: invalid permission entity. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/BakeMacaroon]: permission list cannot be empty. specify at least one action/entity pair. supported actions are [read write generate], supported entities are [onchain offchain address message peers info invoices signer macaroon uri]
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/DeleteMacaroonID]: the specified ID cannot be deleted
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: dial tcp <ip>: i/o timeout
|
|
||||||
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: dial tcp <ip>: i/o timeout
|
|
||||||
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: dial tcp <ip>: i/o timeout
|
|
||||||
<time> [ERR] RPCS: [/lnrpc.Lightning/ConnectPeer]: dial tcp <ip>: i/o timeout
|
|
||||||
<time> [ERR] RPCS: [connectpeer]: error connecting to peer: dial tcp <ip>: i/o timeout
|
|
||||||
<time> [ERR] SRVR: Unable to connect to <hex>@<ip>: dial tcp <ip>: i/o timeout
|
|
||||||
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: use either key_desc or key_loc
|
|
||||||
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: use either raw_key_bytes or key_index
|
|
||||||
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: when setting key_desc the field key_desc.key_loc must also be set
|
|
||||||
<time> [ERR] RPCS: [/signrpc.Signer/DeriveSharedKey]: must provide ephemeral pubkey
|
|
||||||
<time> [ERR] BTCN: Can't accept connection: unable to accept connection from <ip>: chacha20poly1305: message authentication failed
|
|
||||||
<time> [ERR] HSWC: ChannelLink(<chan>): failing link: unable to update commitment: link shutting down with error: internal error
|
|