// +build !rpctest

package lnd

import (
	"bytes"
	crand "crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net"
	"os"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/btcsuite/btcutil"
	"github.com/go-errors/errors"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/htlcswitch"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/keychain"
	"github.com/lightningnetwork/lnd/lntest/channels"
	"github.com/lightningnetwork/lnd/lntest/mock"
	"github.com/lightningnetwork/lnd/lntest/wait"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwallet/chainfee"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/lightningnetwork/lnd/shachain"
	"github.com/stretchr/testify/require"
)

var (
	breachOutPoints = []wire.OutPoint{
		{
			Hash: [chainhash.HashSize]byte{
				0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
				0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
				0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
				0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
			},
			Index: 9,
		},
		{
			Hash: [chainhash.HashSize]byte{
				0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
				0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
				0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
				0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
			},
			Index: 49,
		},
		{
			Hash: [chainhash.HashSize]byte{
				0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
				0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
				0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
				0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
			},
			Index: 23,
		},
	}

	breachKeys = [][]byte{
		{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
			0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
			0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
			0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
			0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
			0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
			0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
			0xb4, 0x12, 0xa3,
		},
		{0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
			0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
			0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
			0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
			0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
			0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
			0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
			0xb4, 0x12, 0xa3,
		},
		{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
			0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
			0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
			0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
		},
		{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
			0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
			0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
			0xa3, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
		},
	}

	breachedOutputs = []breachedOutput{
		{
			amt:         btcutil.Amount(1e7),
			outpoint:    breachOutPoints[0],
			witnessType: input.CommitmentNoDelay,
			signDesc: input.SignDescriptor{
				SingleTweak: []byte{
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02,
				},
				WitnessScript: []byte{
					0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
					0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
					0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
					0xef, 0xb5, 0x71, 0x48,
				},
				Output: &wire.TxOut{
					Value: 5000000000,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0xd6, 0x4b, 0xdf, 0xd0,
						0x9e, 0xb1, 0xc5, 0xfe, 0x29,
						0x5a, 0xbd, 0xeb, 0x1d, 0xca,
						0x42, 0x81, 0xbe, 0x98, 0x8e,
						0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
						0xa5, 0x9d, 0xc2, 0x26, 0xc2,
						0x86, 0x24, 0xe1, 0x81, 0x75,
						0xe8, 0x51, 0xc9, 0x6b, 0x97,
						0x3d, 0x81, 0xb0, 0x1c, 0xc3,
						0x1f, 0x04, 0x78, 0x34, 0xbc,
						0x06, 0xd6, 0xd6, 0xed, 0xf6,
						0x20, 0xd1, 0x84, 0x24, 0x1a,
						0x6a, 0xed, 0x8b, 0x63,
						0xa6, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
				HashType: txscript.SigHashAll,
			},
			secondLevelWitnessScript: breachKeys[0],
		},
		{
			amt:         btcutil.Amount(1e7),
			outpoint:    breachOutPoints[0],
			witnessType: input.CommitSpendNoDelayTweakless,
			signDesc: input.SignDescriptor{
				WitnessScript: []byte{
					0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
					0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
					0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
					0xef, 0xb5, 0x71, 0x48,
				},
				Output: &wire.TxOut{
					Value: 5000000000,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0xd6, 0x4b, 0xdf, 0xd0,
						0x9e, 0xb1, 0xc5, 0xfe, 0x29,
						0x5a, 0xbd, 0xeb, 0x1d, 0xca,
						0x42, 0x81, 0xbe, 0x98, 0x8e,
						0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
						0xa5, 0x9d, 0xc2, 0x26, 0xc2,
						0x86, 0x24, 0xe1, 0x81, 0x75,
						0xe8, 0x51, 0xc9, 0x6b, 0x97,
						0x3d, 0x81, 0xb0, 0x1c, 0xc3,
						0x1f, 0x04, 0x78, 0x34, 0xbc,
						0x06, 0xd6, 0xd6, 0xed, 0xf6,
						0x20, 0xd1, 0x84, 0x24, 0x1a,
						0x6a, 0xed, 0x8b, 0x63,
						0xa6, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
				HashType: txscript.SigHashAll,
			},
			secondLevelWitnessScript: breachKeys[0],
		},
		{
			amt:         btcutil.Amount(2e9),
			outpoint:    breachOutPoints[1],
			witnessType: input.CommitmentRevoke,
			signDesc: input.SignDescriptor{
				SingleTweak: []byte{
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02,
				},
				WitnessScript: []byte{
					0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
					0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
					0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
					0xef, 0xb5, 0x71, 0x48,
				},
				Output: &wire.TxOut{
					Value: 5000000000,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0xd6, 0x4b, 0xdf, 0xd0,
						0x9e, 0xb1, 0xc5, 0xfe, 0x29,
						0x5a, 0xbd, 0xeb, 0x1d, 0xca,
						0x42, 0x81, 0xbe, 0x98, 0x8e,
						0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
						0xa5, 0x9d, 0xc2, 0x26, 0xc2,
						0x86, 0x24, 0xe1, 0x81, 0x75,
						0xe8, 0x51, 0xc9, 0x6b, 0x97,
						0x3d, 0x81, 0xb0, 0x1c, 0xc3,
						0x1f, 0x04, 0x78, 0x34, 0xbc,
						0x06, 0xd6, 0xd6, 0xed, 0xf6,
						0x20, 0xd1, 0x84, 0x24, 0x1a,
						0x6a, 0xed, 0x8b, 0x63,
						0xa6, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
				HashType: txscript.SigHashAll,
			},
			secondLevelWitnessScript: breachKeys[0],
		},
		{
			amt:         btcutil.Amount(3e4),
			outpoint:    breachOutPoints[2],
			witnessType: input.CommitmentDelayOutput,
			signDesc: input.SignDescriptor{
				SingleTweak: []byte{
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02,
				},
				WitnessScript: []byte{
					0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
					0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
					0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
					0xef, 0xb5, 0x71, 0x48,
				},
				Output: &wire.TxOut{
					Value: 5000000000,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0xd6, 0x4b, 0xdf, 0xd0,
						0x9e, 0xb1, 0xc5, 0xfe, 0x29,
						0x5a, 0xbd, 0xeb, 0x1d, 0xca,
						0x42, 0x81, 0xbe, 0x98, 0x8e,
						0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
						0xa5, 0x9d, 0xc2, 0x26, 0xc2,
						0x86, 0x24, 0xe1, 0x81, 0x75,
						0xe8, 0x51, 0xc9, 0x6b, 0x97,
						0x3d, 0x81, 0xb0, 0x1c, 0xc3,
						0x1f, 0x04, 0x78, 0x34, 0xbc,
						0x06, 0xd6, 0xd6, 0xed, 0xf6,
						0x20, 0xd1, 0x84, 0x24, 0x1a,
						0x6a, 0xed, 0x8b, 0x63,
						0xa6, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
				HashType: txscript.SigHashAll,
			},
			secondLevelWitnessScript: breachKeys[0],
		},
	}

	retributionMap = make(map[wire.OutPoint]retributionInfo)
	retributions   = []retributionInfo{
		{
			commitHash: [chainhash.HashSize]byte{
				0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
				0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
				0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
				0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
			},
			chainHash: [chainhash.HashSize]byte{
				0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
				0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
				0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
				0x6b, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
			},
			chanPoint:    breachOutPoints[0],
			breachHeight: 337,
			// Set to breachedOutputs 0 and 1 in init()
			breachedOutputs: []breachedOutput{{}, {}},
		},
		{
			commitHash: [chainhash.HashSize]byte{
				0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
				0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
				0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
				0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
			},
			chainHash: [chainhash.HashSize]byte{
				0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
				0xb7, 0x94, 0x39, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
				0x6b, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
				0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
			},
			chanPoint:    breachOutPoints[1],
			breachHeight: 420420,
			// Set to breachedOutputs 1 and 2 in init()
			breachedOutputs: []breachedOutput{{}, {}},
		},
	}
)

func init() {
	// Ensure that breached outputs are initialized before starting tests.
	if err := initBreachedOutputs(); err != nil {
		panic(err)
	}

	// Populate a retribution map for convenience, to allow lookups by
	// channel point.
	for i := range retributions {
		retInfo := &retributions[i]
		retInfo.breachedOutputs[0] = breachedOutputs[i]
		retInfo.breachedOutputs[1] = breachedOutputs[i+1]

		retributionMap[retInfo.chanPoint] = *retInfo
	}
}

// FailingRetributionStore wraps a RetributionStore and supports controlled
// restarts of the persistent instance. This allows us to test (1) that no
// modifications to the entries are made between calls or through side effects,
// and (2) that the database is actually being persisted between actions.
type FailingRetributionStore interface {
	RetributionStore

	Restart()
}

// failingRetributionStore is a concrete implementation of a
// FailingRetributionStore. It wraps an underlying RetributionStore and is
// parameterized entirely by a restart function, which is intended to simulate a
// full stop/start of the store.
type failingRetributionStore struct {
	mu sync.Mutex

	rs RetributionStore

	nextAddErr error

	restart func() RetributionStore
}

// newFailingRetributionStore creates a new failing retribution store. The given
// restart closure should ensure that it is reloading its contents from the
// persistent source.
func newFailingRetributionStore(
	restart func() RetributionStore) *failingRetributionStore {

	return &failingRetributionStore{
		mu:      sync.Mutex{},
		rs:      restart(),
		restart: restart,
	}
}

// FailNextAdd instructs the retribution store to return the provided error. If
// the error is nil, a generic default will be used.
func (frs *failingRetributionStore) FailNextAdd(err error) {
	if err == nil {
		err = errors.New("retribution store failed")
	}

	frs.mu.Lock()
	frs.nextAddErr = err
	frs.mu.Unlock()
}

func (frs *failingRetributionStore) Restart() {
	frs.mu.Lock()
	frs.rs = frs.restart()
	frs.mu.Unlock()
}

// Add forwards the call to the underlying retribution store, unless this Add
// has been previously instructed to fail.
func (frs *failingRetributionStore) Add(retInfo *retributionInfo) error {
	frs.mu.Lock()
	defer frs.mu.Unlock()

	if frs.nextAddErr != nil {
		err := frs.nextAddErr
		frs.nextAddErr = nil
		return err
	}

	return frs.rs.Add(retInfo)
}

func (frs *failingRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
	frs.mu.Lock()
	defer frs.mu.Unlock()

	return frs.rs.IsBreached(chanPoint)
}

func (frs *failingRetributionStore) Remove(key *wire.OutPoint) error {
	frs.mu.Lock()
	defer frs.mu.Unlock()

	return frs.rs.Remove(key)
}

func (frs *failingRetributionStore) ForAll(cb func(*retributionInfo) error,
	reset func()) error {

	frs.mu.Lock()
	defer frs.mu.Unlock()

	return frs.rs.ForAll(cb, reset)
}

// Parse the pubkeys in the breached outputs.
func initBreachedOutputs() error {
	for i := range breachedOutputs {
		bo := &breachedOutputs[i]

		// Parse the sign descriptor's pubkey.
		pubkey, err := btcec.ParsePubKey(breachKeys[i], btcec.S256())
		if err != nil {
			return fmt.Errorf("unable to parse pubkey: %v",
				breachKeys[i])
		}
		bo.signDesc.KeyDesc.PubKey = pubkey
	}

	return nil
}

// Test that breachedOutput Encode/Decode works.
func TestBreachedOutputSerialization(t *testing.T) {
	for i := range breachedOutputs {
		bo := &breachedOutputs[i]

		var buf bytes.Buffer

		if err := bo.Encode(&buf); err != nil {
			t.Fatalf("unable to serialize breached output [%v]: %v",
				i, err)
		}

		desBo := &breachedOutput{}
		if err := desBo.Decode(&buf); err != nil {
			t.Fatalf("unable to deserialize "+
				"breached output [%v]: %v", i, err)
		}

		if !reflect.DeepEqual(bo, desBo) {
			t.Fatalf("original and deserialized "+
				"breached outputs not equal:\n"+
				"original : %+v\n"+
				"deserialized : %+v\n",
				bo, desBo)
		}
	}
}

// Test that retribution Encode/Decode works.
func TestRetributionSerialization(t *testing.T) {
	for i := range retributions {
		ret := &retributions[i]

		var buf bytes.Buffer

		if err := ret.Encode(&buf); err != nil {
			t.Fatalf("unable to serialize retribution [%v]: %v",
				i, err)
		}

		desRet := &retributionInfo{}
		if err := desRet.Decode(&buf); err != nil {
			t.Fatalf("unable to deserialize retribution [%v]: %v",
				i, err)
		}

		if !reflect.DeepEqual(ret, desRet) {
			t.Fatalf("original and deserialized "+
				"retribution infos not equal:\n"+
				"original : %+v\n"+
				"deserialized : %+v\n",
				ret, desRet)
		}
	}
}

// copyRetInfo creates a complete copy of the given retributionInfo.
func copyRetInfo(retInfo *retributionInfo) *retributionInfo {
	nOutputs := len(retInfo.breachedOutputs)

	ret := &retributionInfo{
		commitHash:      retInfo.commitHash,
		chainHash:       retInfo.chainHash,
		chanPoint:       retInfo.chanPoint,
		breachHeight:    retInfo.breachHeight,
		breachedOutputs: make([]breachedOutput, nOutputs),
	}

	for i := range retInfo.breachedOutputs {
		ret.breachedOutputs[i] = retInfo.breachedOutputs[i]
	}

	return ret
}

// mockRetributionStore implements the RetributionStore interface and is backed
// by an in-memory map. Access to the internal state is provided by a mutex.
// TODO(cfromknecht) extend to support and test controlled failures.
type mockRetributionStore struct {
	mu       sync.Mutex
	state    map[wire.OutPoint]*retributionInfo
	finalTxs map[wire.OutPoint]*wire.MsgTx
}

func newMockRetributionStore() *mockRetributionStore {
	return &mockRetributionStore{
		mu:       sync.Mutex{},
		state:    make(map[wire.OutPoint]*retributionInfo),
		finalTxs: make(map[wire.OutPoint]*wire.MsgTx),
	}
}

func (rs *mockRetributionStore) Add(retInfo *retributionInfo) error {
	rs.mu.Lock()
	rs.state[retInfo.chanPoint] = copyRetInfo(retInfo)
	rs.mu.Unlock()

	return nil
}

func (rs *mockRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
	rs.mu.Lock()
	_, ok := rs.state[*chanPoint]
	rs.mu.Unlock()

	return ok, nil
}

func (rs *mockRetributionStore) Finalize(chanPoint *wire.OutPoint,
	finalTx *wire.MsgTx) error {

	rs.mu.Lock()
	rs.finalTxs[*chanPoint] = finalTx
	rs.mu.Unlock()

	return nil
}

func (rs *mockRetributionStore) GetFinalizedTxn(
	chanPoint *wire.OutPoint) (*wire.MsgTx, error) {

	rs.mu.Lock()
	finalTx := rs.finalTxs[*chanPoint]
	rs.mu.Unlock()

	return finalTx, nil
}

func (rs *mockRetributionStore) Remove(key *wire.OutPoint) error {
	rs.mu.Lock()
	delete(rs.state, *key)
	delete(rs.finalTxs, *key)
	rs.mu.Unlock()

	return nil
}

func (rs *mockRetributionStore) ForAll(cb func(*retributionInfo) error,
	reset func()) error {

	rs.mu.Lock()
	defer rs.mu.Unlock()

	reset()
	for _, retInfo := range rs.state {
		if err := cb(copyRetInfo(retInfo)); err != nil {
			return err
		}
	}

	return nil
}

var retributionStoreTestSuite = []struct {
	name string
	test func(FailingRetributionStore, *testing.T)
}{
	{
		"Initialization",
		testRetributionStoreInit,
	},
	{
		"Add/Remove",
		testRetributionStoreAddRemove,
	},
	{
		"Persistence",
		testRetributionStorePersistence,
	},
	{
		"Overwrite",
		testRetributionStoreOverwrite,
	},
	{
		"RemoveEmpty",
		testRetributionStoreRemoveEmpty,
	},
}

// TestMockRetributionStore instantiates a mockRetributionStore and tests its
// behavior using the general RetributionStore test suite.
func TestMockRetributionStore(t *testing.T) {
	for _, test := range retributionStoreTestSuite {
		t.Run(
			"mockRetributionStore."+test.name,
			func(tt *testing.T) {
				mrs := newMockRetributionStore()
				frs := newFailingRetributionStore(
					func() RetributionStore { return mrs },
				)
				test.test(frs, tt)
			},
		)
	}
}

func makeTestChannelDB() (*channeldb.DB, func(), error) {
	// First, create a temporary directory to be used for the duration of
	// this test.
	tempDirName, err := ioutil.TempDir("", "channeldb")
	if err != nil {
		return nil, nil, err
	}

	cleanUp := func() {
		os.RemoveAll(tempDirName)
	}

	db, err := channeldb.Open(tempDirName)
	if err != nil {
		cleanUp()
		return nil, nil, err
	}

	return db, cleanUp, nil
}

// TestChannelDBRetributionStore instantiates a retributionStore backed by a
// channeldb.DB, and tests its behavior using the general RetributionStore test
// suite.
func TestChannelDBRetributionStore(t *testing.T) {
	// Finally, instantiate retribution store and execute RetributionStore
	// test suite.
	for _, test := range retributionStoreTestSuite {
		t.Run(
			"channeldbDBRetributionStore."+test.name,
			func(tt *testing.T) {
				db, cleanUp, err := makeTestChannelDB()
				if err != nil {
					t.Fatalf("unable to open channeldb: %v", err)
				}
				defer db.Close()
				defer cleanUp()

				restartDb := func() RetributionStore {
					// Close and reopen channeldb.
					if err = db.Close(); err != nil {
						t.Fatalf("unable to close "+
							"channeldb during "+
							"restart: %v",
							err)
					}
					db, err = channeldb.Open(db.Path())
					if err != nil {
						t.Fatalf("unable to open "+
							"channeldb: %v", err)
					}

					return newRetributionStore(db)
				}

				frs := newFailingRetributionStore(restartDb)
				test.test(frs, tt)
			},
		)
	}
}

// countRetributions uses a retribution store's ForAll to count the number of
// elements emitted from the store.
func countRetributions(t *testing.T, rs RetributionStore) int {
	count := 0
	err := rs.ForAll(func(_ *retributionInfo) error {
		count++
		return nil
	}, func() {
		count = 0
	})
	if err != nil {
		t.Fatalf("unable to list retributions in db: %v", err)
	}
	return count
}

// testRetributionStoreAddRemove executes a generic test suite for any concrete
// implementation of the RetributionStore interface. This test adds all
// retributions to the store, confirms that they are all present, and then
// removes each one individually. Between each addition or removal, the number
// of elements in the store is checked to ensure that it only changes by one.
func testRetributionStoreAddRemove(frs FailingRetributionStore, t *testing.T) {
	// Make sure that a new retribution store is actually empty.
	if count := countRetributions(t, frs); count != 0 {
		t.Fatalf("expected 0 retributions, found %v", count)
	}

	// Add all retributions, check that ForAll returns the correct
	// information, and then remove all retributions.
	testRetributionStoreAdds(frs, t, false)
	testRetributionStoreForAll(frs, t, false)
	testRetributionStoreRemoves(frs, t, false)
}

// testRetributionStorePersistence executes the same general test as
// testRetributionStoreAddRemove, except that it also restarts the store between
// each operation to ensure that the results are properly persisted.
func testRetributionStorePersistence(frs FailingRetributionStore, t *testing.T) {
	// Make sure that a new retribution store is still empty after failing
	// right off the bat.
	frs.Restart()
	if count := countRetributions(t, frs); count != 0 {
		t.Fatalf("expected 0 retributions, found %v", count)
	}

	// Insert all retributions into the database, restarting and checking
	// between subsequent calls to test that each intermediate addition is
	// persisted.
	testRetributionStoreAdds(frs, t, true)

	// After all retributions have been inserted, verify that the store
	// emits a distinct set of retributions that are equivalent to the test
	// vector.
	testRetributionStoreForAll(frs, t, true)

	// Remove all retributions from the database, restarting and checking
	// between subsequent calls to test that each intermediate removal is
	// persisted.
	testRetributionStoreRemoves(frs, t, true)
}

// testRetributionStoreInit ensures that a retribution store is always
// initialized with no retributions.
func testRetributionStoreInit(frs FailingRetributionStore, t *testing.T) {
	// Make sure that a new retribution store starts empty.
	if count := countRetributions(t, frs); count != 0 {
		t.Fatalf("expected 0 retributions, found %v", count)
	}
}

// testRetributionStoreRemoveEmpty ensures that a retribution store will not
// fail or panic if it is instructed to remove an entry while empty.
func testRetributionStoreRemoveEmpty(frs FailingRetributionStore, t *testing.T) {
	testRetributionStoreRemoves(frs, t, false)
}

// testRetributionStoreOverwrite ensures that attempts to write retribution
// information for a channel point that already exists do not change the
// total number of entries held by the retribution store.
func testRetributionStoreOverwrite(frs FailingRetributionStore, t *testing.T) {
	// Initially, add all retributions to store.
	testRetributionStoreAdds(frs, t, false)

	// Overwrite the initial entries again.
	for i, retInfo := range retributions {
		if err := frs.Add(&retInfo); err != nil {
			t.Fatalf("unable to add retribution %v to store: %v",
				i, err)
		}
	}

	// Check that retribution store still has 2 entries.
	if count := countRetributions(t, frs); count != 2 {
		t.Fatalf("expected 2 retributions, found %v", count)
	}
}

// testRetributionStoreAdds adds all of the test retributions to the database,
// ensuring that the total number of elements increases by exactly 1 after each
// operation. If the `failing` flag is provided, the test will restart the
// database and confirm that the delta is still 1.
func testRetributionStoreAdds(
	frs FailingRetributionStore,
	t *testing.T,
	failing bool) {

	// Iterate over retributions, adding each to the store. If we are
	// testing the store under failures, we restart the store and verify
	// that the contents are the same.
	for i, retInfo := range retributions {
		// Snapshot number of entries before and after the addition.
		nbefore := countRetributions(t, frs)
		if err := frs.Add(&retInfo); err != nil {
			t.Fatalf("unable to add retribution %v to store: %v",
				i, err)
		}
		nafter := countRetributions(t, frs)

		// Check that only one retribution was added.
		if nafter-nbefore != 1 {
			t.Fatalf("expected %v retributions, found %v",
				nbefore+1, nafter)
		}

		if failing {
			frs.Restart()

			// Check that retribution store has persisted addition
			// after restarting.
			nrestart := countRetributions(t, frs)
			if nrestart-nbefore != 1 {
				t.Fatalf("expected %v retributions, found %v",
					nbefore+1, nrestart)
			}
		}
	}
}

// testRetributionStoreRemoves removes all of the test retributions from the
// database, ensuring that the total number of elements decreases by exactly 1
// after each operation. If the `failing` flag is provided, the test will
// restart the database and confirm that the delta is the same.
func testRetributionStoreRemoves(
	frs FailingRetributionStore,
	t *testing.T,
	failing bool) {

	// Iterate over retributions, removing each from the store. If we are
	// testing the store under failures, we restart the store and verify
	// that the contents are the same.
	for i, retInfo := range retributions {
		// Snapshot number of entries before and after the removal.
		nbefore := countRetributions(t, frs)
		err := frs.Remove(&retInfo.chanPoint)
		switch {
		case nbefore == 0 && err == nil:

		case nbefore > 0 && err != nil:
			t.Fatalf("unable to remove retribution %v "+
				"from store: %v", i, err)
		}
		nafter := countRetributions(t, frs)

		// If the store is empty, increment nbefore to simulate the
		// removal of one element.
		if nbefore == 0 {
			nbefore++
		}

		// Check that only one retribution was removed.
		if nbefore-nafter != 1 {
			t.Fatalf("expected %v retributions, found %v",
				nbefore-1, nafter)
		}

		if failing {
			frs.Restart()

			// Check that retribution store has persisted removal
			// after restarting.
			nrestart := countRetributions(t, frs)
			if nbefore-nrestart != 1 {
				t.Fatalf("expected %v retributions, found %v",
					nbefore-1, nrestart)
			}
		}
	}
}

// testRetributionStoreForAll iterates over the current entries in the
// retribution store, ensuring that each entry in the database is unique, and
// corresponds to exactly one of the entries in the test vector. If the
// `failing` flag is provided, the test will restart the database and confirm
// that the entries again validate against the test vectors.
func testRetributionStoreForAll(
	frs FailingRetributionStore,
	t *testing.T,
	failing bool) {

	// nrets is the number of retributions in the test vector.
	nrets := len(retributions)

	// isRestart indicates whether or not the database has been restarted.
	// When testing for failures, this allows the test case to make a second
	// attempt without causing a subsequent restart on the second pass.
	var isRestart bool

restartCheck:
	// Construct a set of all channel points presented by the store. Entries
	// are only added to the set if their corresponding retribution
	// information matches the test vector.
	var foundSet map[wire.OutPoint]struct{}

	// Iterate through the stored retributions, checking to see if we have
	// an equivalent retribution in the test vector. This will return an
	// error unless all persisted retributions exist in the test vector.
	if err := frs.ForAll(func(ret *retributionInfo) error {
		// Fetch the retribution information from the test vector. If
		// the entry does not exist, the test returns an error.
		if exRetInfo, ok := retributionMap[ret.chanPoint]; ok {
			// Compare the presented retribution information with
			// the expected value, fail if they are inconsistent.
			if !reflect.DeepEqual(ret, &exRetInfo) {
				return fmt.Errorf("unexpected retribution "+
					"retrieved from db --\n"+
					"want: %#v\ngot: %#v", exRetInfo, ret,
				)
			}

			// Retribution information from database matches the
			// test vector, record the channel point in the found
			// map.
			foundSet[ret.chanPoint] = struct{}{}

		} else {
			return fmt.Errorf("unknown retribution retrieved "+
				"from db: %v", ret)
		}

		return nil
	}, func() {
		foundSet = make(map[wire.OutPoint]struct{})
	}); err != nil {
		t.Fatalf("failed to iterate over persistent retributions: %v",
			err)
	}

	// Check that the retribution store emits nrets entries.
	if count := countRetributions(t, frs); count != nrets {
		t.Fatalf("expected %v retributions, found %v", nrets, count)
	}

	// Confirm that all of the retributions emitted from the iteration
	// correspond to unique channel points.
	nunique := len(foundSet)
	if nunique != nrets {
		t.Fatalf("expected %v unique retributions, only found %v",
			nrets, nunique)
	}

	// If in failure mode and only on the first pass, restart the database
	// and re-execute the test.
	if failing && !isRestart {
		frs.Restart()
		isRestart = true

		goto restartCheck
	}
}

func initBreachedState(t *testing.T) (*breachArbiter,
	*lnwallet.LightningChannel, *lnwallet.LightningChannel,
	*lnwallet.LocalForceCloseSummary, chan *ContractBreachEvent,
	func(), func()) {
	// Create a pair of channels using a notifier that allows us to signal
	// a spend of the funding transaction. Alice's channel will be the one
	// observing a breach.
	alice, bob, cleanUpChans, err := createInitChannels(1)
	if err != nil {
		t.Fatalf("unable to create test channels: %v", err)
	}

	// Instantiate a breach arbiter to handle the breach of alice's channel.
	contractBreaches := make(chan *ContractBreachEvent)

	brar, cleanUpArb, err := createTestArbiter(
		t, contractBreaches, alice.State().Db,
	)
	if err != nil {
		t.Fatalf("unable to initialize test breach arbiter: %v", err)
	}

	// Send one HTLC to Bob and perform a state transition to lock it in.
	htlcAmount := lnwire.NewMSatFromSatoshis(20000)
	htlc, _ := createHTLC(0, htlcAmount)
	if _, err := alice.AddHTLC(htlc, nil); err != nil {
		t.Fatalf("alice unable to add htlc: %v", err)
	}
	if _, err := bob.ReceiveHTLC(htlc); err != nil {
		t.Fatalf("bob unable to recv add htlc: %v", err)
	}
	if err := forceStateTransition(alice, bob); err != nil {
		t.Fatalf("Can't update the channel state: %v", err)
	}

	// Generate the force close summary at this point in time; this will
	// serve as the old state bob will broadcast.
	bobClose, err := bob.ForceClose()
	if err != nil {
		t.Fatalf("unable to force close bob's channel: %v", err)
	}

	// Now send another HTLC and perform a state transition; this ensures
	// Alice is ahead of the state Bob will broadcast.
	htlc2, _ := createHTLC(1, htlcAmount)
	if _, err := alice.AddHTLC(htlc2, nil); err != nil {
		t.Fatalf("alice unable to add htlc: %v", err)
	}
	if _, err := bob.ReceiveHTLC(htlc2); err != nil {
		t.Fatalf("bob unable to recv add htlc: %v", err)
	}
	if err := forceStateTransition(alice, bob); err != nil {
		t.Fatalf("Can't update the channel state: %v", err)
	}

	return brar, alice, bob, bobClose, contractBreaches, cleanUpChans,
		cleanUpArb
}

// TestBreachHandoffSuccess tests that a channel's close observer properly
// delivers retribution information to the breach arbiter in response to a
// breach close. This test verifies correctness in the event that the handoff
// experiences no interruptions.
func TestBreachHandoffSuccess(t *testing.T) {
	brar, alice, _, bobClose, contractBreaches,
		cleanUpChans, cleanUpArb := initBreachedState(t)
	defer cleanUpChans()
	defer cleanUpArb()

	chanPoint := alice.ChanPoint

	// Signal a spend of the funding transaction and wait for the close
	// observer to exit.
	processACK := make(chan error)
	breach := &ContractBreachEvent{
		ChanPoint: *chanPoint,
		ProcessACK: func(brarErr error) {
			processACK <- brarErr
		},
		BreachRetribution: &lnwallet.BreachRetribution{
			BreachTransaction: bobClose.CloseTx,
			LocalOutputSignDesc: &input.SignDescriptor{
				Output: &wire.TxOut{
					PkScript: breachKeys[0],
				},
			},
		},
	}
	contractBreaches <- breach

	// We'll also wait to consume the ACK back from the breach arbiter.
	select {
	case err := <-processACK:
		if err != nil {
			t.Fatalf("handoff failed: %v", err)
		}
	case <-time.After(time.Second * 15):
		t.Fatalf("breach arbiter didn't send ack back")
	}

	// After exiting, the breach arbiter should have persisted the
	// retribution information and the channel should be shown as pending
	// force closed.
	assertArbiterBreach(t, brar, chanPoint)

	// Send another breach event. Since the handoff for this channel was
	// already ACKed, the breach arbiter should immediately ACK and ignore
	// this event.
	breach = &ContractBreachEvent{
		ChanPoint: *chanPoint,
		ProcessACK: func(brarErr error) {
			processACK <- brarErr
		},
		BreachRetribution: &lnwallet.BreachRetribution{
			BreachTransaction: bobClose.CloseTx,
			LocalOutputSignDesc: &input.SignDescriptor{
				Output: &wire.TxOut{
					PkScript: breachKeys[0],
				},
			},
		},
	}

	contractBreaches <- breach

	// We'll also wait to consume the ACK back from the breach arbiter.
	select {
	case err := <-processACK:
		if err != nil {
			t.Fatalf("handoff failed: %v", err)
		}
	case <-time.After(time.Second * 15):
		t.Fatalf("breach arbiter didn't send ack back")
	}

	// State should not have changed.
	assertArbiterBreach(t, brar, chanPoint)
}
// TestBreachHandoffFail tests that a channel's close observer properly
|
|
|
|
// delivers retribution information to the breach arbiter in response to a
|
|
|
|
// breach close. This test verifies correctness in the event that the breach
|
|
|
|
// arbiter fails to write the information to disk, and that a subsequent attempt
|
|
|
|
// at the handoff succeeds.
|
|
|
|
func TestBreachHandoffFail(t *testing.T) {
|
2018-05-31 13:41:51 +03:00
|
|
|
brar, alice, _, bobClose, contractBreaches,
|
|
|
|
cleanUpChans, cleanUpArb := initBreachedState(t)
|
2017-11-21 10:57:08 +03:00
|
|
|
defer cleanUpChans()
|
|
|
|
defer cleanUpArb()
|
|
|
|
|
|
|
|
// Before alerting Alice of the breach, instruct our failing retribution
|
|
|
|
// store to fail the next database operation, which we expect to write
|
|
|
|
// the information handed off by the channel's close observer.
|
|
|
|
fstore := brar.cfg.Store.(*failingRetributionStore)
|
|
|
|
fstore.FailNextAdd(nil)
|
|
|
|
|
|
|
|
// Signal the notifier to dispatch spend notifications of the funding
|
|
|
|
// transaction using the transaction from bob's closing summary.
|
|
|
|
chanPoint := alice.ChanPoint
|
2021-04-21 13:51:04 +03:00
|
|
|
processACK := make(chan error)
|
2018-04-19 12:09:25 +03:00
|
|
|
breach := &ContractBreachEvent{
|
2021-04-21 13:51:04 +03:00
|
|
|
ChanPoint: *chanPoint,
|
|
|
|
ProcessACK: func(brarErr error) {
|
|
|
|
processACK <- brarErr
|
|
|
|
},
|
2018-04-19 12:09:25 +03:00
|
|
|
BreachRetribution: &lnwallet.BreachRetribution{
|
|
|
|
BreachTransaction: bobClose.CloseTx,
|
2019-01-16 17:47:43 +03:00
|
|
|
LocalOutputSignDesc: &input.SignDescriptor{
|
2018-05-31 08:16:57 +03:00
|
|
|
Output: &wire.TxOut{
|
|
|
|
PkScript: breachKeys[0],
|
|
|
|
},
|
|
|
|
},
|
2018-04-19 12:09:25 +03:00
|
|
|
},
|
|
|
|
}
|
2018-04-19 12:09:25 +03:00
|
|
|
contractBreaches <- breach
|
|
|
|
|
|
|
|
// We'll also wait to consume the ACK back from the breach arbiter.
|
|
|
|
select {
|
2021-04-21 13:51:04 +03:00
|
|
|
case err := <-processACK:
|
|
|
|
if err == nil {
|
|
|
|
t.Fatalf("breach write should have failed")
|
|
|
|
}
|
|
|
|
case <-time.After(time.Second * 15):
|
|
|
|
t.Fatalf("breach arbiter didn't send ack back")
|
|
|
|
}
|
2017-11-21 10:57:08 +03:00
|
|
|
|
|
|
|
// Since the handoff failed, the breach arbiter should not show the
|
|
|
|
// channel as breached, and the channel should also not have been marked
|
|
|
|
// pending closed.
|
|
|
|
assertNoArbiterBreach(t, brar, chanPoint)
|
|
|
|
assertNotPendingClosed(t, alice)
|
|
|
|
|
2018-05-31 13:41:51 +03:00
|
|
|
brar, cleanUpArb, err := createTestArbiter(
|
2018-04-19 12:09:25 +03:00
|
|
|
t, contractBreaches, alice.State().Db,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to initialize test breach arbiter: %v", err)
|
|
|
|
}
|
|
|
|
defer cleanUpArb()
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
// Signal a spend of the funding transaction and wait for the close
|
|
|
|
// observer to exit. This time we are allowing the handoff to succeed.
|
2018-04-19 12:09:25 +03:00
|
|
|
breach = &ContractBreachEvent{
|
2021-04-21 13:51:04 +03:00
|
|
|
ChanPoint: *chanPoint,
|
|
|
|
ProcessACK: func(brarErr error) {
|
|
|
|
processACK <- brarErr
|
|
|
|
},
|
2018-04-19 12:09:25 +03:00
|
|
|
BreachRetribution: &lnwallet.BreachRetribution{
|
|
|
|
BreachTransaction: bobClose.CloseTx,
|
2019-01-16 17:47:43 +03:00
|
|
|
LocalOutputSignDesc: &input.SignDescriptor{
|
2018-05-31 08:16:57 +03:00
|
|
|
Output: &wire.TxOut{
|
|
|
|
PkScript: breachKeys[0],
|
|
|
|
},
|
|
|
|
},
|
2018-04-19 12:09:25 +03:00
|
|
|
},
|
|
|
|
}
|
2018-04-19 12:09:25 +03:00
|
|
|
|
|
|
|
contractBreaches <- breach
|
|
|
|
|
|
|
|
select {
|
2021-04-21 13:51:04 +03:00
|
|
|
case err := <-processACK:
|
2018-04-19 12:09:25 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("handoff failed: %v", err)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Second * 15):
|
|
|
|
t.Fatalf("breach arbiter didn't send ack back")
|
|
|
|
}
|
2017-11-21 10:57:08 +03:00
|
|
|
|
|
|
|
// Check that the breach was properly recorded in the breach arbiter,
|
|
|
|
// and that the close observer marked the channel as pending closed
|
|
|
|
// before exiting.
|
|
|
|
assertArbiterBreach(t, brar, chanPoint)
|
|
|
|
}
|
|
|
|
|
2021-02-12 14:36:45 +03:00
|
|
|
// TestBreachCreateJusticeTx tests that we create three different variants of
|
|
|
|
// the justice tx.
|
|
|
|
func TestBreachCreateJusticeTx(t *testing.T) {
|
|
|
|
brar, _, _, _, _, cleanUpChans, cleanUpArb := initBreachedState(t)
|
|
|
|
defer cleanUpChans()
|
|
|
|
defer cleanUpArb()
|
|
|
|
|
|
|
|
// In this test we just want to check that the correct inputs are added
|
|
|
|
// to the justice tx, not that we create a valid spend, so we just set
|
|
|
|
// some params making the script generation succeed.
|
|
|
|
aliceKeyPriv, _ := btcec.PrivKeyFromBytes(
|
|
|
|
btcec.S256(), channels.AlicesPrivKey,
|
|
|
|
)
|
|
|
|
alicePubKey := aliceKeyPriv.PubKey()
|
|
|
|
|
|
|
|
signDesc := &breachedOutputs[0].signDesc
|
|
|
|
signDesc.KeyDesc.PubKey = alicePubKey
|
|
|
|
signDesc.DoubleTweak = aliceKeyPriv
|
|
|
|
|
|
|
|
// We'll test all the different types of outputs we'll sweep with the
|
|
|
|
// justice tx.
|
|
|
|
outputTypes := []input.StandardWitnessType{
|
|
|
|
input.CommitmentNoDelay,
|
|
|
|
input.CommitSpendNoDelayTweakless,
|
|
|
|
input.CommitmentToRemoteConfirmed,
|
|
|
|
input.CommitmentRevoke,
|
|
|
|
input.HtlcAcceptedRevoke,
|
|
|
|
input.HtlcOfferedRevoke,
|
|
|
|
input.HtlcSecondLevelRevoke,
|
|
|
|
}
|
|
|
|
|
|
|
|
breachedOutputs := make([]breachedOutput, len(outputTypes))
|
|
|
|
for i, wt := range outputTypes {
|
|
|
|
// Create a fake breached output for each type, ensuring they
|
|
|
|
// have different outpoints for our logic to accept them.
|
|
|
|
op := breachedOutputs[0].outpoint
|
|
|
|
op.Index = uint32(i)
|
|
|
|
breachedOutputs[i] = makeBreachedOutput(
|
|
|
|
&op,
|
|
|
|
wt,
|
|
|
|
// Second level scripts don't matter in this test.
|
|
|
|
nil,
|
|
|
|
signDesc,
|
|
|
|
1,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Create the justice transactions.
|
|
|
|
justiceTxs, err := brar.createJusticeTx(breachedOutputs)
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.NotNil(t, justiceTxs)
|
|
|
|
|
|
|
|
// The spendAll tx should be spending all the outputs. This is the
|
|
|
|
// "regular" justice transaction type.
|
|
|
|
require.Len(t, justiceTxs.spendAll.TxIn, len(breachedOutputs))
|
|
|
|
|
|
|
|
// The spendCommitOuts tx should be spending the 4 types of commit outs
// (note that in practice there will be at most two commit outputs per
// commitment, but we test all 4 types here).
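// The four commitment claim types exercised here are CommitmentNoDelay,
// CommitSpendNoDelayTweakless, CommitmentToRemoteConfirmed and
// CommitmentRevoke, matching the first four entries of outputTypes above.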
|
|
|
|
require.Len(t, justiceTxs.spendCommitOuts.TxIn, 4)
|
|
|
|
|
|
|
|
// Finally check that the spendHTLCs tx are spending the two revoked
|
|
|
|
// HTLC types, and the second level type.
|
|
|
|
require.Len(t, justiceTxs.spendHTLCs.TxIn, 3)
|
|
|
|
}
|
|
|
|
|
2021-04-20 15:54:31 +03:00
|
|
|
type publAssertion func(*testing.T, map[wire.OutPoint]struct{},
|
2021-04-20 16:42:23 +03:00
|
|
|
chan *wire.MsgTx, chainhash.Hash) *wire.MsgTx
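// The *wire.MsgTx returned by a publAssertion is the justice transaction it
// observed being published (nil if nothing was published); testBreachSpends
// keeps the last one around so it can later deliver the final confirmation.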
|
2019-03-20 05:22:59 +03:00
|
|
|
|
|
|
|
type breachTest struct {
|
|
|
|
name string
|
|
|
|
|
|
|
|
// spend2ndLevel requests that second level htlcs be spent *again*, as
|
|
|
|
// if by a remote party or watchtower. The outpoint of the second level
|
|
|
|
// htlc is in effect "re-added" to the set of inputs.
|
|
|
|
spend2ndLevel bool
|
|
|
|
|
2021-04-20 09:46:23 +03:00
|
|
|
// sweepHtlc tests that the HTLC output is swept using the revocation
|
|
|
|
// path in a separate tx.
|
|
|
|
sweepHtlc bool
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// sendFinalConf informs the test to send a confirmation for the justice
|
|
|
|
// transaction before asserting the arbiter is cleaned up.
|
|
|
|
sendFinalConf bool
|
|
|
|
|
|
|
|
// whenNonZeroInputs is called after spending an input but there are
|
|
|
|
// further inputs to spend in the test.
|
|
|
|
whenNonZeroInputs publAssertion
|
|
|
|
|
|
|
|
// whenZeroInputs is called after spending an input but there are no
|
|
|
|
// further inputs to spend in the test.
|
|
|
|
whenZeroInputs publAssertion
|
|
|
|
}
|
|
|
|
|
2021-04-20 10:24:10 +03:00
|
|
|
type spendTxs struct {
|
|
|
|
commitSpendTx *wire.MsgTx
|
|
|
|
htlc2ndLevlTx *wire.MsgTx
|
|
|
|
htlc2ndLevlSpend *wire.MsgTx
|
2021-04-20 09:46:23 +03:00
|
|
|
htlcSweep *wire.MsgTx
|
2021-04-20 10:24:10 +03:00
|
|
|
}
|
|
|
|
|
2021-04-20 09:46:23 +03:00
|
|
|
func getSpendTransactions(signer input.Signer, chanPoint *wire.OutPoint,
|
|
|
|
retribution *lnwallet.BreachRetribution) (*spendTxs, error) {
|
2021-04-20 10:24:10 +03:00
|
|
|
|
|
|
|
localOutpoint := retribution.LocalOutpoint
|
|
|
|
remoteOutpoint := retribution.RemoteOutpoint
|
|
|
|
htlcOutpoint := retribution.HtlcRetributions[0].OutPoint
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// commitSpendTx is used to spend commitment outputs.
|
2021-04-20 10:24:10 +03:00
|
|
|
commitSpendTx := &wire.MsgTx{
|
|
|
|
TxIn: []*wire.TxIn{
|
|
|
|
{
|
|
|
|
PreviousOutPoint: localOutpoint,
|
|
|
|
},
|
|
|
|
{
|
|
|
|
PreviousOutPoint: remoteOutpoint,
|
|
|
|
},
|
|
|
|
},
|
2019-03-20 05:22:59 +03:00
|
|
|
TxOut: []*wire.TxOut{
|
|
|
|
{Value: 500000000},
|
|
|
|
},
|
|
|
|
}
|
2021-04-20 10:24:10 +03:00
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// htlc2ndLevlTx is used to transition an htlc output on the commitment
|
|
|
|
// transaction to a second level htlc.
|
2021-04-20 10:24:10 +03:00
|
|
|
htlc2ndLevlTx := &wire.MsgTx{
|
|
|
|
TxIn: []*wire.TxIn{
|
|
|
|
{
|
|
|
|
PreviousOutPoint: htlcOutpoint,
|
|
|
|
},
|
|
|
|
},
|
2019-03-20 05:22:59 +03:00
|
|
|
TxOut: []*wire.TxOut{
|
|
|
|
{Value: 20000},
|
|
|
|
},
|
|
|
|
}
|
2021-04-20 10:24:10 +03:00
|
|
|
|
|
|
|
secondLvlOp := wire.OutPoint{
|
|
|
|
Hash: htlc2ndLevlTx.TxHash(),
|
|
|
|
Index: 0,
|
|
|
|
}
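// The second level transaction above has a single output, so any spend of it
// always references output index zero.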
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// htlcSpendTx is used to spend from a second level htlc.
|
2021-04-20 10:24:10 +03:00
|
|
|
htlcSpendTx := &wire.MsgTx{
|
|
|
|
TxIn: []*wire.TxIn{
|
|
|
|
{
|
|
|
|
PreviousOutPoint: secondLvlOp,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
TxOut: []*wire.TxOut{
|
|
|
|
{Value: 10000},
|
|
|
|
},
|
|
|
|
}
|
2021-04-20 10:24:10 +03:00
|
|
|
|
2021-04-20 09:46:23 +03:00
|
|
|
// htlcSweep is used to spend the HTLC output directly using the
|
|
|
|
// revocation key.
|
|
|
|
htlcSweep := &wire.MsgTx{
|
|
|
|
TxIn: []*wire.TxIn{
|
|
|
|
{
|
|
|
|
PreviousOutPoint: htlcOutpoint,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
TxOut: []*wire.TxOut{
|
|
|
|
{Value: 21000},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
// In order for the breacharbiter to detect that it is being spent
|
|
|
|
// using the revocation key, it will inspect the witness. Therefore
|
|
|
|
// sign and add the witness to the HTLC sweep.
|
|
|
|
retInfo := newRetributionInfo(chanPoint, retribution)
|
|
|
|
|
|
|
|
hashCache := txscript.NewTxSigHashes(htlcSweep)
|
|
|
|
for i := range retInfo.breachedOutputs {
|
|
|
|
inp := &retInfo.breachedOutputs[i]
|
|
|
|
|
|
|
|
// Find the HTLC output so we can add the witness.
|
|
|
|
switch inp.witnessType {
|
|
|
|
case input.HtlcAcceptedRevoke:
|
|
|
|
fallthrough
|
|
|
|
case input.HtlcOfferedRevoke:
|
|
|
|
inputScript, err := inp.CraftInputScript(
|
|
|
|
signer, htlcSweep, hashCache, 0,
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
htlcSweep.TxIn[0].Witness = inputScript.Witness
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-20 10:24:10 +03:00
|
|
|
return &spendTxs{
|
|
|
|
commitSpendTx: commitSpendTx,
|
|
|
|
htlc2ndLevlTx: htlc2ndLevlTx,
|
|
|
|
htlc2ndLevlSpend: htlcSpendTx,
|
2021-04-20 09:46:23 +03:00
|
|
|
htlcSweep: htlcSweep,
|
|
|
|
}, nil
|
2021-04-20 10:24:10 +03:00
|
|
|
}
|
2019-03-20 05:22:59 +03:00
|
|
|
|
|
|
|
var breachTests = []breachTest{
|
|
|
|
{
|
|
|
|
name: "all spends",
|
|
|
|
spend2ndLevel: true,
|
|
|
|
whenNonZeroInputs: func(t *testing.T,
|
2021-04-20 15:54:31 +03:00
|
|
|
inputs map[wire.OutPoint]struct{},
|
2021-04-20 16:42:23 +03:00
|
|
|
publTx chan *wire.MsgTx, _ chainhash.Hash) *wire.MsgTx {
|
2019-03-20 05:22:59 +03:00
|
|
|
|
|
|
|
var tx *wire.MsgTx
|
|
|
|
select {
|
|
|
|
case tx = <-publTx:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx was not published")
|
|
|
|
}
|
|
|
|
|
2021-04-20 15:54:31 +03:00
|
|
|
// The justice transaction should have the same number
|
2019-03-20 05:22:59 +03:00
|
|
|
// of inputs as we are tracking in the test.
|
|
|
|
if len(tx.TxIn) != len(inputs) {
|
|
|
|
t.Fatalf("expected justice txn to have %d "+
|
|
|
|
"inputs, found %d", len(inputs),
|
|
|
|
len(tx.TxIn))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that each input exists on the justice
|
|
|
|
// transaction.
|
|
|
|
for in := range inputs {
|
|
|
|
findInputIndex(t, in, tx)
|
|
|
|
}
|
|
|
|
|
2021-04-20 16:42:23 +03:00
|
|
|
return tx
|
2019-03-20 05:22:59 +03:00
|
|
|
},
|
|
|
|
whenZeroInputs: func(t *testing.T,
|
2021-04-20 15:54:31 +03:00
|
|
|
inputs map[wire.OutPoint]struct{},
|
2021-04-20 16:42:23 +03:00
|
|
|
publTx chan *wire.MsgTx, _ chainhash.Hash) *wire.MsgTx {
|
2019-03-20 05:22:59 +03:00
|
|
|
|
|
|
|
// Sanity check to ensure the brar doesn't try to
|
|
|
|
// broadcast another sweep, since all outputs have been
|
|
|
|
// spent externally.
|
|
|
|
select {
|
|
|
|
case <-publTx:
|
|
|
|
t.Fatalf("tx published unexpectedly")
|
|
|
|
case <-time.After(50 * time.Millisecond):
|
|
|
|
}
|
2021-04-20 16:42:23 +03:00
|
|
|
|
|
|
|
return nil
|
2019-03-20 05:22:59 +03:00
|
|
|
},
|
|
|
|
},
|
|
|
|
{
|
|
|
|
name: "commit spends, second level sweep",
|
|
|
|
spend2ndLevel: false,
|
|
|
|
sendFinalConf: true,
|
|
|
|
whenNonZeroInputs: func(t *testing.T,
|
2021-04-20 15:54:31 +03:00
|
|
|
inputs map[wire.OutPoint]struct{},
|
2021-04-20 16:42:23 +03:00
|
|
|
publTx chan *wire.MsgTx, _ chainhash.Hash) *wire.MsgTx {
|
2019-03-20 05:22:59 +03:00
|
|
|
|
2021-04-20 15:54:31 +03:00
|
|
|
var tx *wire.MsgTx
|
2019-03-20 05:22:59 +03:00
|
|
|
select {
|
2021-04-20 15:54:31 +03:00
|
|
|
case tx = <-publTx:
|
2019-03-20 05:22:59 +03:00
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx was not published")
|
|
|
|
}
|
2021-04-20 15:54:31 +03:00
|
|
|
|
|
|
|
// The justice transaction should have the same number
|
|
|
|
// of inputs as we are tracking in the test.
|
|
|
|
if len(tx.TxIn) != len(inputs) {
|
|
|
|
t.Fatalf("expected justice txn to have %d "+
|
|
|
|
"inputs, found %d", len(inputs),
|
|
|
|
len(tx.TxIn))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that each input exists on the justice
|
|
|
|
// transaction.
|
|
|
|
for in := range inputs {
|
|
|
|
findInputIndex(t, in, tx)
|
|
|
|
}
|
|
|
|
|
2021-04-20 16:42:23 +03:00
|
|
|
return tx
|
2019-03-20 05:22:59 +03:00
|
|
|
},
|
|
|
|
whenZeroInputs: func(t *testing.T,
|
2021-04-20 15:54:31 +03:00
|
|
|
inputs map[wire.OutPoint]struct{},
|
2021-04-20 10:24:10 +03:00
|
|
|
publTx chan *wire.MsgTx,
|
2021-04-20 16:42:23 +03:00
|
|
|
htlc2ndLevlTxHash chainhash.Hash) *wire.MsgTx {
|
2019-03-20 05:22:59 +03:00
|
|
|
|
|
|
|
// Now a transaction attempting to spend from the second
|
|
|
|
// level tx should be published instead. Let this
|
|
|
|
// publish succeed by setting the publishing error to
|
|
|
|
// nil.
|
|
|
|
var tx *wire.MsgTx
|
|
|
|
select {
|
|
|
|
case tx = <-publTx:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx was not published")
|
|
|
|
}
|
|
|
|
|
|
|
|
// The commitment outputs should be gone, and there
|
|
|
|
// should only be a single htlc spend.
|
|
|
|
if len(tx.TxIn) != 1 {
|
|
|
|
t.Fatalf("expect 1 htlc output, found %d "+
|
|
|
|
"outputs", len(tx.TxIn))
|
|
|
|
}
|
|
|
|
|
|
|
|
// The remaining TxIn previously attempting to spend
|
|
|
|
// the HTLC outpoint should now be spending from the
|
|
|
|
// second level tx.
|
|
|
|
//
|
|
|
|
// NOTE: Commitment outputs and htlc sweeps are spent
// by different transactions (and thus txids),
|
|
|
|
// ensuring we aren't mistaking this for a different
|
|
|
|
// output type.
|
|
|
|
onlyInput := tx.TxIn[0].PreviousOutPoint.Hash
|
2021-04-20 10:24:10 +03:00
|
|
|
if onlyInput != htlc2ndLevlTxHash {
|
2019-03-20 05:22:59 +03:00
|
|
|
t.Fatalf("tx not attempting to spend second "+
|
|
|
|
"level tx, %v", tx.TxIn[0])
|
|
|
|
}
|
2021-04-20 16:42:23 +03:00
|
|
|
|
|
|
|
return tx
|
2019-03-20 05:22:59 +03:00
|
|
|
},
|
|
|
|
},
|
2021-04-20 09:46:23 +03:00
|
|
|
{ // nolint: dupl
|
|
|
|
// Test that if the HTLC output is swept via the revoke path
|
|
|
|
// (by us) in a separate tx, it will be handled correctly.
|
|
|
|
name: "sweep htlc",
|
|
|
|
sweepHtlc: true,
|
|
|
|
whenNonZeroInputs: func(t *testing.T,
|
|
|
|
inputs map[wire.OutPoint]struct{},
|
2021-04-20 16:42:23 +03:00
|
|
|
publTx chan *wire.MsgTx, _ chainhash.Hash) *wire.MsgTx {
|
2021-04-20 09:46:23 +03:00
|
|
|
|
|
|
|
var tx *wire.MsgTx
|
|
|
|
select {
|
|
|
|
case tx = <-publTx:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx was not published")
|
|
|
|
}
|
|
|
|
|
|
|
|
// The justice transaction should have the same number
|
|
|
|
// of inputs as we are tracking in the test.
|
|
|
|
if len(tx.TxIn) != len(inputs) {
|
|
|
|
t.Fatalf("expected justice txn to have %d "+
|
|
|
|
"inputs, found %d", len(inputs),
|
|
|
|
len(tx.TxIn))
|
|
|
|
}
|
|
|
|
|
|
|
|
// Ensure that each input exists on the justice
|
|
|
|
// transaction.
|
|
|
|
for in := range inputs {
|
|
|
|
findInputIndex(t, in, tx)
|
|
|
|
}
|
2021-04-20 16:42:23 +03:00
|
|
|
|
|
|
|
return tx
|
2021-04-20 09:46:23 +03:00
|
|
|
},
|
|
|
|
whenZeroInputs: func(t *testing.T,
|
|
|
|
inputs map[wire.OutPoint]struct{},
|
2021-04-20 16:42:23 +03:00
|
|
|
publTx chan *wire.MsgTx, _ chainhash.Hash) *wire.MsgTx {
|
2021-04-20 09:46:23 +03:00
|
|
|
|
|
|
|
// Sanity check to ensure the brar doesn't try to
|
|
|
|
// broadcast another sweep, since all outputs have been
|
|
|
|
// spent externally.
|
|
|
|
select {
|
|
|
|
case <-publTx:
|
|
|
|
t.Fatalf("tx published unexpectedly")
|
|
|
|
case <-time.After(50 * time.Millisecond):
|
|
|
|
}
|
2021-04-20 16:42:23 +03:00
|
|
|
|
|
|
|
return nil
|
2021-04-20 09:46:23 +03:00
|
|
|
},
|
|
|
|
},
|
2019-03-20 05:22:59 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// TestBreachSpends checks the behavior of the breach arbiter in response to
|
|
|
|
// spend events on a channel's outputs by asserting that it properly removes or
|
|
|
|
// modifies the inputs from the justice txn.
|
|
|
|
func TestBreachSpends(t *testing.T) {
|
|
|
|
for _, test := range breachTests {
|
|
|
|
tc := test
|
|
|
|
t.Run(tc.name, func(t *testing.T) {
|
|
|
|
testBreachSpends(t, tc)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func testBreachSpends(t *testing.T, test breachTest) {
|
2018-05-31 13:52:02 +03:00
|
|
|
brar, alice, _, bobClose, contractBreaches,
|
|
|
|
cleanUpChans, cleanUpArb := initBreachedState(t)
|
|
|
|
defer cleanUpChans()
|
|
|
|
defer cleanUpArb()
|
|
|
|
|
|
|
|
var (
|
|
|
|
height = bobClose.ChanSnapshot.CommitHeight
|
|
|
|
forceCloseTx = bobClose.CloseTx
|
|
|
|
chanPoint = alice.ChanPoint
|
|
|
|
publTx = make(chan *wire.MsgTx)
|
|
|
|
publErr error
|
2019-03-20 05:22:59 +03:00
|
|
|
publMtx sync.Mutex
|
2018-05-31 13:52:02 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
// Make PublishTransaction always return ErrDoubleSpend to begin with.
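// Returning ErrDoubleSpend simulates the justice tx being rejected because
// one of its inputs has already been spent, which is what drives the breach
// arbiter to inspect the conflicting spends and prune or promote its inputs.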
|
|
|
|
publErr = lnwallet.ErrDoubleSpend
|
2020-05-18 15:13:23 +03:00
|
|
|
brar.cfg.PublishTransaction = func(tx *wire.MsgTx, _ string) error {
|
2020-04-17 13:15:23 +03:00
|
|
|
publMtx.Lock()
|
|
|
|
err := publErr
|
|
|
|
publMtx.Unlock()
|
2018-05-31 13:52:02 +03:00
|
|
|
publTx <- tx
|
2019-03-20 05:22:59 +03:00
|
|
|
|
2020-04-17 13:15:23 +03:00
|
|
|
return err
|
2018-05-31 13:52:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Notify the breach arbiter about the breach.
|
|
|
|
retribution, err := lnwallet.NewBreachRetribution(
|
2019-08-08 06:29:15 +03:00
|
|
|
alice.State(), height, 1,
|
|
|
|
)
|
2018-05-31 13:52:02 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create breach retribution: %v", err)
|
|
|
|
}
|
|
|
|
|
2021-04-21 13:51:04 +03:00
|
|
|
processACK := make(chan error)
|
2018-05-31 13:52:02 +03:00
|
|
|
breach := &ContractBreachEvent{
|
2021-04-21 13:51:04 +03:00
|
|
|
ChanPoint: *chanPoint,
|
|
|
|
ProcessACK: func(brarErr error) {
|
|
|
|
processACK <- brarErr
|
|
|
|
},
|
2018-05-31 13:52:02 +03:00
|
|
|
BreachRetribution: retribution,
|
|
|
|
}
|
2021-04-20 16:42:23 +03:00
|
|
|
select {
|
|
|
|
case contractBreaches <- breach:
|
|
|
|
case <-time.After(15 * time.Second):
|
|
|
|
t.Fatalf("breach not delivered")
|
|
|
|
}
|
2018-05-31 13:52:02 +03:00
|
|
|
|
|
|
|
// We'll also wait to consume the ACK back from the breach arbiter.
|
|
|
|
select {
|
2021-04-21 13:51:04 +03:00
|
|
|
case err := <-processACK:
|
2018-05-31 13:52:02 +03:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("handoff failed: %v", err)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Second * 15):
|
|
|
|
t.Fatalf("breach arbiter didn't send ack back")
|
|
|
|
}
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
state := alice.State()
|
|
|
|
err = state.CloseChannel(&channeldb.ChannelCloseSummary{
|
|
|
|
ChanPoint: state.FundingOutpoint,
|
|
|
|
ChainHash: state.ChainHash,
|
|
|
|
RemotePub: state.IdentityPub,
|
|
|
|
CloseType: channeldb.BreachClose,
|
|
|
|
Capacity: state.Capacity,
|
|
|
|
IsPending: true,
|
|
|
|
ShortChanID: state.ShortChanID(),
|
|
|
|
RemoteCurrentRevocation: state.RemoteCurrentRevocation,
|
|
|
|
RemoteNextRevocation: state.RemoteNextRevocation,
|
|
|
|
LocalChanConfig: state.LocalChanCfg,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to close channel: %v", err)
|
|
|
|
}
|
|
|
|
|
2018-05-31 13:52:02 +03:00
|
|
|
// After exiting, the breach arbiter should have persisted the
|
|
|
|
// retribution information and the channel should be shown as pending
|
|
|
|
// force closed.
|
|
|
|
assertArbiterBreach(t, brar, chanPoint)
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// Assert that the database sees the channel as pending close, otherwise
|
|
|
|
// the breach arbiter won't be able to fully close it.
|
|
|
|
assertPendingClosed(t, alice)
|
|
|
|
|
2018-05-31 13:52:02 +03:00
|
|
|
// Notify that the breaching transaction is confirmed, to trigger the
|
|
|
|
// retribution logic.
|
2020-08-18 22:44:42 +03:00
|
|
|
notifier := brar.cfg.Notifier.(*mock.SpendNotifier)
|
2021-04-20 16:42:23 +03:00
|
|
|
|
|
|
|
select {
|
|
|
|
case notifier.ConfChan <- &chainntnfs.TxConfirmation{}:
|
|
|
|
case <-time.After(15 * time.Second):
|
|
|
|
t.Fatalf("conf not delivered")
|
|
|
|
}
|
2018-05-31 13:52:02 +03:00
|
|
|
|
|
|
|
// The breach arbiter should attempt to sweep all outputs on the
|
|
|
|
// breached commitment. We'll pretend that the HTLC output has been
|
|
|
|
// spent by the channel counterparty's second level tx already.
|
|
|
|
var tx *wire.MsgTx
|
|
|
|
select {
|
|
|
|
case tx = <-publTx:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx was not published")
|
|
|
|
}
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// All outputs should initially spend from the force closed txn.
|
|
|
|
forceTxID := forceCloseTx.TxHash()
|
|
|
|
for _, txIn := range tx.TxIn {
|
|
|
|
if txIn.PreviousOutPoint.Hash != forceTxID {
|
|
|
|
t.Fatalf("og justice tx not spending commitment")
|
2018-05-31 13:52:02 +03:00
|
|
|
}
|
|
|
|
}
|
2019-03-20 05:22:59 +03:00
|
|
|
|
|
|
|
localOutpoint := retribution.LocalOutpoint
|
|
|
|
remoteOutpoint := retribution.RemoteOutpoint
|
|
|
|
htlcOutpoint := retribution.HtlcRetributions[0].OutPoint
|
|
|
|
|
2021-04-20 09:46:23 +03:00
|
|
|
spendTxs, err := getSpendTransactions(
|
2021-04-20 10:24:10 +03:00
|
|
|
brar.cfg.Signer, chanPoint, retribution,
|
|
|
|
)
|
2021-04-20 09:46:23 +03:00
|
|
|
require.NoError(t, err)
|
2021-04-20 10:24:10 +03:00
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// Construct a map from outpoint on the force close to the transaction
|
|
|
|
// we want it to be spent by. As the test progresses, this map will be
|
|
|
|
// updated to contain only the set of commitment or second level
|
|
|
|
// outpoints that remain to be spent.
|
2021-04-20 15:54:31 +03:00
|
|
|
spentBy := map[wire.OutPoint]*wire.MsgTx{
|
2021-04-20 10:24:10 +03:00
|
|
|
htlcOutpoint: spendTxs.htlc2ndLevlTx,
|
|
|
|
localOutpoint: spendTxs.commitSpendTx,
|
|
|
|
remoteOutpoint: spendTxs.commitSpendTx,
|
2018-05-31 13:52:02 +03:00
|
|
|
}
|
|
|
|
|
2021-04-20 15:54:31 +03:00
|
|
|
// We also keep a map of those remaining outputs we expect the
|
|
|
|
// breacharbiter to try and sweep.
|
|
|
|
inputsToSweep := map[wire.OutPoint]struct{}{
|
|
|
|
htlcOutpoint: {},
|
|
|
|
localOutpoint: {},
|
|
|
|
remoteOutpoint: {},
|
|
|
|
}
|
|
|
|
|
2021-04-20 10:24:10 +03:00
|
|
|
htlc2ndLevlTx := spendTxs.htlc2ndLevlTx
|
|
|
|
htlcSpendTx := spendTxs.htlc2ndLevlSpend
|
2021-04-20 09:46:23 +03:00
|
|
|
|
|
|
|
// If the test is checking sweep of the HTLC directly without the
|
|
|
|
// second level, insert the sweep tx instead.
|
|
|
|
if test.sweepHtlc {
|
|
|
|
spentBy[htlcOutpoint] = spendTxs.htlcSweep
|
|
|
|
}
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// Until no more inputs to spend remain, deliver the spend events and
|
|
|
|
// process the assertions prescribed by the test case.
|
2021-04-20 16:42:23 +03:00
|
|
|
var justiceTx *wire.MsgTx
|
2021-04-20 15:54:31 +03:00
|
|
|
for len(spentBy) > 0 {
|
2019-03-20 05:22:59 +03:00
|
|
|
var (
|
|
|
|
op wire.OutPoint
|
|
|
|
spendTx *wire.MsgTx
|
|
|
|
)
|
|
|
|
|
|
|
|
// Pick an outpoint at random from the set of inputs.
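// Go randomizes map iteration order, so ranging over the map and breaking
// after the first entry picks an arbitrary remaining outpoint on each pass.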
|
2021-04-20 15:54:31 +03:00
|
|
|
for op, spendTx = range spentBy {
|
|
|
|
delete(spentBy, op)
|
2019-03-20 05:22:59 +03:00
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deliver the spend notification for the chosen transaction.
|
|
|
|
notifier.Spend(&op, 2, spendTx)
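// The mock notifier dispatches this as an on-chain spend of op by spendTx at
// height 2 to the breach arbiter's registered spend subscription.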
|
|
|
|
|
2021-04-20 15:54:31 +03:00
|
|
|
// Since the remote just swept this input, we expect our next
// justice transaction not to include it.
|
|
|
|
delete(inputsToSweep, op)
|
|
|
|
|
|
|
|
// If this is the second-level spend, we must add the new
|
|
|
|
// outpoint to our expected sweeps.
|
2019-03-20 05:22:59 +03:00
|
|
|
spendTxID := spendTx.TxHash()
|
2021-04-20 15:54:31 +03:00
|
|
|
if spendTxID == htlc2ndLevlTx.TxHash() {
|
|
|
|
// Create the second level outpoint that will
|
|
|
|
// be spent, the index is always zero for these
|
|
|
|
// 1-in-1-out txns.
|
2019-03-20 05:22:59 +03:00
|
|
|
spendOp := wire.OutPoint{Hash: spendTxID}
|
2021-04-20 15:54:31 +03:00
|
|
|
inputsToSweep[spendOp] = struct{}{}
|
|
|
|
|
|
|
|
// When the second layer transfer is detected, add back
|
|
|
|
// the outpoint of the second layer tx so that we can
|
|
|
|
// spend it again. Only do so if the test requests this
|
|
|
|
// behavior.
|
|
|
|
if test.spend2ndLevel {
|
|
|
|
spentBy[spendOp] = htlcSpendTx
|
|
|
|
}
|
2019-03-20 05:22:59 +03:00
|
|
|
}
|
|
|
|
|
2021-04-20 15:54:31 +03:00
|
|
|
if len(spentBy) > 0 {
|
2021-04-20 16:42:23 +03:00
|
|
|
justiceTx = test.whenNonZeroInputs(
	t, inputsToSweep, publTx, htlc2ndLevlTx.TxHash(),
)
|
2019-03-20 05:22:59 +03:00
|
|
|
} else {
|
|
|
|
// Reset the publishing error so that any publication made by
// the breach arbiter will succeed.
|
|
|
|
publMtx.Lock()
|
|
|
|
publErr = nil
|
|
|
|
publMtx.Unlock()
|
2021-04-20 16:42:23 +03:00
|
|
|
justiceTx = test.whenZeroInputs(
	t, inputsToSweep, publTx, htlc2ndLevlTx.TxHash(),
)
|
2019-03-20 05:22:59 +03:00
|
|
|
}
|
2018-05-31 13:52:02 +03:00
|
|
|
}
|
|
|
|
|
2021-04-20 16:42:23 +03:00
|
|
|
// Deliver confirmation of the sweep if the test expects it. Since we
// are looking for the final justice tx to confirm, we deliver a spend
// of all its inputs.
|
2019-03-20 05:22:59 +03:00
|
|
|
if test.sendFinalConf {
|
2021-04-20 16:42:23 +03:00
|
|
|
for _, txin := range justiceTx.TxIn {
|
|
|
|
op := txin.PreviousOutPoint
|
|
|
|
notifier.Spend(&op, 3, justiceTx)
|
|
|
|
}
|
2018-05-31 13:52:02 +03:00
|
|
|
}
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// Assert that the channel is fully resolved.
|
|
|
|
assertBrarCleanup(t, brar, alice.ChanPoint, alice.State().Db)
|
|
|
|
}
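
// assertAllSpendFrom is a small illustrative helper (a sketch, not wired into
// the tests above) capturing the check used twice in this file: every input
// of tx must spend an output of the transaction with hash txid.
func assertAllSpendFrom(t *testing.T, tx *wire.MsgTx, txid chainhash.Hash) {
	t.Helper()

	for _, txIn := range tx.TxIn {
		if txIn.PreviousOutPoint.Hash != txid {
			t.Fatalf("input %v does not spend from %v",
				txIn.PreviousOutPoint, txid)
		}
	}
}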
|
|
|
|
|
2021-02-15 15:31:08 +03:00
|
|
|
// TestBreachDelayedJusticeConfirmation tests that the breach arbiter will
|
|
|
|
// "split" the justice tx in case the first justice tx doesn't confirm within
|
|
|
|
// a reasonable time.
|
|
|
|
func TestBreachDelayedJusticeConfirmation(t *testing.T) {
|
|
|
|
brar, alice, _, bobClose, contractBreaches,
|
|
|
|
cleanUpChans, cleanUpArb := initBreachedState(t)
|
|
|
|
defer cleanUpChans()
|
|
|
|
defer cleanUpArb()
|
|
|
|
|
|
|
|
var (
|
|
|
|
height = bobClose.ChanSnapshot.CommitHeight
|
|
|
|
blockHeight = int32(10)
|
|
|
|
forceCloseTx = bobClose.CloseTx
|
|
|
|
chanPoint = alice.ChanPoint
|
|
|
|
publTx = make(chan *wire.MsgTx)
|
|
|
|
)
|
|
|
|
|
|
|
|
// Make PublishTransaction always succeed.
|
|
|
|
brar.cfg.PublishTransaction = func(tx *wire.MsgTx, _ string) error {
|
|
|
|
publTx <- tx
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Notify the breach arbiter about the breach.
|
|
|
|
retribution, err := lnwallet.NewBreachRetribution(
|
|
|
|
alice.State(), height, uint32(blockHeight),
|
|
|
|
)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create breach retribution: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
processACK := make(chan error, 1)
|
|
|
|
breach := &ContractBreachEvent{
|
|
|
|
ChanPoint: *chanPoint,
|
|
|
|
ProcessACK: func(brarErr error) {
|
|
|
|
processACK <- brarErr
|
|
|
|
},
|
|
|
|
BreachRetribution: retribution,
|
|
|
|
}
|
|
|
|
|
|
|
|
select {
|
|
|
|
case contractBreaches <- breach:
|
|
|
|
case <-time.After(15 * time.Second):
|
|
|
|
t.Fatalf("breach not delivered")
|
|
|
|
}
|
|
|
|
|
|
|
|
// We'll also wait to consume the ACK back from the breach arbiter.
|
|
|
|
select {
|
|
|
|
case err := <-processACK:
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("handoff failed: %v", err)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Second * 15):
|
|
|
|
t.Fatalf("breach arbiter didn't send ack back")
|
|
|
|
}
|
|
|
|
|
|
|
|
state := alice.State()
|
|
|
|
err = state.CloseChannel(&channeldb.ChannelCloseSummary{
|
|
|
|
ChanPoint: state.FundingOutpoint,
|
|
|
|
ChainHash: state.ChainHash,
|
|
|
|
RemotePub: state.IdentityPub,
|
|
|
|
CloseType: channeldb.BreachClose,
|
|
|
|
Capacity: state.Capacity,
|
|
|
|
IsPending: true,
|
|
|
|
ShortChanID: state.ShortChanID(),
|
|
|
|
RemoteCurrentRevocation: state.RemoteCurrentRevocation,
|
|
|
|
RemoteNextRevocation: state.RemoteNextRevocation,
|
|
|
|
LocalChanConfig: state.LocalChanCfg,
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to close channel: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// After exiting, the breach arbiter should have persisted the
|
|
|
|
// retribution information and the channel should be shown as pending
|
|
|
|
// force closed.
|
|
|
|
assertArbiterBreach(t, brar, chanPoint)
|
|
|
|
|
|
|
|
// Assert that the database sees the channel as pending close, otherwise
|
|
|
|
// the breach arbiter won't be able to fully close it.
|
|
|
|
assertPendingClosed(t, alice)
|
|
|
|
|
|
|
|
// Notify that the breaching transaction is confirmed, to trigger the
|
|
|
|
// retribution logic.
|
|
|
|
notifier := brar.cfg.Notifier.(*mock.SpendNotifier)
|
|
|
|
|
|
|
|
select {
|
|
|
|
case notifier.ConfChan <- &chainntnfs.TxConfirmation{}:
|
|
|
|
case <-time.After(15 * time.Second):
|
|
|
|
t.Fatalf("conf not delivered")
|
|
|
|
}
|
|
|
|
|
|
|
|
// The breach arbiter should attempt to sweep all outputs on the
|
|
|
|
// breached commitment.
|
|
|
|
var justiceTx *wire.MsgTx
|
|
|
|
select {
|
|
|
|
case justiceTx = <-publTx:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx was not published")
|
|
|
|
}
|
|
|
|
|
|
|
|
require.Len(t, justiceTx.TxIn, 3)
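// The three inputs correspond to the to-local and to-remote commitment
// outputs plus the channel's single breached HTLC.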
|
|
|
|
|
|
|
|
// All outputs should initially spend from the force closed txn.
|
|
|
|
forceTxID := forceCloseTx.TxHash()
|
|
|
|
for _, txIn := range justiceTx.TxIn {
|
|
|
|
if txIn.PreviousOutPoint.Hash != forceTxID {
|
|
|
|
t.Fatalf("og justice tx not spending commitment")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now we'll pretend some blocks pass without the justice tx
|
|
|
|
// confirming.
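// The breach arbiter should stay quiet during these epochs; it only falls
// back to the split justice transactions once the full justice tx has gone
// unconfirmed past its block threshold, which is exercised right after.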
|
|
|
|
for i := int32(0); i <= 3; i++ {
|
|
|
|
notifier.EpochChan <- &chainntnfs.BlockEpoch{
|
|
|
|
Height: blockHeight + i,
|
|
|
|
}
|
|
|
|
|
|
|
|
// On every epoch, check that no new tx is published.
|
|
|
|
select {
|
|
|
|
case <-publTx:
|
|
|
|
t.Fatalf("tx was published")
|
|
|
|
case <-time.After(20 * time.Millisecond):
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now mine another block without the justice tx confirming. This
|
|
|
|
// should lead to the breacharbiter publishing the split justice tx
|
|
|
|
// variants.
|
|
|
|
notifier.EpochChan <- &chainntnfs.BlockEpoch{
|
|
|
|
Height: blockHeight + 4,
|
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
|
|
|
splits []*wire.MsgTx
|
|
|
|
spending = make(map[wire.OutPoint]struct{})
|
|
|
|
maxIndex = uint32(len(forceCloseTx.TxOut)) - 1
|
|
|
|
)
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
|
|
|
|
var tx *wire.MsgTx
|
|
|
|
select {
|
|
|
|
case tx = <-publTx:
|
|
|
|
splits = append(splits, tx)
|
|
|
|
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx not published")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that every input is from the breached tx and that
|
|
|
|
// there are no duplicates.
|
|
|
|
for _, in := range tx.TxIn {
|
|
|
|
op := in.PreviousOutPoint
|
|
|
|
_, ok := spending[op]
|
|
|
|
if ok {
|
|
|
|
t.Fatal("already spent")
|
|
|
|
}
|
|
|
|
|
|
|
|
if op.Hash != forceTxID || op.Index > maxIndex {
|
|
|
|
t.Fatalf("not spending breach")
|
|
|
|
}
|
|
|
|
|
|
|
|
spending[op] = struct{}{}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// All the inputs from the original justice transaction should have
|
|
|
|
// been spent by the 2 splits.
|
|
|
|
require.Len(t, spending, len(justiceTx.TxIn))
|
|
|
|
require.Len(t, splits, 2)
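// The two splits are the commitment-output sweep and the HTLC sweep, i.e.
// the spendCommitOuts and spendHTLCs variants built by createJusticeTx.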
|
|
|
|
|
|
|
|
// Finally notify that they confirm, making the breach arbiter clean
|
|
|
|
// up.
|
|
|
|
for _, tx := range splits {
|
|
|
|
for _, in := range tx.TxIn {
|
|
|
|
op := &in.PreviousOutPoint
|
|
|
|
notifier.Spend(op, blockHeight+5, tx)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Assert that the channel is fully resolved.
|
|
|
|
assertBrarCleanup(t, brar, alice.ChanPoint, alice.State().Db)
|
|
|
|
}
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// findInputIndex returns the index of the input that spends from the given
|
|
|
|
// outpoint. This method fails if the outpoint is not found.
|
|
|
|
func findInputIndex(t *testing.T, op wire.OutPoint, tx *wire.MsgTx) int {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
inputIdx := -1
|
|
|
|
for i, txIn := range tx.TxIn {
|
|
|
|
if txIn.PreviousOutPoint == op {
|
|
|
|
inputIdx = i
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if inputIdx == -1 {
|
|
|
|
t.Fatalf("input %v in not found", op)
|
2018-05-31 13:52:02 +03:00
|
|
|
}
|
2019-03-20 05:22:59 +03:00
|
|
|
|
|
|
|
return inputIdx
|
2018-05-31 13:52:02 +03:00
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
// assertArbiterBreach checks that the breach arbiter has persisted the breach
|
|
|
|
// information for a particular channel.
|
|
|
|
func assertArbiterBreach(t *testing.T, brar *breachArbiter,
|
|
|
|
chanPoint *wire.OutPoint) {
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
t.Helper()
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
isBreached, err := brar.IsBreached(chanPoint)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to determine if channel is "+
|
|
|
|
"breached: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !isBreached {
|
|
|
|
t.Fatalf("channel %v was never marked breached",
|
|
|
|
chanPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// assertNoArbiterBreach checks that the breach arbiter has not persisted the
|
|
|
|
// breach information for a particular channel.
|
|
|
|
func assertNoArbiterBreach(t *testing.T, brar *breachArbiter,
|
|
|
|
chanPoint *wire.OutPoint) {
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
t.Helper()
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
isBreached, err := brar.IsBreached(chanPoint)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to determine if channel is "+
|
|
|
|
"breached: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if isBreached {
|
|
|
|
t.Fatalf("channel %v was marked breached",
|
|
|
|
chanPoint)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-20 05:22:59 +03:00
|
|
|
// assertBrarCleanup blocks until the given channel point has been removed
// from the retribution store and the channel is fully closed in the database.
|
|
|
|
func assertBrarCleanup(t *testing.T, brar *breachArbiter,
|
|
|
|
chanPoint *wire.OutPoint, db *channeldb.DB) {
|
|
|
|
|
|
|
|
t.Helper()
|
|
|
|
|
2019-09-19 22:46:29 +03:00
|
|
|
err := wait.NoError(func() error {
|
2019-03-20 05:22:59 +03:00
|
|
|
isBreached, err := brar.IsBreached(chanPoint)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if isBreached {
|
|
|
|
return fmt.Errorf("channel %v still breached",
|
|
|
|
chanPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
closedChans, err := db.FetchClosedChannels(false)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, channel := range closedChans {
|
|
|
|
switch {
|
|
|
|
// Wrong channel.
|
|
|
|
case channel.ChanPoint != *chanPoint:
|
|
|
|
continue
|
|
|
|
|
|
|
|
// Right channel, fully closed!
|
|
|
|
case !channel.IsPending:
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Still pending.
|
|
|
|
return fmt.Errorf("channel %v still pending "+
|
|
|
|
"close", chanPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
return fmt.Errorf("channel %v not closed", chanPoint)
|
|
|
|
|
|
|
|
}, time.Second)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf(err.Error())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// assertPendingClosed checks that the channel has been marked pending closed in
|
|
|
|
// the channel database.
|
|
|
|
func assertPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
|
|
|
|
t.Helper()
|
|
|
|
|
|
|
|
closedChans, err := c.State().Db.FetchClosedChannels(true)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to load pending closed channels: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, chanSummary := range closedChans {
|
|
|
|
if chanSummary.ChanPoint == *c.ChanPoint {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
t.Fatalf("channel %v was not marked pending closed", c.ChanPoint)
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
// assertNotPendingClosed checks that the channel has not been marked pending
|
|
|
|
// closed in the channel database.
|
|
|
|
func assertNotPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
|
2019-03-20 05:22:59 +03:00
|
|
|
t.Helper()
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
closedChans, err := c.State().Db.FetchClosedChannels(true)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to load pending closed channels: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, chanSummary := range closedChans {
|
|
|
|
if chanSummary.ChanPoint == *c.ChanPoint {
|
|
|
|
t.Fatalf("channel %v was marked pending closed",
|
|
|
|
c.ChanPoint)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// createTestArbiter instantiates a breach arbiter with a failing retribution
|
|
|
|
// store, so that controlled failures can be tested.
|
2018-04-19 12:09:25 +03:00
|
|
|
func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent,
|
2017-11-21 10:57:08 +03:00
|
|
|
db *channeldb.DB) (*breachArbiter, func(), error) {
|
|
|
|
|
|
|
|
// Create a failing retribution store, that wraps a normal one.
|
|
|
|
store := newFailingRetributionStore(func() RetributionStore {
|
|
|
|
return newRetributionStore(db)
|
|
|
|
})
|
|
|
|
|
|
|
|
aliceKeyPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(),
|
2021-01-08 00:26:24 +03:00
|
|
|
channels.AlicesPrivKey)
|
2020-08-26 21:18:02 +03:00
|
|
|
signer := &mock.SingleSigner{Privkey: aliceKeyPriv}
|
2017-11-21 10:57:08 +03:00
|
|
|
|
|
|
|
// Assemble our test arbiter.
|
2020-08-18 22:44:42 +03:00
|
|
|
notifier := mock.MakeMockSpendNotifier()
|
2017-11-21 10:57:08 +03:00
|
|
|
ba := newBreachArbiter(&BreachConfig{
|
2018-04-19 12:09:25 +03:00
|
|
|
CloseLink: func(_ *wire.OutPoint, _ htlcswitch.ChannelCloseType) {},
|
|
|
|
DB: db,
|
2019-10-31 05:43:05 +03:00
|
|
|
Estimator: chainfee.NewStaticEstimator(12500, 0),
|
2018-04-19 12:09:25 +03:00
|
|
|
GenSweepScript: func() ([]byte, error) { return nil, nil },
|
|
|
|
ContractBreaches: contractBreaches,
|
2017-11-21 10:57:08 +03:00
|
|
|
Signer: signer,
|
|
|
|
Notifier: notifier,
|
2020-05-18 15:13:23 +03:00
|
|
|
PublishTransaction: func(_ *wire.MsgTx, _ string) error { return nil },
|
2017-11-21 10:57:08 +03:00
|
|
|
Store: store,
|
|
|
|
})
|
|
|
|
|
|
|
|
if err := ba.Start(); err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// The caller is responsible for closing the database.
|
|
|
|
cleanUp := func() {
|
|
|
|
ba.Stop()
|
|
|
|
}
|
|
|
|
|
|
|
|
return ba, cleanUp, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// createInitChannels creates two initialized test channels funded with 10 BTC,
|
|
|
|
// with 5 BTC allocated to each side. Within the channel, Alice is the
|
|
|
|
// initiator.
|
|
|
|
func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel,
	*lnwallet.LightningChannel, func(), error) {
|
2017-11-21 10:57:08 +03:00
|
|
|
|
|
|
|
aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(),
|
2021-01-08 00:26:24 +03:00
|
|
|
channels.AlicesPrivKey)
|
2017-11-21 10:57:08 +03:00
|
|
|
bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(),
|
2021-01-08 00:26:24 +03:00
|
|
|
channels.BobsPrivKey)
|
2017-11-21 10:57:08 +03:00
|
|
|
|
2018-03-26 05:16:39 +03:00
|
|
|
channelCapacity, err := btcutil.NewAmount(10)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
channelBal := channelCapacity / 2
|
|
|
|
aliceDustLimit := btcutil.Amount(200)
|
|
|
|
bobDustLimit := btcutil.Amount(1300)
|
|
|
|
csvTimeoutAlice := uint32(5)
|
|
|
|
csvTimeoutBob := uint32(4)
|
|
|
|
|
|
|
|
prevOut := &wire.OutPoint{
|
2021-01-08 00:26:24 +03:00
|
|
|
Hash: channels.TestHdSeed,
|
2017-11-21 10:57:08 +03:00
|
|
|
Index: 0,
|
|
|
|
}
|
|
|
|
fundingTxIn := wire.NewTxIn(prevOut, nil, nil)
|
|
|
|
|
|
|
|
aliceCfg := channeldb.ChannelConfig{
|
|
|
|
ChannelConstraints: channeldb.ChannelConstraints{
|
|
|
|
DustLimit: aliceDustLimit,
|
|
|
|
MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
|
2018-02-08 06:36:34 +03:00
|
|
|
ChanReserve: 0,
|
|
|
|
MinHTLC: 0,
|
2017-11-21 10:57:08 +03:00
|
|
|
MaxAcceptedHtlcs: uint16(rand.Int31()),
|
2018-12-11 00:56:41 +03:00
|
|
|
CsvDelay: uint16(csvTimeoutAlice),
|
2017-11-21 10:57:08 +03:00
|
|
|
},
|
2018-02-18 02:29:01 +03:00
|
|
|
MultiSigKey: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
|
|
|
RevocationBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
|
|
|
PaymentBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
|
|
|
DelayBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
|
|
|
HtlcBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
2017-11-21 10:57:08 +03:00
|
|
|
}
|
|
|
|
bobCfg := channeldb.ChannelConfig{
|
|
|
|
ChannelConstraints: channeldb.ChannelConstraints{
|
|
|
|
DustLimit: bobDustLimit,
|
|
|
|
MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
|
2018-02-08 06:36:34 +03:00
|
|
|
ChanReserve: 0,
|
|
|
|
MinHTLC: 0,
|
2017-11-21 10:57:08 +03:00
|
|
|
MaxAcceptedHtlcs: uint16(rand.Int31()),
|
2018-12-11 00:56:41 +03:00
|
|
|
CsvDelay: uint16(csvTimeoutBob),
|
2017-11-21 10:57:08 +03:00
|
|
|
},
|
2018-02-18 02:29:01 +03:00
|
|
|
MultiSigKey: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
|
|
|
RevocationBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
|
|
|
PaymentBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
|
|
|
DelayBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
|
|
|
HtlcBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
2017-11-21 10:57:08 +03:00
|
|
|
}
|
|
|
|
|
2018-02-18 02:29:01 +03:00
|
|
|
bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
|
2017-11-21 10:57:08 +03:00
|
|
|
bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
2019-01-16 17:47:43 +03:00
|
|
|
bobCommitPoint := input.ComputeCommitmentPoint(bobFirstRevoke[:])
|
2017-11-21 10:57:08 +03:00
|
|
|
|
2018-02-18 02:29:01 +03:00
|
|
|
aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
|
2017-11-21 10:57:08 +03:00
|
|
|
aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
2019-01-16 17:47:43 +03:00
|
|
|
aliceCommitPoint := input.ComputeCommitmentPoint(aliceFirstRevoke[:])
|
2017-11-21 10:57:08 +03:00
|
|
|
|
2019-08-01 06:16:52 +03:00
|
|
|
aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns(
|
|
|
|
channelBal, channelBal, &aliceCfg, &bobCfg, aliceCommitPoint,
|
2020-01-06 13:42:04 +03:00
|
|
|
bobCommitPoint, *fundingTxIn, channeldb.SingleFunderTweaklessBit,
|
2019-08-01 06:16:52 +03:00
|
|
|
)
|
2017-11-21 10:57:08 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
alicePath, err := ioutil.TempDir("", "alicedb")
|
2019-09-13 05:59:07 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
dbAlice, err := channeldb.Open(alicePath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
bobPath, err := ioutil.TempDir("", "bobdb")
|
2019-09-13 05:59:07 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
dbBob, err := channeldb.Open(bobPath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
2019-10-31 05:43:05 +03:00
|
|
|
estimator := chainfee.NewStaticEstimator(12500, 0)
|
2018-07-28 04:20:58 +03:00
|
|
|
feePerKw, err := estimator.EstimateFeePerKW(1)
|
2017-11-21 10:57:08 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
2021-02-22 20:07:21 +03:00
|
|
|
commitFee := feePerKw.FeeForWeight(input.CommitWeight)
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
// TODO(roasbeef): need to factor in commit fee?
|
|
|
|
aliceCommit := channeldb.ChannelCommitment{
|
|
|
|
CommitHeight: 0,
|
2021-02-22 20:07:21 +03:00
|
|
|
LocalBalance: lnwire.NewMSatFromSatoshis(channelBal - commitFee),
|
2017-11-21 10:57:08 +03:00
|
|
|
RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
|
2018-02-13 17:15:14 +03:00
|
|
|
FeePerKw: btcutil.Amount(feePerKw),
|
2021-02-22 20:07:21 +03:00
|
|
|
CommitFee: commitFee,
|
2017-11-21 10:57:08 +03:00
|
|
|
CommitTx: aliceCommitTx,
|
|
|
|
CommitSig: bytes.Repeat([]byte{1}, 71),
|
|
|
|
}
|
|
|
|
bobCommit := channeldb.ChannelCommitment{
|
|
|
|
CommitHeight: 0,
|
|
|
|
LocalBalance: lnwire.NewMSatFromSatoshis(channelBal),
|
2021-02-22 20:07:21 +03:00
|
|
|
RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal - commitFee),
|
2018-02-13 17:15:14 +03:00
|
|
|
FeePerKw: btcutil.Amount(feePerKw),
|
2021-02-22 20:07:21 +03:00
|
|
|
CommitFee: commitFee,
|
2017-11-21 10:57:08 +03:00
|
|
|
CommitTx: bobCommitTx,
|
|
|
|
CommitSig: bytes.Repeat([]byte{1}, 71),
|
|
|
|
}
|
|
|
|
|
2018-02-24 06:28:36 +03:00
|
|
|
var chanIDBytes [8]byte
|
|
|
|
if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
shortChanID := lnwire.NewShortChanIDFromInt(
|
|
|
|
binary.BigEndian.Uint64(chanIDBytes[:]),
|
|
|
|
)

	aliceChannelState := &channeldb.OpenChannel{
		LocalChanCfg:            aliceCfg,
		RemoteChanCfg:           bobCfg,
		IdentityPub:             aliceKeyPub,
		FundingOutpoint:         *prevOut,
		ShortChannelID:          shortChanID,
		ChanType:                channeldb.SingleFunderTweaklessBit,
		IsInitiator:             true,
		Capacity:                channelCapacity,
		RemoteCurrentRevocation: bobCommitPoint,
		RevocationProducer:      alicePreimageProducer,
		RevocationStore:         shachain.NewRevocationStore(),
		LocalCommitment:         aliceCommit,
		RemoteCommitment:        aliceCommit,
		Db:                      dbAlice,
		Packager:                channeldb.NewChannelPackager(shortChanID),
		FundingTxn:              channels.TestFundingTx,
	}
	bobChannelState := &channeldb.OpenChannel{
		LocalChanCfg:            bobCfg,
		RemoteChanCfg:           aliceCfg,
		IdentityPub:             bobKeyPub,
		FundingOutpoint:         *prevOut,
		ShortChannelID:          shortChanID,
		ChanType:                channeldb.SingleFunderTweaklessBit,
		IsInitiator:             false,
		Capacity:                channelCapacity,
		RemoteCurrentRevocation: aliceCommitPoint,
		RevocationProducer:      bobPreimageProducer,
		RevocationStore:         shachain.NewRevocationStore(),
		LocalCommitment:         bobCommit,
		RemoteCommitment:        bobCommit,
		Db:                      dbBob,
		Packager:                channeldb.NewChannelPackager(shortChanID),
	}

	aliceSigner := &mock.SingleSigner{Privkey: aliceKeyPriv}
	bobSigner := &mock.SingleSigner{Privkey: bobKeyPriv}

	alicePool := lnwallet.NewSigPool(1, aliceSigner)
	channelAlice, err := lnwallet.NewLightningChannel(
		aliceSigner, aliceChannelState, alicePool,
	)
	if err != nil {
		return nil, nil, nil, err
	}
	alicePool.Start()

	bobPool := lnwallet.NewSigPool(1, bobSigner)
	channelBob, err := lnwallet.NewLightningChannel(
		bobSigner, bobChannelState, bobPool,
	)
	if err != nil {
		return nil, nil, nil, err
	}
	bobPool.Start()

	addr := &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18556,
	}
	if err := channelAlice.State().SyncPending(addr, 101); err != nil {
		return nil, nil, nil, err
	}

	addr = &net.TCPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 18555,
	}
	if err := channelBob.State().SyncPending(addr, 101); err != nil {
		return nil, nil, nil, err
	}

	cleanUpFunc := func() {
		dbBob.Close()
		dbAlice.Close()
		os.RemoveAll(bobPath)
		os.RemoveAll(alicePath)
	}

	// Now that the channels are open, simulate the start of a session by
	// having Alice and Bob extend their revocation windows to each other.
	err = initRevocationWindows(channelAlice, channelBob, revocationWindow)
	if err != nil {
		return nil, nil, nil, err
	}

	return channelAlice, channelBob, cleanUpFunc, nil
}
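
// A minimal usage sketch for the helper above (createInitChannels is an
// assumed name based on similar lnd test files; substitute the actual
// constructor defined earlier in this file):
//
//	channelAlice, channelBob, cleanUp, err := createInitChannels(1)
//	if err != nil {
//		t.Fatalf("unable to create test channels: %v", err)
//	}
//	defer cleanUp()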

// initRevocationWindows simulates a new channel being opened within the p2p
// network by populating the initial revocation windows of the passed
// commitment state machines.
//
// TODO(conner) remove code duplication
func initRevocationWindows(chanA, chanB *lnwallet.LightningChannel, windowSize int) error {
	aliceNextRevoke, err := chanA.NextRevocationKey()
	if err != nil {
		return err
	}
	if err := chanB.InitNextRevocation(aliceNextRevoke); err != nil {
		return err
	}

	bobNextRevoke, err := chanB.NextRevocationKey()
	if err != nil {
		return err
	}
	if err := chanA.InitNextRevocation(bobNextRevoke); err != nil {
		return err
	}

	return nil
}

// createHTLC is a utility function for generating an HTLC with a given
// preimage and a given amount.
// TODO(conner) remove code duplication
func createHTLC(data int, amount lnwire.MilliSatoshi) (*lnwire.UpdateAddHTLC, [32]byte) {
	preimage := bytes.Repeat([]byte{byte(data)}, 32)
	paymentHash := sha256.Sum256(preimage)

	var returnPreimage [32]byte
	copy(returnPreimage[:], preimage)

	return &lnwire.UpdateAddHTLC{
		ID:          uint64(data),
		PaymentHash: paymentHash,
		Amount:      amount,
		Expiry:      uint32(5),
	}, returnPreimage
}
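
// As an illustration, createHTLC(1, lnwire.NewMSatFromSatoshis(10000)) yields
// an HTLC with ID 1, Expiry 5, Amount 10,000,000 msat, and a payment hash of
// sha256.Sum256(bytes.Repeat([]byte{1}, 32)); the second return value is that
// 32-byte preimage.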

// forceStateTransition executes the necessary interaction between the two
// commitment state machines to transition to a new state locking in any
// pending updates.
// TODO(conner) remove code duplication
func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) error {
	aliceSig, aliceHtlcSigs, _, err := chanA.SignNextCommitment()
	if err != nil {
		return err
	}
	if err = chanB.ReceiveNewCommitment(aliceSig, aliceHtlcSigs); err != nil {
		return err
	}

	bobRevocation, _, err := chanB.RevokeCurrentCommitment()
	if err != nil {
		return err
	}
	bobSig, bobHtlcSigs, _, err := chanB.SignNextCommitment()
	if err != nil {
		return err
	}

	_, _, _, _, err = chanA.ReceiveRevocation(bobRevocation)
	if err != nil {
		return err
	}
	if err := chanA.ReceiveNewCommitment(bobSig, bobHtlcSigs); err != nil {
		return err
	}

	aliceRevocation, _, err := chanA.RevokeCurrentCommitment()
	if err != nil {
		return err
	}
	_, _, _, _, err = chanB.ReceiveRevocation(aliceRevocation)
	if err != nil {
		return err
	}

	return nil
}
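
// addAndLockInHTLC is a small illustrative sketch (not referenced by the
// tests in this file) showing how the helpers above compose: it offers an
// HTLC on chanA, mirrors it on chanB, and locks it in with a full commitment
// dance. The AddHTLC/ReceiveHTLC signatures are assumed from the lnwallet
// API at the time of writing and may need adjusting.
func addAndLockInHTLC(chanA, chanB *lnwallet.LightningChannel, id int,
	amt lnwire.MilliSatoshi) ([32]byte, error) {

	// Build a deterministic HTLC and keep its preimage so the caller can
	// later settle it.
	htlc, preimage := createHTLC(id, amt)

	// Offer the HTLC on chanA and replay it on chanB, mirroring what the
	// switch would do for a forwarded payment.
	if _, err := chanA.AddHTLC(htlc, nil); err != nil {
		return preimage, err
	}
	if _, err := chanB.ReceiveHTLC(htlc); err != nil {
		return preimage, err
	}

	// Complete the sign/revoke round trip so the HTLC is locked in on
	// both commitment transactions.
	if err := forceStateTransition(chanA, chanB); err != nil {
		return preimage, err
	}

	return preimage, nil
}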