2017-08-31 06:37:39 +03:00
|
|
|
// +build !rpctest
|
|
|
|
|
2017-05-07 14:09:22 +03:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2018-02-24 06:28:36 +03:00
|
|
|
crand "crypto/rand"
|
2017-11-21 10:57:08 +03:00
|
|
|
"crypto/sha256"
|
2018-02-24 06:28:36 +03:00
|
|
|
"encoding/binary"
|
2017-05-07 14:09:22 +03:00
|
|
|
"fmt"
|
2018-02-24 06:28:36 +03:00
|
|
|
"io"
|
2017-05-07 14:09:22 +03:00
|
|
|
"io/ioutil"
|
2017-11-21 10:57:08 +03:00
|
|
|
"math/rand"
|
breacharbiter: properly account for second-level spends during breach remedy
In this commit, we address an un accounted for case during the breach
remedy process. If the remote node actually went directly to the second
layer during a channel breach attempt, then we wouldn’t properly be
able to sweep with out justice transaction, as some HTLC inputs may
actually be spent at that point.
In order to address this case, we’ll now catch the transaction
rejection, then check to see which input was spent, promote that to a
second level spend, and repeat as necessary. At the end of this loop,
any inputs which have been spent to the second level will have had the
prevouts and witnesses updated.
In order to perform this transition, we now also store the second level
witness script in the database. This allow us to modify the sign desc
with the proper input value, as well as witness script.
2018-01-23 04:11:02 +03:00
|
|
|
"net"
|
2017-05-07 14:09:22 +03:00
|
|
|
"os"
|
|
|
|
"reflect"
|
2017-07-26 08:57:29 +03:00
|
|
|
"sync"
|
2017-05-07 14:09:22 +03:00
|
|
|
"testing"
|
breacharbiter: properly account for second-level spends during breach remedy
In this commit, we address an un accounted for case during the breach
remedy process. If the remote node actually went directly to the second
layer during a channel breach attempt, then we wouldn’t properly be
able to sweep with out justice transaction, as some HTLC inputs may
actually be spent at that point.
In order to address this case, we’ll now catch the transaction
rejection, then check to see which input was spent, promote that to a
second level spend, and repeat as necessary. At the end of this loop,
any inputs which have been spent to the second level will have had the
prevouts and witnesses updated.
In order to perform this transition, we now also store the second level
witness script in the database. This allow us to modify the sign desc
with the proper input value, as well as witness script.
2018-01-23 04:11:02 +03:00
|
|
|
"time"
|
2017-05-07 14:09:22 +03:00
|
|
|
|
2018-07-18 05:23:47 +03:00
|
|
|
"github.com/btcsuite/btcd/btcec"
|
|
|
|
"github.com/btcsuite/btcd/chaincfg/chainhash"
|
|
|
|
"github.com/btcsuite/btcd/txscript"
|
|
|
|
"github.com/btcsuite/btcd/wire"
|
2017-07-28 03:43:38 +03:00
|
|
|
"github.com/btcsuite/btclog"
|
2018-07-18 05:23:47 +03:00
|
|
|
"github.com/btcsuite/btcutil"
|
2017-11-21 10:57:08 +03:00
|
|
|
"github.com/go-errors/errors"
|
2018-05-31 13:52:02 +03:00
|
|
|
"github.com/lightningnetwork/lnd/chainntnfs"
|
2017-05-07 14:09:22 +03:00
|
|
|
"github.com/lightningnetwork/lnd/channeldb"
|
2017-11-21 10:57:08 +03:00
|
|
|
"github.com/lightningnetwork/lnd/htlcswitch"
|
2018-02-18 02:29:01 +03:00
|
|
|
"github.com/lightningnetwork/lnd/keychain"
|
2017-05-07 14:09:22 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwallet"
|
2017-11-21 10:57:08 +03:00
|
|
|
"github.com/lightningnetwork/lnd/lnwire"
|
|
|
|
"github.com/lightningnetwork/lnd/shachain"
|
2017-05-07 14:09:22 +03:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// breachOutPoints contains the three fixture outpoints referenced by
	// the breached-output and retribution test vectors below. The hashes
	// are arbitrary test data.
	breachOutPoints = []wire.OutPoint{
		{
			Hash: [chainhash.HashSize]byte{
				0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
				0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
				0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
				0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
			},
			Index: 9,
		},
		{
			Hash: [chainhash.HashSize]byte{
				0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
				0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
				0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
				0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
			},
			Index: 49,
		},
		{
			Hash: [chainhash.HashSize]byte{
				0x81, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
				0x63, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
				0xd, 0xe7, 0x95, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
				0x1e, 0xb, 0x4c, 0xfd, 0x9e, 0xc5, 0x8c, 0xe9,
			},
			Index: 23,
		},
	}

	// breachKeys holds serialized public keys fed to btcec.ParsePubKey in
	// initBreachedOutputs. The first two entries are 65-byte keys
	// (0x04/0x07 prefixes), the third is a 33-byte compressed key.
	// NOTE(review): entry 0 also doubles as a stand-in
	// secondLevelWitnessScript below — it is fixture bytes, not a real
	// script.
	breachKeys = [][]byte{
		{0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
			0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
			0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
			0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
			0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
			0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
			0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
			0xb4, 0x12, 0xa3,
		},
		{0x07, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
			0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
			0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
			0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
			0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
			0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
			0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
			0xb4, 0x12, 0xa3,
		},
		{0x02, 0xce, 0x0b, 0x14, 0xfb, 0x84, 0x2b, 0x1b,
			0xa5, 0x49, 0xfd, 0xd6, 0x75, 0xc9, 0x80, 0x75, 0xf1,
			0x2e, 0x9c, 0x51, 0x0f, 0x8e, 0xf5, 0x2b, 0xd0, 0x21,
			0xa9, 0xa1, 0xf4, 0x80, 0x9d, 0x3b, 0x4d,
		},
	}

	// breachedOutputs is the vector of outputs used by the serialization
	// tests and, via init(), by the retributions below. The sign
	// descriptor pubkeys are filled in by initBreachedOutputs; the
	// SingleTweak is 32 bytes of 0x02 and the PkScript is a standard
	// pay-to-pubkey script (65-byte key + OP_CHECKSIG).
	breachedOutputs = []breachedOutput{
		{
			amt:         btcutil.Amount(1e7),
			outpoint:    breachOutPoints[0],
			witnessType: lnwallet.CommitmentNoDelay,
			signDesc: lnwallet.SignDescriptor{
				SingleTweak: []byte{
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02,
				},
				WitnessScript: []byte{
					0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
					0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
					0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
					0xef, 0xb5, 0x71, 0x48,
				},
				Output: &wire.TxOut{
					Value: 5000000000,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0xd6, 0x4b, 0xdf, 0xd0,
						0x9e, 0xb1, 0xc5, 0xfe, 0x29,
						0x5a, 0xbd, 0xeb, 0x1d, 0xca,
						0x42, 0x81, 0xbe, 0x98, 0x8e,
						0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
						0xa5, 0x9d, 0xc2, 0x26, 0xc2,
						0x86, 0x24, 0xe1, 0x81, 0x75,
						0xe8, 0x51, 0xc9, 0x6b, 0x97,
						0x3d, 0x81, 0xb0, 0x1c, 0xc3,
						0x1f, 0x04, 0x78, 0x34, 0xbc,
						0x06, 0xd6, 0xd6, 0xed, 0xf6,
						0x20, 0xd1, 0x84, 0x24, 0x1a,
						0x6a, 0xed, 0x8b, 0x63,
						0xa6, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
				HashType: txscript.SigHashAll,
			},
			// Arbitrary bytes standing in for a real second-level
			// witness script in serialization round trips.
			secondLevelWitnessScript: breachKeys[0],
		},
		{
			amt:         btcutil.Amount(2e9),
			outpoint:    breachOutPoints[1],
			witnessType: lnwallet.CommitmentRevoke,
			signDesc: lnwallet.SignDescriptor{
				SingleTweak: []byte{
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02,
				},
				WitnessScript: []byte{
					0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
					0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
					0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
					0xef, 0xb5, 0x71, 0x48,
				},
				Output: &wire.TxOut{
					Value: 5000000000,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0xd6, 0x4b, 0xdf, 0xd0,
						0x9e, 0xb1, 0xc5, 0xfe, 0x29,
						0x5a, 0xbd, 0xeb, 0x1d, 0xca,
						0x42, 0x81, 0xbe, 0x98, 0x8e,
						0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
						0xa5, 0x9d, 0xc2, 0x26, 0xc2,
						0x86, 0x24, 0xe1, 0x81, 0x75,
						0xe8, 0x51, 0xc9, 0x6b, 0x97,
						0x3d, 0x81, 0xb0, 0x1c, 0xc3,
						0x1f, 0x04, 0x78, 0x34, 0xbc,
						0x06, 0xd6, 0xd6, 0xed, 0xf6,
						0x20, 0xd1, 0x84, 0x24, 0x1a,
						0x6a, 0xed, 0x8b, 0x63,
						0xa6, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
				HashType: txscript.SigHashAll,
			},
			secondLevelWitnessScript: breachKeys[0],
		},
		{
			amt:         btcutil.Amount(3e4),
			outpoint:    breachOutPoints[2],
			witnessType: lnwallet.CommitmentDelayOutput,
			signDesc: lnwallet.SignDescriptor{
				SingleTweak: []byte{
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
					0x02, 0x02,
				},
				WitnessScript: []byte{
					0x00, 0x14, 0xee, 0x91, 0x41, 0x7e,
					0x85, 0x6c, 0xde, 0x10, 0xa2, 0x91,
					0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
					0xef, 0xb5, 0x71, 0x48,
				},
				Output: &wire.TxOut{
					Value: 5000000000,
					PkScript: []byte{
						0x41, // OP_DATA_65
						0x04, 0xd6, 0x4b, 0xdf, 0xd0,
						0x9e, 0xb1, 0xc5, 0xfe, 0x29,
						0x5a, 0xbd, 0xeb, 0x1d, 0xca,
						0x42, 0x81, 0xbe, 0x98, 0x8e,
						0x2d, 0xa0, 0xb6, 0xc1, 0xc6,
						0xa5, 0x9d, 0xc2, 0x26, 0xc2,
						0x86, 0x24, 0xe1, 0x81, 0x75,
						0xe8, 0x51, 0xc9, 0x6b, 0x97,
						0x3d, 0x81, 0xb0, 0x1c, 0xc3,
						0x1f, 0x04, 0x78, 0x34, 0xbc,
						0x06, 0xd6, 0xd6, 0xed, 0xf6,
						0x20, 0xd1, 0x84, 0x24, 0x1a,
						0x6a, 0xed, 0x8b, 0x63,
						0xa6, // 65-byte signature
						0xac, // OP_CHECKSIG
					},
				},
				HashType: txscript.SigHashAll,
			},
			secondLevelWitnessScript: breachKeys[0],
		},
	}

	// retributionMap is populated in init() to allow lookups of the test
	// retributions by channel point.
	retributionMap = make(map[wire.OutPoint]retributionInfo)

	// retributions is the vector of retributionInfo used by the
	// serialization and retribution-store tests. The breachedOutputs
	// fields are zero placeholders here and are filled from the
	// breachedOutputs slice above in init().
	retributions = []retributionInfo{
		{
			commitHash: [chainhash.HashSize]byte{
				0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
				0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
				0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
				0x6a, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
			},
			chainHash: [chainhash.HashSize]byte{
				0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
				0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
				0xb7, 0x94, 0x38, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
				0x6b, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
			},
			chanPoint:    breachOutPoints[0],
			breachHeight: 337,
			// Set to breachedOutputs 0 and 1 in init()
			breachedOutputs: []breachedOutput{{}, {}},
		},
		{
			commitHash: [chainhash.HashSize]byte{
				0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
				0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
				0x2d, 0xe7, 0x93, 0xe4, 0xb7, 0x25, 0xb8, 0x4d,
				0x1f, 0xb, 0x4c, 0xf9, 0x9e, 0xc5, 0x8c, 0xe9,
			},
			chainHash: [chainhash.HashSize]byte{
				0x4f, 0x2f, 0x6f, 0x25, 0x88, 0xa3, 0xef, 0xb9,
				0xb7, 0x94, 0x39, 0x5f, 0x2d, 0x1e, 0xf7, 0xab,
				0x6b, 0x49, 0x18, 0x83, 0x31, 0x98, 0x47, 0x53,
				0x4d, 0x92, 0x73, 0xd1, 0x90, 0x63, 0x81, 0xb4,
			},
			chanPoint:    breachOutPoints[1],
			breachHeight: 420420,
			// Set to breachedOutputs 1 and 2 in init()
			breachedOutputs: []breachedOutput{{}, {}},
		},
	}
)
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
// init silences the package loggers, parses the fixture pubkeys into the
// breached outputs, wires the breached outputs into the retribution test
// vectors, and indexes the retributions by channel point.
func init() {
	// Disable logging to keep test output quiet.
	channeldb.UseLogger(btclog.Disabled)
	lnwallet.UseLogger(btclog.Disabled)
	brarLog = btclog.Disabled

	// Ensure that breached outputs are initialized before starting tests.
	if err := initBreachedOutputs(); err != nil {
		panic(err)
	}

	// Populate a retribution map for convenience, to allow lookups by
	// channel point.
	for i := range retributions {
		retInfo := &retributions[i]
		// retribution i uses breachedOutputs i and i+1, so adjacent
		// retributions intentionally share one output.
		retInfo.breachedOutputs[0] = breachedOutputs[i]
		retInfo.breachedOutputs[1] = breachedOutputs[i+1]

		retributionMap[retInfo.chanPoint] = *retInfo

	}
}
|
|
|
|
|
|
|
|
// FailingRetributionStore wraps a RetributionStore and supports controlled
// restarts of the persistent instance. This allows us to test (1) that no
// modifications to the entries are made between calls or through side effects,
// and (2) that the database is actually being persisted between actions.
type FailingRetributionStore interface {
	RetributionStore

	// Restart simulates a full stop/start of the underlying store,
	// forcing it to reload its contents from the persistent source.
	Restart()
}
|
|
|
|
|
|
|
|
// failingRetributionStore is a concrete implementation of a
// FailingRetributionStore. It wraps an underlying RetributionStore and is
// parameterized entirely by a restart function, which is intended to simulate a
// full stop/start of the store.
type failingRetributionStore struct {
	// mu guards all fields below.
	mu sync.Mutex

	// rs is the currently-active wrapped store; replaced on Restart.
	rs RetributionStore

	// nextAddErr, when non-nil, is returned (once) by the next call to
	// Add instead of forwarding to rs.
	nextAddErr error

	// restart produces a freshly-loaded store instance.
	restart func() RetributionStore
}
|
|
|
|
|
|
|
|
// newFailingRetributionStore creates a new failing retribution store. The given
|
|
|
|
// restart closure should ensure that it is reloading its contents from the
|
|
|
|
// persistent source.
|
|
|
|
func newFailingRetributionStore(
|
|
|
|
restart func() RetributionStore) *failingRetributionStore {
|
|
|
|
|
|
|
|
return &failingRetributionStore{
|
|
|
|
mu: sync.Mutex{},
|
|
|
|
rs: restart(),
|
|
|
|
restart: restart,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
// FailNextAdd instructs the retribution store to return the provided error. If
|
|
|
|
// the error is nil, a generic default will be used.
|
|
|
|
func (frs *failingRetributionStore) FailNextAdd(err error) {
|
|
|
|
if err == nil {
|
|
|
|
err = errors.New("retribution store failed")
|
|
|
|
}
|
|
|
|
|
|
|
|
frs.mu.Lock()
|
|
|
|
frs.nextAddErr = err
|
|
|
|
frs.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
func (frs *failingRetributionStore) Restart() {
|
|
|
|
frs.mu.Lock()
|
|
|
|
frs.rs = frs.restart()
|
|
|
|
frs.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
// Add forwards the call to the underlying retribution store, unless this Add
|
|
|
|
// has been previously instructed to fail.
|
2017-07-28 03:43:38 +03:00
|
|
|
func (frs *failingRetributionStore) Add(retInfo *retributionInfo) error {
|
|
|
|
frs.mu.Lock()
|
|
|
|
defer frs.mu.Unlock()
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
if frs.nextAddErr != nil {
|
|
|
|
err := frs.nextAddErr
|
|
|
|
frs.nextAddErr = nil
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
return frs.rs.Add(retInfo)
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
func (frs *failingRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
|
|
|
|
frs.mu.Lock()
|
|
|
|
defer frs.mu.Unlock()
|
|
|
|
|
|
|
|
return frs.rs.IsBreached(chanPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (frs *failingRetributionStore) Finalize(chanPoint *wire.OutPoint,
|
|
|
|
finalTx *wire.MsgTx) error {
|
|
|
|
|
|
|
|
frs.mu.Lock()
|
|
|
|
defer frs.mu.Unlock()
|
|
|
|
|
|
|
|
return frs.rs.Finalize(chanPoint, finalTx)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (frs *failingRetributionStore) GetFinalizedTxn(
|
|
|
|
chanPoint *wire.OutPoint) (*wire.MsgTx, error) {
|
|
|
|
|
|
|
|
frs.mu.Lock()
|
|
|
|
defer frs.mu.Unlock()
|
|
|
|
|
|
|
|
return frs.rs.GetFinalizedTxn(chanPoint)
|
|
|
|
}
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
func (frs *failingRetributionStore) Remove(key *wire.OutPoint) error {
|
|
|
|
frs.mu.Lock()
|
|
|
|
defer frs.mu.Unlock()
|
|
|
|
|
|
|
|
return frs.rs.Remove(key)
|
|
|
|
}
|
|
|
|
|
|
|
|
// ForAll delegates to the wrapped store. The mutex is held for the full
// iteration (released via defer), so cb runs with the store locked.
func (frs *failingRetributionStore) ForAll(cb func(*retributionInfo) error) error {
	frs.mu.Lock()
	defer frs.mu.Unlock()

	return frs.rs.ForAll(cb)
}
|
|
|
|
|
2017-05-07 14:09:22 +03:00
|
|
|
// Parse the pubkeys in the breached outputs.
|
|
|
|
func initBreachedOutputs() error {
|
2017-07-26 08:57:29 +03:00
|
|
|
for i := range breachedOutputs {
|
2017-05-07 14:09:22 +03:00
|
|
|
bo := &breachedOutputs[i]
|
|
|
|
|
|
|
|
// Parse the sign descriptor's pubkey.
|
|
|
|
pubkey, err := btcec.ParsePubKey(breachKeys[i], btcec.S256())
|
|
|
|
if err != nil {
|
2017-07-28 03:43:38 +03:00
|
|
|
return fmt.Errorf("unable to parse pubkey: %v",
|
|
|
|
breachKeys[i])
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
2018-02-18 02:29:01 +03:00
|
|
|
bo.signDesc.KeyDesc.PubKey = pubkey
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that breachedOutput Encode/Decode works.
|
|
|
|
func TestBreachedOutputSerialization(t *testing.T) {
|
2017-09-21 02:01:28 +03:00
|
|
|
for i := range breachedOutputs {
|
2017-05-07 14:09:22 +03:00
|
|
|
bo := &breachedOutputs[i]
|
|
|
|
|
|
|
|
var buf bytes.Buffer
|
|
|
|
|
|
|
|
if err := bo.Encode(&buf); err != nil {
|
2017-07-28 03:43:38 +03:00
|
|
|
t.Fatalf("unable to serialize breached output [%v]: %v",
|
|
|
|
i, err)
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
desBo := &breachedOutput{}
|
|
|
|
if err := desBo.Decode(&buf); err != nil {
|
2017-07-28 03:43:38 +03:00
|
|
|
t.Fatalf("unable to deserialize "+
|
|
|
|
"breached output [%v]: %v", i, err)
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(bo, desBo) {
|
2017-07-28 03:43:38 +03:00
|
|
|
t.Fatalf("original and deserialized "+
|
|
|
|
"breached outputs not equal:\n"+
|
2017-05-07 14:09:22 +03:00
|
|
|
"original : %+v\n"+
|
|
|
|
"deserialized : %+v\n",
|
|
|
|
bo, desBo)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Test that retribution Encode/Decode works.
|
|
|
|
func TestRetributionSerialization(t *testing.T) {
|
2017-09-21 02:01:28 +03:00
|
|
|
for i := range retributions {
|
2017-05-07 14:09:22 +03:00
|
|
|
ret := &retributions[i]
|
|
|
|
|
|
|
|
var buf bytes.Buffer
|
|
|
|
|
|
|
|
if err := ret.Encode(&buf); err != nil {
|
2017-07-28 03:43:38 +03:00
|
|
|
t.Fatalf("unable to serialize retribution [%v]: %v",
|
|
|
|
i, err)
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
|
|
|
|
2017-07-26 06:14:03 +03:00
|
|
|
desRet := &retributionInfo{}
|
2017-05-07 14:09:22 +03:00
|
|
|
if err := desRet.Decode(&buf); err != nil {
|
2017-07-28 03:43:38 +03:00
|
|
|
t.Fatalf("unable to deserialize retribution [%v]: %v",
|
|
|
|
i, err)
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if !reflect.DeepEqual(ret, desRet) {
|
2017-07-28 03:43:38 +03:00
|
|
|
t.Fatalf("original and deserialized "+
|
|
|
|
"retribution infos not equal:\n"+
|
2017-05-07 14:09:22 +03:00
|
|
|
"original : %+v\n"+
|
|
|
|
"deserialized : %+v\n",
|
|
|
|
ret, desRet)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-07-26 08:57:29 +03:00
|
|
|
// copyRetInfo creates a complete copy of the given retributionInfo.
|
|
|
|
func copyRetInfo(retInfo *retributionInfo) *retributionInfo {
|
2017-09-21 02:01:28 +03:00
|
|
|
nOutputs := len(retInfo.breachedOutputs)
|
2017-07-28 03:43:38 +03:00
|
|
|
|
2017-07-26 08:57:29 +03:00
|
|
|
ret := &retributionInfo{
|
2017-09-21 02:01:28 +03:00
|
|
|
commitHash: retInfo.commitHash,
|
2017-11-11 02:23:28 +03:00
|
|
|
chainHash: retInfo.chainHash,
|
2017-09-21 02:01:28 +03:00
|
|
|
chanPoint: retInfo.chanPoint,
|
2017-11-21 10:57:08 +03:00
|
|
|
breachHeight: retInfo.breachHeight,
|
2017-09-21 02:01:28 +03:00
|
|
|
breachedOutputs: make([]breachedOutput, nOutputs),
|
2017-07-26 08:57:29 +03:00
|
|
|
}
|
|
|
|
|
2017-09-21 02:01:28 +03:00
|
|
|
for i := range retInfo.breachedOutputs {
|
|
|
|
ret.breachedOutputs[i] = retInfo.breachedOutputs[i]
|
2017-07-26 08:57:29 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret
|
|
|
|
}
|
|
|
|
|
|
|
|
// mockRetributionStore implements the RetributionStore interface and is backed
// by an in-memory map. Access to the internal state is provided by a mutex.
// TODO(cfromknecht) extend to support and test controlled failures.
type mockRetributionStore struct {
	// mu guards state and finalTxs.
	mu sync.Mutex

	// state maps channel points to their stored retribution info.
	state map[wire.OutPoint]*retributionInfo

	// finalTxs maps channel points to their finalized justice txns.
	finalTxs map[wire.OutPoint]*wire.MsgTx
}
|
|
|
|
|
|
|
|
func newMockRetributionStore() *mockRetributionStore {
|
|
|
|
return &mockRetributionStore{
|
2017-11-21 10:57:08 +03:00
|
|
|
mu: sync.Mutex{},
|
|
|
|
state: make(map[wire.OutPoint]*retributionInfo),
|
|
|
|
finalTxs: make(map[wire.OutPoint]*wire.MsgTx),
|
2017-07-26 08:57:29 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rs *mockRetributionStore) Add(retInfo *retributionInfo) error {
|
|
|
|
rs.mu.Lock()
|
|
|
|
rs.state[retInfo.chanPoint] = copyRetInfo(retInfo)
|
|
|
|
rs.mu.Unlock()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
func (rs *mockRetributionStore) IsBreached(chanPoint *wire.OutPoint) (bool, error) {
|
|
|
|
rs.mu.Lock()
|
|
|
|
_, ok := rs.state[*chanPoint]
|
|
|
|
rs.mu.Unlock()
|
|
|
|
|
|
|
|
return ok, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rs *mockRetributionStore) Finalize(chanPoint *wire.OutPoint,
|
|
|
|
finalTx *wire.MsgTx) error {
|
|
|
|
|
|
|
|
rs.mu.Lock()
|
|
|
|
rs.finalTxs[*chanPoint] = finalTx
|
|
|
|
rs.mu.Unlock()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rs *mockRetributionStore) GetFinalizedTxn(
|
|
|
|
chanPoint *wire.OutPoint) (*wire.MsgTx, error) {
|
|
|
|
|
|
|
|
rs.mu.Lock()
|
|
|
|
finalTx := rs.finalTxs[*chanPoint]
|
|
|
|
rs.mu.Unlock()
|
|
|
|
|
|
|
|
return finalTx, nil
|
|
|
|
}
|
|
|
|
|
2017-07-26 08:57:29 +03:00
|
|
|
func (rs *mockRetributionStore) Remove(key *wire.OutPoint) error {
|
|
|
|
rs.mu.Lock()
|
|
|
|
delete(rs.state, *key)
|
2017-11-21 10:57:08 +03:00
|
|
|
delete(rs.finalTxs, *key)
|
2017-07-26 08:57:29 +03:00
|
|
|
rs.mu.Unlock()
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (rs *mockRetributionStore) ForAll(cb func(*retributionInfo) error) error {
|
|
|
|
rs.mu.Lock()
|
|
|
|
defer rs.mu.Unlock()
|
2017-05-07 14:09:22 +03:00
|
|
|
|
2017-07-26 08:57:29 +03:00
|
|
|
for _, retInfo := range rs.state {
|
|
|
|
if err := cb(copyRetInfo(retInfo)); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
// retributionStoreTestSuite enumerates the generic behaviors every
// RetributionStore implementation must satisfy. Each entry pairs a
// human-readable subtest name with a test function; both the mock and the
// channeldb-backed stores are run against this same suite.
var retributionStoreTestSuite = []struct {
	name string
	test func(FailingRetributionStore, *testing.T)
}{
	{
		"Initialization",
		testRetributionStoreInit,
	},
	{
		"Add/Remove",
		testRetributionStoreAddRemove,
	},
	{
		"Persistence",
		testRetributionStorePersistence,
	},
	{
		"Overwrite",
		testRetributionStoreOverwrite,
	},
	{
		"RemoveEmpty",
		testRetributionStoreRemoveEmpty,
	},
}
|
|
|
|
|
2017-07-26 08:57:29 +03:00
|
|
|
// TestMockRetributionStore instantiates a mockRetributionStore and tests its
|
|
|
|
// behavior using the general RetributionStore test suite.
|
|
|
|
func TestMockRetributionStore(t *testing.T) {
|
2017-07-28 03:43:38 +03:00
|
|
|
for _, test := range retributionStoreTestSuite {
|
|
|
|
t.Run(
|
|
|
|
"mockRetributionStore."+test.name,
|
|
|
|
func(tt *testing.T) {
|
|
|
|
mrs := newMockRetributionStore()
|
|
|
|
frs := newFailingRetributionStore(
|
|
|
|
func() RetributionStore { return mrs },
|
|
|
|
)
|
|
|
|
test.test(frs, tt)
|
|
|
|
},
|
|
|
|
)
|
|
|
|
}
|
2017-07-26 08:57:29 +03:00
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
func makeTestChannelDB() (*channeldb.DB, func(), error) {
|
2017-05-07 14:09:22 +03:00
|
|
|
// First, create a temporary directory to be used for the duration of
|
|
|
|
// this test.
|
|
|
|
tempDirName, err := ioutil.TempDir("", "channeldb")
|
|
|
|
if err != nil {
|
2017-11-21 10:57:08 +03:00
|
|
|
return nil, nil, err
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
cleanUp := func() {
|
|
|
|
os.RemoveAll(tempDirName)
|
|
|
|
}
|
2017-07-28 03:43:38 +03:00
|
|
|
|
2017-07-26 08:57:29 +03:00
|
|
|
db, err := channeldb.Open(tempDirName)
|
2017-11-21 10:57:08 +03:00
|
|
|
if err != nil {
|
|
|
|
cleanUp()
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return db, cleanUp, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestChannelDBRetributionStore instantiates a retributionStore backed by a
// channeldb.DB, and tests its behavior using the general RetributionStore test
// suite.
func TestChannelDBRetributionStore(t *testing.T) {
	db, cleanUp, err := makeTestChannelDB()
	if err != nil {
		t.Fatalf("unable to open channeldb: %v", err)
	}
	// NOTE(review): the deferred Close binds the original *DB receiver at
	// defer time; after restartDb reassigns db, this closes the original
	// (already-closed) handle rather than the latest one — presumably a
	// harmless double close, but worth confirming.
	defer db.Close()
	defer cleanUp()

	// restartDb closes the current db and reopens it at the same path,
	// deliberately mutating the captured db/err variables so subsequent
	// restarts chain off the latest handle.
	restartDb := func() RetributionStore {
		// Close and reopen channeldb
		if err = db.Close(); err != nil {
			t.Fatalf("unable to close channeldb during restart: %v",
				err)
		}
		// db.Path() is read from the just-closed handle; assumes Path
		// remains valid after Close.
		db, err = channeldb.Open(db.Path())
		if err != nil {
			t.Fatalf("unable to open channeldb: %v", err)
		}

		return newRetributionStore(db)
	}

	// Finally, instantiate retribution store and execute RetributionStore
	// test suite.
	for _, test := range retributionStoreTestSuite {
		t.Run(
			"channeldbDBRetributionStore."+test.name,
			func(tt *testing.T) {
				// Wipe between subtests so each starts from an
				// empty database.
				if err = db.Wipe(); err != nil {
					t.Fatalf("unable to wipe channeldb: %v",
						err)
				}

				frs := newFailingRetributionStore(restartDb)
				test.test(frs, tt)
			},
		)
	}
}
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
// countRetributions uses a retribution store's ForAll to count the number of
|
|
|
|
// elements emitted from the store.
|
2017-07-26 08:57:29 +03:00
|
|
|
func countRetributions(t *testing.T, rs RetributionStore) int {
|
2017-05-07 14:09:22 +03:00
|
|
|
count := 0
|
2017-07-26 06:14:03 +03:00
|
|
|
err := rs.ForAll(func(_ *retributionInfo) error {
|
2017-05-08 00:58:53 +03:00
|
|
|
count++
|
2017-05-07 14:09:22 +03:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to list retributions in db: %v", err)
|
|
|
|
}
|
|
|
|
return count
|
|
|
|
}
|
|
|
|
|
2017-08-15 01:45:32 +03:00
|
|
|
// testRetributionStoreAddRemove executes a generic test suite for any concrete
|
|
|
|
// implementation of the RetributionStore interface. This test adds all
|
|
|
|
// retributions to the store, confirms that they are all present, and then
|
|
|
|
// removes each one individually. Between each addition or removal, the number
|
|
|
|
// of elements in the store is checked to ensure that it only changes by one.
|
2017-07-28 03:43:38 +03:00
|
|
|
func testRetributionStoreAddRemove(frs FailingRetributionStore, t *testing.T) {
|
2018-02-07 06:11:11 +03:00
|
|
|
// Make sure that a new retribution store is actually empty.
|
2017-07-28 03:43:38 +03:00
|
|
|
if count := countRetributions(t, frs); count != 0 {
|
2017-05-07 14:09:22 +03:00
|
|
|
t.Fatalf("expected 0 retributions, found %v", count)
|
|
|
|
}
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
// Add all retributions, check that ForAll returns the correct
|
|
|
|
// information, and then remove all retributions.
|
|
|
|
testRetributionStoreAdds(frs, t, false)
|
|
|
|
testRetributionStoreForAll(frs, t, false)
|
|
|
|
testRetributionStoreRemoves(frs, t, false)
|
|
|
|
}
|
|
|
|
|
2017-08-15 01:45:32 +03:00
|
|
|
// testRetributionStorePersistence executes the same general test as
|
|
|
|
// testRetributionStoreAddRemove, except that it also restarts the store between
|
|
|
|
// each operation to ensure that the results are properly persisted.
|
2017-07-28 03:43:38 +03:00
|
|
|
func testRetributionStorePersistence(frs FailingRetributionStore, t *testing.T) {
|
2018-02-07 06:11:11 +03:00
|
|
|
// Make sure that a new retribution store is still empty after failing
|
2017-07-28 03:43:38 +03:00
|
|
|
// right off the bat.
|
|
|
|
frs.Restart()
|
|
|
|
if count := countRetributions(t, frs); count != 0 {
|
2017-07-26 08:57:29 +03:00
|
|
|
t.Fatalf("expected 1 retributions, found %v", count)
|
|
|
|
}
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
// Insert all retributions into the database, restarting and checking
|
|
|
|
// between subsequent calls to test that each intermediate additions are
|
|
|
|
// persisted.
|
|
|
|
testRetributionStoreAdds(frs, t, true)
|
|
|
|
|
|
|
|
// After all retributions have been inserted, verify that the store
|
|
|
|
// emits a distinct set of retributions that are equivalent to the test
|
|
|
|
// vector.
|
|
|
|
testRetributionStoreForAll(frs, t, true)
|
|
|
|
|
|
|
|
// Remove all retributions from the database, restarting and checking
|
|
|
|
// between subsequent calls to test that each intermediate removals are
|
|
|
|
// persisted.
|
|
|
|
testRetributionStoreRemoves(frs, t, true)
|
|
|
|
}
|
|
|
|
|
2017-08-15 01:45:32 +03:00
|
|
|
// testRetributionStoreInit ensures that a retribution store is always
|
|
|
|
// initialized with no retributions.
|
2017-07-28 03:43:38 +03:00
|
|
|
func testRetributionStoreInit(frs FailingRetributionStore, t *testing.T) {
|
|
|
|
// Make sure that a new retribution store starts empty.
|
|
|
|
if count := countRetributions(t, frs); count != 0 {
|
|
|
|
t.Fatalf("expected 0 retributions, found %v", count)
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
2017-07-28 03:43:38 +03:00
|
|
|
}
|
|
|
|
|
2017-08-15 01:45:32 +03:00
|
|
|
// testRetributionStoreRemoveEmpty ensures that a retribution store will not
|
|
|
|
// fail or panic if it is instructed to remove an entry while empty.
|
2017-07-28 03:43:38 +03:00
|
|
|
func testRetributionStoreRemoveEmpty(frs FailingRetributionStore, t *testing.T) {
|
|
|
|
testRetributionStoreRemoves(frs, t, false)
|
|
|
|
}
|
|
|
|
|
2017-08-15 01:45:32 +03:00
|
|
|
// testRetributionStoreOverwrite ensures that attempts to write retribution
|
|
|
|
// information regarding a channel point that already exists does not change the
|
|
|
|
// total number of entries held by the retribution store.
|
2017-07-28 03:43:38 +03:00
|
|
|
func testRetributionStoreOverwrite(frs FailingRetributionStore, t *testing.T) {
|
|
|
|
// Initially, add all retributions to store.
|
|
|
|
testRetributionStoreAdds(frs, t, false)
|
|
|
|
|
|
|
|
// Overwrite the initial entries again.
|
|
|
|
for i, retInfo := range retributions {
|
|
|
|
if err := frs.Add(&retInfo); err != nil {
|
|
|
|
t.Fatalf("unable to add to retribution %v to store: %v",
|
|
|
|
i, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that retribution store still has 2 entries.
|
|
|
|
if count := countRetributions(t, frs); count != 2 {
|
2017-05-07 14:09:22 +03:00
|
|
|
t.Fatalf("expected 2 retributions, found %v", count)
|
|
|
|
}
|
2017-07-28 03:43:38 +03:00
|
|
|
}
|
2017-05-07 14:09:22 +03:00
|
|
|
|
2017-08-15 01:45:32 +03:00
|
|
|
// testRetributionStoreAdds adds all of the test retributions to the database,
|
|
|
|
// ensuring that the total number of elements increases by exactly 1 after each
|
|
|
|
// operation. If the `failing` flag is provide, the test will restart the
|
|
|
|
// database and confirm that the delta is still 1.
|
2017-07-28 03:43:38 +03:00
|
|
|
func testRetributionStoreAdds(
|
|
|
|
frs FailingRetributionStore,
|
|
|
|
t *testing.T,
|
|
|
|
failing bool) {
|
|
|
|
|
|
|
|
// Iterate over retributions, adding each from the store. If we are
|
|
|
|
// testing the store under failures, we restart the store and verify
|
|
|
|
// that the contents are the same.
|
|
|
|
for i, retInfo := range retributions {
|
2017-12-18 05:40:05 +03:00
|
|
|
// Snapshot number of entries before and after the addition.
|
2017-07-28 03:43:38 +03:00
|
|
|
nbefore := countRetributions(t, frs)
|
|
|
|
if err := frs.Add(&retInfo); err != nil {
|
|
|
|
t.Fatalf("unable to add to retribution %v to store: %v",
|
|
|
|
i, err)
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
2017-07-28 03:43:38 +03:00
|
|
|
nafter := countRetributions(t, frs)
|
2017-05-07 14:09:22 +03:00
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
// Check that only one retribution was added.
|
|
|
|
if nafter-nbefore != 1 {
|
|
|
|
t.Fatalf("expected %v retributions, found %v",
|
|
|
|
nbefore+1, nafter)
|
|
|
|
}
|
|
|
|
|
|
|
|
if failing {
|
|
|
|
frs.Restart()
|
|
|
|
|
|
|
|
// Check that retribution store has persisted addition
|
|
|
|
// after restarting.
|
|
|
|
nrestart := countRetributions(t, frs)
|
|
|
|
if nrestart-nbefore != 1 {
|
|
|
|
t.Fatalf("expected %v retributions, found %v",
|
|
|
|
nbefore+1, nrestart)
|
|
|
|
}
|
|
|
|
}
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
2017-07-28 03:43:38 +03:00
|
|
|
}
|
|
|
|
|
2017-08-15 01:45:32 +03:00
|
|
|
// testRetributionStoreRemoves removes all of the test retributions to the
|
|
|
|
// database, ensuring that the total number of elements decreases by exactly 1
|
|
|
|
// after each operation. If the `failing` flag is provide, the test will
|
|
|
|
// restart the database and confirm that the delta is the same.
|
2017-07-28 03:43:38 +03:00
|
|
|
func testRetributionStoreRemoves(
|
|
|
|
frs FailingRetributionStore,
|
|
|
|
t *testing.T,
|
|
|
|
failing bool) {
|
|
|
|
|
|
|
|
// Iterate over retributions, removing each from the store. If we are
|
|
|
|
// testing the store under failures, we restart the store and verify
|
|
|
|
// that the contents are the same.
|
|
|
|
for i, retInfo := range retributions {
|
2017-12-18 05:40:05 +03:00
|
|
|
// Snapshot number of entries before and after the removal.
|
2017-07-28 03:43:38 +03:00
|
|
|
nbefore := countRetributions(t, frs)
|
|
|
|
if err := frs.Remove(&retInfo.chanPoint); err != nil {
|
|
|
|
t.Fatalf("unable to remove to retribution %v "+
|
|
|
|
"from store: %v", i, err)
|
|
|
|
}
|
|
|
|
nafter := countRetributions(t, frs)
|
|
|
|
|
|
|
|
// If the store is empty, increment nbefore to simulate the
|
|
|
|
// removal of one element.
|
|
|
|
if nbefore == 0 {
|
|
|
|
nbefore++
|
|
|
|
}
|
|
|
|
|
|
|
|
// Check that only one retribution was removed.
|
|
|
|
if nbefore-nafter != 1 {
|
|
|
|
t.Fatalf("expected %v retributions, found %v",
|
|
|
|
nbefore-1, nafter)
|
|
|
|
}
|
|
|
|
|
|
|
|
if failing {
|
|
|
|
frs.Restart()
|
|
|
|
|
|
|
|
// Check that retribution store has persisted removal
|
|
|
|
// after restarting.
|
|
|
|
nrestart := countRetributions(t, frs)
|
|
|
|
if nbefore-nrestart != 1 {
|
|
|
|
t.Fatalf("expected %v retributions, found %v",
|
|
|
|
nbefore-1, nrestart)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-08-15 01:45:32 +03:00
|
|
|
// testRetributionStoreForAll iterates over the current entries in the
|
|
|
|
// retribution store, ensuring that each entry in the database is unique, and
|
|
|
|
// corresponds to exactly one of the entries in the test vector. If the
|
|
|
|
// `failing` flag is provide, the test will restart the database and confirm
|
|
|
|
// that the entries again validate against the test vectors.
|
2017-07-28 03:43:38 +03:00
|
|
|
func testRetributionStoreForAll(
|
|
|
|
frs FailingRetributionStore,
|
|
|
|
t *testing.T,
|
|
|
|
failing bool) {
|
|
|
|
|
|
|
|
// nrets is the number of retributions in the test vector
|
|
|
|
nrets := len(retributions)
|
|
|
|
|
|
|
|
// isRestart indicates whether or not the database has been restarted.
|
|
|
|
// When testing for failures, this allows the test case to make a second
|
|
|
|
// attempt without causing a subsequent restart on the second pass.
|
|
|
|
var isRestart bool
|
|
|
|
|
|
|
|
restartCheck:
|
2018-02-07 06:11:11 +03:00
|
|
|
// Construct a set of all channel points presented by the store. Entries
|
2017-07-28 03:43:38 +03:00
|
|
|
// are only be added to the set if their corresponding retribution
|
2018-02-07 06:11:11 +03:00
|
|
|
// information matches the test vector.
|
2017-07-28 03:43:38 +03:00
|
|
|
var foundSet = make(map[wire.OutPoint]struct{})
|
|
|
|
|
|
|
|
// Iterate through the stored retributions, checking to see if we have
|
|
|
|
// an equivalent retribution in the test vector. This will return an
|
|
|
|
// error unless all persisted retributions exist in the test vector.
|
|
|
|
if err := frs.ForAll(func(ret *retributionInfo) error {
|
|
|
|
// Fetch the retribution information from the test vector. If
|
|
|
|
// the entry does not exist, the test returns an error.
|
|
|
|
if exRetInfo, ok := retributionMap[ret.chanPoint]; ok {
|
|
|
|
// Compare the presented retribution information with
|
|
|
|
// the expected value, fail if they are inconsistent.
|
|
|
|
if !reflect.DeepEqual(ret, &exRetInfo) {
|
|
|
|
return fmt.Errorf("unexpected retribution "+
|
|
|
|
"retrieved from db --\n"+
|
|
|
|
"want: %#v\ngot: %#v", exRetInfo, ret,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Retribution information from database matches the
|
|
|
|
// test vector, record the channel point in the found
|
|
|
|
// map.
|
|
|
|
foundSet[ret.chanPoint] = struct{}{}
|
|
|
|
|
|
|
|
} else {
|
2018-02-07 06:11:11 +03:00
|
|
|
return fmt.Errorf("unknown retribution retrieved "+
|
2017-08-22 02:58:21 +03:00
|
|
|
"from db: %v", ret)
|
2017-07-28 03:43:38 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}); err != nil {
|
|
|
|
t.Fatalf("failed to iterate over persistent retributions: %v",
|
|
|
|
err)
|
2017-07-26 08:57:29 +03:00
|
|
|
}
|
|
|
|
|
2017-12-18 05:40:05 +03:00
|
|
|
// Check that retribution store emits nrets entries
|
2017-07-28 03:43:38 +03:00
|
|
|
if count := countRetributions(t, frs); count != nrets {
|
|
|
|
t.Fatalf("expected %v retributions, found %v", nrets, count)
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
|
|
|
|
2017-07-28 03:43:38 +03:00
|
|
|
// Confirm that all of the retributions emitted from the iteration
|
|
|
|
// correspond to unique channel points.
|
|
|
|
nunique := len(foundSet)
|
|
|
|
if nunique != nrets {
|
|
|
|
t.Fatalf("expected %v unique retributions, only found %v",
|
|
|
|
nrets, nunique)
|
|
|
|
}
|
|
|
|
|
|
|
|
// If in failure mode on only on first pass, restart the database and
|
|
|
|
// rexecute the test.
|
|
|
|
if failing && !isRestart {
|
|
|
|
frs.Restart()
|
|
|
|
isRestart = true
|
|
|
|
|
|
|
|
goto restartCheck
|
2017-05-07 14:09:22 +03:00
|
|
|
}
|
|
|
|
}
|
2017-11-21 10:57:08 +03:00
|
|
|
|
2018-05-31 13:41:51 +03:00
|
|
|
// initBreachedState builds the shared fixture for the breach tests: it creates
// a funded Alice/Bob channel pair, captures a force-close summary from Bob at
// an intermediate state, then advances the channel past that state so Bob's
// captured close is a revoked (breaching) commitment. It returns the breach
// arbiter, both channels, Bob's stale close summary, the channel on which
// breach events are delivered to the arbiter, and cleanup closures for the
// channels and the arbiter — callers should defer both.
func initBreachedState(t *testing.T) (*breachArbiter,
	*lnwallet.LightningChannel, *lnwallet.LightningChannel,
	*lnwallet.LocalForceCloseSummary, chan *ContractBreachEvent,
	func(), func()) {
	// Create a pair of channels using a notifier that allows us to signal
	// a spend of the funding transaction. Alice's channel will be the one
	// observing a breach.
	alice, bob, cleanUpChans, err := createInitChannels(1)
	if err != nil {
		t.Fatalf("unable to create test channels: %v", err)
	}

	// Instantiate a breach arbiter to handle the breach of alice's channel.
	contractBreaches := make(chan *ContractBreachEvent)

	brar, cleanUpArb, err := createTestArbiter(
		t, contractBreaches, alice.State().Db,
	)
	if err != nil {
		t.Fatalf("unable to initialize test breach arbiter: %v", err)
	}

	// Send one HTLC to Bob and perform a state transition to lock it in.
	htlcAmount := lnwire.NewMSatFromSatoshis(20000)
	htlc, _ := createHTLC(0, htlcAmount)
	if _, err := alice.AddHTLC(htlc, nil); err != nil {
		t.Fatalf("alice unable to add htlc: %v", err)
	}
	if _, err := bob.ReceiveHTLC(htlc); err != nil {
		t.Fatalf("bob unable to recv add htlc: %v", err)
	}
	if err := forceStateTransition(alice, bob); err != nil {
		t.Fatalf("Can't update the channel state: %v", err)
	}

	// Generate the force close summary at this point in time, this will
	// serve as the old state bob will broadcast.
	bobClose, err := bob.ForceClose()
	if err != nil {
		t.Fatalf("unable to force close bob's channel: %v", err)
	}

	// Now send another HTLC and perform a state transition, this ensures
	// Alice is ahead of the state Bob will broadcast. After this point,
	// broadcasting bobClose.CloseTx constitutes a breach.
	htlc2, _ := createHTLC(1, htlcAmount)
	if _, err := alice.AddHTLC(htlc2, nil); err != nil {
		t.Fatalf("alice unable to add htlc: %v", err)
	}
	if _, err := bob.ReceiveHTLC(htlc2); err != nil {
		t.Fatalf("bob unable to recv add htlc: %v", err)
	}
	if err := forceStateTransition(alice, bob); err != nil {
		t.Fatalf("Can't update the channel state: %v", err)
	}

	return brar, alice, bob, bobClose, contractBreaches, cleanUpChans,
		cleanUpArb
}
|
|
|
|
|
|
|
|
// TestBreachHandoffSuccess tests that a channel's close observer properly
// delivers retribution information to the breach arbiter in response to a
// breach close. This test verifies correctness in the event that the handoff
// experiences no interruptions.
func TestBreachHandoffSuccess(t *testing.T) {
	brar, alice, _, bobClose, contractBreaches,
		cleanUpChans, cleanUpArb := initBreachedState(t)
	defer cleanUpChans()
	defer cleanUpArb()

	chanPoint := alice.ChanPoint

	// Signal a spend of the funding transaction and wait for the close
	// observer to exit. The ProcessACK channel is buffered so the arbiter
	// can reply without blocking.
	breach := &ContractBreachEvent{
		ChanPoint:  *chanPoint,
		ProcessACK: make(chan error, 1),
		BreachRetribution: &lnwallet.BreachRetribution{
			BreachTransaction: bobClose.CloseTx,
			LocalOutputSignDesc: &lnwallet.SignDescriptor{
				Output: &wire.TxOut{
					PkScript: breachKeys[0],
				},
			},
		},
	}
	contractBreaches <- breach

	// We'll also wait to consume the ACK back from the breach arbiter.
	select {
	case err := <-breach.ProcessACK:
		if err != nil {
			t.Fatalf("handoff failed: %v", err)
		}
	case <-time.After(time.Second * 15):
		t.Fatalf("breach arbiter didn't send ack back")
	}

	// After exiting, the breach arbiter should have persisted the
	// retribution information and the channel should be shown as pending
	// force closed.
	assertArbiterBreach(t, brar, chanPoint)

	// Send another breach event. Since the handoff for this channel was
	// already ACKed, the breach arbiter should immediately ACK and ignore
	// this event.
	breach = &ContractBreachEvent{
		ChanPoint:  *chanPoint,
		ProcessACK: make(chan error, 1),
		BreachRetribution: &lnwallet.BreachRetribution{
			BreachTransaction: bobClose.CloseTx,
			LocalOutputSignDesc: &lnwallet.SignDescriptor{
				Output: &wire.TxOut{
					PkScript: breachKeys[0],
				},
			},
		},
	}

	contractBreaches <- breach

	// We'll also wait to consume the ACK back from the breach arbiter.
	select {
	case err := <-breach.ProcessACK:
		if err != nil {
			t.Fatalf("handoff failed: %v", err)
		}
	case <-time.After(time.Second * 15):
		t.Fatalf("breach arbiter didn't send ack back")
	}

	// State should not have changed: the arbiter still reports the same
	// single breach for this channel point.
	assertArbiterBreach(t, brar, chanPoint)
}
|
|
|
|
|
|
|
|
// TestBreachHandoffFail tests that a channel's close observer properly
// delivers retribution information to the breach arbiter in response to a
// breach close. This test verifies correctness in the event that the breach
// arbiter fails to write the information to disk, and that a subsequent attempt
// at the handoff succeeds.
func TestBreachHandoffFail(t *testing.T) {
	brar, alice, _, bobClose, contractBreaches,
		cleanUpChans, cleanUpArb := initBreachedState(t)
	defer cleanUpChans()
	defer cleanUpArb()

	// Before alerting Alice of the breach, instruct our failing retribution
	// store to fail the next database operation, which we expect to write
	// the information handed off by the channel's close observer.
	fstore := brar.cfg.Store.(*failingRetributionStore)
	fstore.FailNextAdd(nil)

	// Signal the notifier to dispatch spend notifications of the funding
	// transaction using the transaction from bob's closing summary.
	chanPoint := alice.ChanPoint
	breach := &ContractBreachEvent{
		ChanPoint:  *chanPoint,
		ProcessACK: make(chan error, 1),
		BreachRetribution: &lnwallet.BreachRetribution{
			BreachTransaction: bobClose.CloseTx,
			LocalOutputSignDesc: &lnwallet.SignDescriptor{
				Output: &wire.TxOut{
					PkScript: breachKeys[0],
				},
			},
		},
	}
	contractBreaches <- breach

	// We'll also wait to consume the ACK back from the breach arbiter.
	// Because of the injected store failure above, the ACK must carry a
	// non-nil error.
	select {
	case err := <-breach.ProcessACK:
		if err == nil {
			t.Fatalf("breach write should have failed")
		}
	case <-time.After(time.Second * 15):
		t.Fatalf("breach arbiter didn't send ack back")
	}

	// Since the handoff failed, the breach arbiter should not show the
	// channel as breached, and the channel should also not have been marked
	// pending closed.
	assertNoArbiterBreach(t, brar, chanPoint)
	assertNotPendingClosed(t, alice)

	// Recreate the arbiter so the retry below runs against a fresh,
	// non-failing instance backed by the same database.
	brar, cleanUpArb, err := createTestArbiter(
		t, contractBreaches, alice.State().Db,
	)
	if err != nil {
		t.Fatalf("unable to initialize test breach arbiter: %v", err)
	}
	defer cleanUpArb()

	// Instantiate a second lightning channel for alice, using the state of
	// her last channel.
	aliceKeyPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(),
		alicesPrivKey)
	aliceSigner := &mockSigner{aliceKeyPriv}

	alice2, err := lnwallet.NewLightningChannel(aliceSigner, nil, alice.State())
	if err != nil {
		t.Fatalf("unable to create test channels: %v", err)
	}
	defer alice2.Stop()

	// Signal a spend of the funding transaction and wait for the close
	// observer to exit. This time we are allowing the handoff to succeed.
	breach = &ContractBreachEvent{
		ChanPoint:  *chanPoint,
		ProcessACK: make(chan error, 1),
		BreachRetribution: &lnwallet.BreachRetribution{
			BreachTransaction: bobClose.CloseTx,
			LocalOutputSignDesc: &lnwallet.SignDescriptor{
				Output: &wire.TxOut{
					PkScript: breachKeys[0],
				},
			},
		},
	}

	contractBreaches <- breach

	select {
	case err := <-breach.ProcessACK:
		if err != nil {
			t.Fatalf("handoff failed: %v", err)
		}
	case <-time.After(time.Second * 15):
		t.Fatalf("breach arbiter didn't send ack back")
	}

	// Check that the breach was properly recorded in the breach arbiter,
	// and that the close observer marked the channel as pending closed
	// before exiting.
	assertArbiterBreach(t, brar, chanPoint)
}
|
|
|
|
|
2018-05-31 13:52:02 +03:00
|
|
|
// TestBreachSecondLevelTransfer tests that sweep of a HTLC output on a
|
|
|
|
// breached commitment is transferred to a second level spend if the output is
|
|
|
|
// already spent.
|
|
|
|
func TestBreachSecondLevelTransfer(t *testing.T) {
|
|
|
|
brar, alice, _, bobClose, contractBreaches,
|
|
|
|
cleanUpChans, cleanUpArb := initBreachedState(t)
|
|
|
|
defer cleanUpChans()
|
|
|
|
defer cleanUpArb()
|
|
|
|
|
|
|
|
var (
|
|
|
|
height = bobClose.ChanSnapshot.CommitHeight
|
|
|
|
forceCloseTx = bobClose.CloseTx
|
|
|
|
chanPoint = alice.ChanPoint
|
|
|
|
publTx = make(chan *wire.MsgTx)
|
|
|
|
publErr error
|
|
|
|
)
|
|
|
|
|
|
|
|
// Make PublishTransaction always return ErrDoubleSpend to begin with.
|
|
|
|
publErr = lnwallet.ErrDoubleSpend
|
|
|
|
brar.cfg.PublishTransaction = func(tx *wire.MsgTx) error {
|
|
|
|
publTx <- tx
|
|
|
|
return publErr
|
|
|
|
}
|
|
|
|
|
|
|
|
// Notify the breach arbiter about the breach.
|
|
|
|
retribution, err := lnwallet.NewBreachRetribution(
|
|
|
|
alice.State(), height, forceCloseTx, 1)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to create breach retribution: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
breach := &ContractBreachEvent{
|
|
|
|
ChanPoint: *chanPoint,
|
|
|
|
ProcessACK: make(chan error, 1),
|
|
|
|
BreachRetribution: retribution,
|
|
|
|
}
|
|
|
|
contractBreaches <- breach
|
|
|
|
|
|
|
|
// We'll also wait to consume the ACK back from the breach arbiter.
|
|
|
|
select {
|
|
|
|
case err := <-breach.ProcessACK:
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("handoff failed: %v", err)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Second * 15):
|
|
|
|
t.Fatalf("breach arbiter didn't send ack back")
|
|
|
|
}
|
|
|
|
|
|
|
|
// After exiting, the breach arbiter should have persisted the
|
|
|
|
// retribution information and the channel should be shown as pending
|
|
|
|
// force closed.
|
|
|
|
assertArbiterBreach(t, brar, chanPoint)
|
|
|
|
|
|
|
|
// Notify that the breaching transaction is confirmed, to trigger the
|
|
|
|
// retribution logic.
|
|
|
|
notifier := brar.cfg.Notifier.(*mockSpendNotifier)
|
|
|
|
notifier.confChannel <- &chainntnfs.TxConfirmation{}
|
|
|
|
|
|
|
|
// The breach arbiter should attempt to sweep all outputs on the
|
|
|
|
// breached commitment. We'll pretend that the HTLC output has been
|
|
|
|
// spent by the channel counter party's second level tx already.
|
|
|
|
var tx *wire.MsgTx
|
|
|
|
select {
|
|
|
|
case tx = <-publTx:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx was not published")
|
|
|
|
}
|
|
|
|
|
|
|
|
if tx.TxIn[0].PreviousOutPoint.Hash != forceCloseTx.TxHash() {
|
|
|
|
t.Fatalf("tx not attempting to spend commitment")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Find the index of the TxIn spending the HTLC output.
|
|
|
|
htlcOutpoint := &retribution.HtlcRetributions[0].OutPoint
|
|
|
|
htlcIn := -1
|
|
|
|
for i, txIn := range tx.TxIn {
|
|
|
|
if txIn.PreviousOutPoint == *htlcOutpoint {
|
|
|
|
htlcIn = i
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if htlcIn == -1 {
|
|
|
|
t.Fatalf("htlc in not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
// Since publishing the transaction failed above, the breach arbiter
|
|
|
|
// will attempt another second level check. Now notify that the htlc
|
|
|
|
// output is spent by a second level tx.
|
|
|
|
secondLvlTx := &wire.MsgTx{
|
|
|
|
TxOut: []*wire.TxOut{
|
2018-07-31 11:29:12 +03:00
|
|
|
{Value: 1},
|
2018-05-31 13:52:02 +03:00
|
|
|
},
|
|
|
|
}
|
|
|
|
notifier.Spend(htlcOutpoint, 2, secondLvlTx)
|
|
|
|
|
|
|
|
// Now a transaction attempting to spend from the second level tx
|
|
|
|
// should be published instead. Let this publish succeed by setting the
|
|
|
|
// publishing error to nil.
|
|
|
|
publErr = nil
|
|
|
|
select {
|
|
|
|
case tx = <-publTx:
|
|
|
|
case <-time.After(5 * time.Second):
|
|
|
|
t.Fatalf("tx was not published")
|
|
|
|
}
|
|
|
|
|
|
|
|
// The TxIn previously attempting to spend the HTLC outpoint should now
|
|
|
|
// be spending from the second level tx.
|
|
|
|
if tx.TxIn[htlcIn].PreviousOutPoint.Hash != secondLvlTx.TxHash() {
|
|
|
|
t.Fatalf("tx not attempting to spend second level tx, %v", tx.TxIn[0])
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
// assertArbiterBreach checks that the breach arbiter has persisted the breach
|
|
|
|
// information for a particular channel.
|
|
|
|
func assertArbiterBreach(t *testing.T, brar *breachArbiter,
|
|
|
|
chanPoint *wire.OutPoint) {
|
|
|
|
|
|
|
|
isBreached, err := brar.IsBreached(chanPoint)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to determine if channel is "+
|
|
|
|
"breached: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if !isBreached {
|
|
|
|
t.Fatalf("channel %v was never marked breached",
|
|
|
|
chanPoint)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// assertNoArbiterBreach checks that the breach arbiter has not persisted the
|
|
|
|
// breach information for a particular channel.
|
|
|
|
func assertNoArbiterBreach(t *testing.T, brar *breachArbiter,
|
|
|
|
chanPoint *wire.OutPoint) {
|
|
|
|
|
|
|
|
isBreached, err := brar.IsBreached(chanPoint)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to determine if channel is "+
|
|
|
|
"breached: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if isBreached {
|
|
|
|
t.Fatalf("channel %v was marked breached",
|
|
|
|
chanPoint)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// assertNotPendingClosed checks that the channel has not been marked pending
|
|
|
|
// closed in the channel database.
|
|
|
|
func assertNotPendingClosed(t *testing.T, c *lnwallet.LightningChannel) {
|
|
|
|
closedChans, err := c.State().Db.FetchClosedChannels(true)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unable to load pending closed channels: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, chanSummary := range closedChans {
|
|
|
|
if chanSummary.ChanPoint == *c.ChanPoint {
|
|
|
|
t.Fatalf("channel %v was marked pending closed",
|
|
|
|
c.ChanPoint)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// createTestArbiter instantiates a breach arbiter with a failing retribution
// store, so that controlled failures can be tested.
//
// The returned cleanup closure stops the arbiter; the caller remains
// responsible for closing the supplied database.
func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent,
	db *channeldb.DB) (*breachArbiter, func(), error) {

	// Create a failing retribution store, that wraps a normal one.
	store := newFailingRetributionStore(func() RetributionStore {
		return newRetributionStore(db)
	})

	// Alice's static test key doubles as the arbiter's signing key here.
	aliceKeyPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(),
		alicesPrivKey)
	signer := &mockSigner{key: aliceKeyPriv}

	// Assemble our test arbiter. CloseLink, GenSweepScript and
	// PublishTransaction are no-op stubs; tests that care about
	// publication override PublishTransaction after construction.
	notifier := makeMockSpendNotifier()
	ba := newBreachArbiter(&BreachConfig{
		CloseLink:          func(_ *wire.OutPoint, _ htlcswitch.ChannelCloseType) {},
		DB:                 db,
		Estimator:          &lnwallet.StaticFeeEstimator{FeeRate: 50},
		GenSweepScript:     func() ([]byte, error) { return nil, nil },
		ContractBreaches:   contractBreaches,
		Signer:             signer,
		Notifier:           notifier,
		PublishTransaction: func(_ *wire.MsgTx) error { return nil },
		Store:              store,
	})

	if err := ba.Start(); err != nil {
		return nil, nil, err
	}

	// The caller is responsible for closing the database.
	cleanUp := func() {
		ba.Stop()
	}

	return ba, cleanUp, nil
}
|
|
|
|
|
breacharbiter: properly account for second-level spends during breach remedy
In this commit, we address an un accounted for case during the breach
remedy process. If the remote node actually went directly to the second
layer during a channel breach attempt, then we wouldn’t properly be
able to sweep with out justice transaction, as some HTLC inputs may
actually be spent at that point.
In order to address this case, we’ll now catch the transaction
rejection, then check to see which input was spent, promote that to a
second level spend, and repeat as necessary. At the end of this loop,
any inputs which have been spent to the second level will have had the
prevouts and witnesses updated.
In order to perform this transition, we now also store the second level
witness script in the database. This allow us to modify the sign desc
with the proper input value, as well as witness script.
2018-01-23 04:11:02 +03:00
|
|
|
// createInitChannels creates two initialized test channels funded with 10 BTC,
|
|
|
|
// with 5 BTC allocated to each side. Within the channel, Alice is the
|
|
|
|
// initiator.
|
|
|
|
func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwallet.LightningChannel, func(), error) {
|
2017-11-21 10:57:08 +03:00
|
|
|
|
|
|
|
aliceKeyPriv, aliceKeyPub := btcec.PrivKeyFromBytes(btcec.S256(),
|
|
|
|
alicesPrivKey)
|
|
|
|
bobKeyPriv, bobKeyPub := btcec.PrivKeyFromBytes(btcec.S256(),
|
|
|
|
bobsPrivKey)
|
|
|
|
|
2018-03-26 05:16:39 +03:00
|
|
|
channelCapacity, err := btcutil.NewAmount(10)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
channelBal := channelCapacity / 2
|
|
|
|
aliceDustLimit := btcutil.Amount(200)
|
|
|
|
bobDustLimit := btcutil.Amount(1300)
|
|
|
|
csvTimeoutAlice := uint32(5)
|
|
|
|
csvTimeoutBob := uint32(4)
|
|
|
|
|
|
|
|
prevOut := &wire.OutPoint{
|
|
|
|
Hash: chainhash.Hash(testHdSeed),
|
|
|
|
Index: 0,
|
|
|
|
}
|
|
|
|
fundingTxIn := wire.NewTxIn(prevOut, nil, nil)
|
|
|
|
|
|
|
|
aliceCfg := channeldb.ChannelConfig{
|
|
|
|
ChannelConstraints: channeldb.ChannelConstraints{
|
|
|
|
DustLimit: aliceDustLimit,
|
|
|
|
MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
|
2018-02-08 06:36:34 +03:00
|
|
|
ChanReserve: 0,
|
|
|
|
MinHTLC: 0,
|
2017-11-21 10:57:08 +03:00
|
|
|
MaxAcceptedHtlcs: uint16(rand.Int31()),
|
|
|
|
},
|
2018-02-18 02:29:01 +03:00
|
|
|
CsvDelay: uint16(csvTimeoutAlice),
|
|
|
|
MultiSigKey: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
|
|
|
RevocationBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
|
|
|
PaymentBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
|
|
|
DelayBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
|
|
|
HtlcBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: aliceKeyPub,
|
|
|
|
},
|
2017-11-21 10:57:08 +03:00
|
|
|
}
|
|
|
|
bobCfg := channeldb.ChannelConfig{
|
|
|
|
ChannelConstraints: channeldb.ChannelConstraints{
|
|
|
|
DustLimit: bobDustLimit,
|
|
|
|
MaxPendingAmount: lnwire.MilliSatoshi(rand.Int63()),
|
2018-02-08 06:36:34 +03:00
|
|
|
ChanReserve: 0,
|
|
|
|
MinHTLC: 0,
|
2017-11-21 10:57:08 +03:00
|
|
|
MaxAcceptedHtlcs: uint16(rand.Int31()),
|
|
|
|
},
|
2018-02-18 02:29:01 +03:00
|
|
|
CsvDelay: uint16(csvTimeoutBob),
|
|
|
|
MultiSigKey: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
|
|
|
RevocationBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
|
|
|
PaymentBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
|
|
|
DelayBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
|
|
|
HtlcBasePoint: keychain.KeyDescriptor{
|
|
|
|
PubKey: bobKeyPub,
|
|
|
|
},
|
2017-11-21 10:57:08 +03:00
|
|
|
}
|
|
|
|
|
2018-02-18 02:29:01 +03:00
|
|
|
bobRoot, err := chainhash.NewHash(bobKeyPriv.Serialize())
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
bobPreimageProducer := shachain.NewRevocationProducer(*bobRoot)
|
2017-11-21 10:57:08 +03:00
|
|
|
bobFirstRevoke, err := bobPreimageProducer.AtIndex(0)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
bobCommitPoint := lnwallet.ComputeCommitmentPoint(bobFirstRevoke[:])
|
|
|
|
|
2018-02-18 02:29:01 +03:00
|
|
|
aliceRoot, err := chainhash.NewHash(aliceKeyPriv.Serialize())
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
alicePreimageProducer := shachain.NewRevocationProducer(*aliceRoot)
|
2017-11-21 10:57:08 +03:00
|
|
|
aliceFirstRevoke, err := alicePreimageProducer.AtIndex(0)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
aliceCommitPoint := lnwallet.ComputeCommitmentPoint(aliceFirstRevoke[:])
|
|
|
|
|
|
|
|
aliceCommitTx, bobCommitTx, err := lnwallet.CreateCommitmentTxns(channelBal,
|
|
|
|
channelBal, &aliceCfg, &bobCfg, aliceCommitPoint, bobCommitPoint,
|
|
|
|
*fundingTxIn)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
alicePath, err := ioutil.TempDir("", "alicedb")
|
|
|
|
dbAlice, err := channeldb.Open(alicePath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
bobPath, err := ioutil.TempDir("", "bobdb")
|
|
|
|
dbBob, err := channeldb.Open(bobPath)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
estimator := &lnwallet.StaticFeeEstimator{FeeRate: 50}
|
2018-02-13 17:15:14 +03:00
|
|
|
feePerVSize, err := estimator.EstimateFeePerVSize(1)
|
2017-11-21 10:57:08 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
2018-02-13 17:15:14 +03:00
|
|
|
feePerKw := feePerVSize.FeePerKWeight()
|
2017-11-21 10:57:08 +03:00
|
|
|
|
|
|
|
// TODO(roasbeef): need to factor in commit fee?
|
|
|
|
aliceCommit := channeldb.ChannelCommitment{
|
|
|
|
CommitHeight: 0,
|
|
|
|
LocalBalance: lnwire.NewMSatFromSatoshis(channelBal),
|
|
|
|
RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
|
2018-02-13 17:15:14 +03:00
|
|
|
FeePerKw: btcutil.Amount(feePerKw),
|
2017-11-21 10:57:08 +03:00
|
|
|
CommitFee: 8688,
|
|
|
|
CommitTx: aliceCommitTx,
|
|
|
|
CommitSig: bytes.Repeat([]byte{1}, 71),
|
|
|
|
}
|
|
|
|
bobCommit := channeldb.ChannelCommitment{
|
|
|
|
CommitHeight: 0,
|
|
|
|
LocalBalance: lnwire.NewMSatFromSatoshis(channelBal),
|
|
|
|
RemoteBalance: lnwire.NewMSatFromSatoshis(channelBal),
|
2018-02-13 17:15:14 +03:00
|
|
|
FeePerKw: btcutil.Amount(feePerKw),
|
2017-11-21 10:57:08 +03:00
|
|
|
CommitFee: 8688,
|
|
|
|
CommitTx: bobCommitTx,
|
|
|
|
CommitSig: bytes.Repeat([]byte{1}, 71),
|
|
|
|
}
|
|
|
|
|
2018-02-24 06:28:36 +03:00
|
|
|
var chanIDBytes [8]byte
|
|
|
|
if _, err := io.ReadFull(crand.Reader, chanIDBytes[:]); err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
shortChanID := lnwire.NewShortChanIDFromInt(
|
|
|
|
binary.BigEndian.Uint64(chanIDBytes[:]),
|
|
|
|
)
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
aliceChannelState := &channeldb.OpenChannel{
|
|
|
|
LocalChanCfg: aliceCfg,
|
|
|
|
RemoteChanCfg: bobCfg,
|
|
|
|
IdentityPub: aliceKeyPub,
|
|
|
|
FundingOutpoint: *prevOut,
|
2018-05-02 02:27:20 +03:00
|
|
|
ShortChannelID: shortChanID,
|
2017-11-21 10:57:08 +03:00
|
|
|
ChanType: channeldb.SingleFunder,
|
|
|
|
IsInitiator: true,
|
|
|
|
Capacity: channelCapacity,
|
|
|
|
RemoteCurrentRevocation: bobCommitPoint,
|
|
|
|
RevocationProducer: alicePreimageProducer,
|
|
|
|
RevocationStore: shachain.NewRevocationStore(),
|
|
|
|
LocalCommitment: aliceCommit,
|
|
|
|
RemoteCommitment: aliceCommit,
|
|
|
|
Db: dbAlice,
|
2018-02-24 06:28:36 +03:00
|
|
|
Packager: channeldb.NewChannelPackager(shortChanID),
|
2018-03-11 04:27:51 +03:00
|
|
|
FundingTxn: testTx,
|
2017-11-21 10:57:08 +03:00
|
|
|
}
|
|
|
|
bobChannelState := &channeldb.OpenChannel{
|
|
|
|
LocalChanCfg: bobCfg,
|
|
|
|
RemoteChanCfg: aliceCfg,
|
|
|
|
IdentityPub: bobKeyPub,
|
|
|
|
FundingOutpoint: *prevOut,
|
2018-05-02 02:27:20 +03:00
|
|
|
ShortChannelID: shortChanID,
|
2017-11-21 10:57:08 +03:00
|
|
|
ChanType: channeldb.SingleFunder,
|
|
|
|
IsInitiator: false,
|
|
|
|
Capacity: channelCapacity,
|
|
|
|
RemoteCurrentRevocation: aliceCommitPoint,
|
|
|
|
RevocationProducer: bobPreimageProducer,
|
|
|
|
RevocationStore: shachain.NewRevocationStore(),
|
|
|
|
LocalCommitment: bobCommit,
|
|
|
|
RemoteCommitment: bobCommit,
|
|
|
|
Db: dbBob,
|
2018-02-24 06:28:36 +03:00
|
|
|
Packager: channeldb.NewChannelPackager(shortChanID),
|
2017-11-21 10:57:08 +03:00
|
|
|
}
|
|
|
|
|
2018-01-17 06:57:04 +03:00
|
|
|
pCache := &mockPreimageCache{
|
|
|
|
// hash -> preimage
|
|
|
|
preimageMap: make(map[[32]byte][]byte),
|
|
|
|
}
|
|
|
|
|
2017-11-21 10:57:08 +03:00
|
|
|
aliceSigner := &mockSigner{aliceKeyPriv}
|
|
|
|
bobSigner := &mockSigner{bobKeyPriv}
|
|
|
|
|
2018-01-17 06:57:04 +03:00
|
|
|
channelAlice, err := lnwallet.NewLightningChannel(
|
2018-01-20 04:25:06 +03:00
|
|
|
aliceSigner, pCache, aliceChannelState,
|
2018-01-17 06:57:04 +03:00
|
|
|
)
|
2017-11-21 10:57:08 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
2018-01-17 06:57:04 +03:00
|
|
|
channelBob, err := lnwallet.NewLightningChannel(
|
2018-01-20 04:25:06 +03:00
|
|
|
bobSigner, pCache, bobChannelState,
|
2018-01-17 06:57:04 +03:00
|
|
|
)
|
2017-11-21 10:57:08 +03:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
breacharbiter: properly account for second-level spends during breach remedy
In this commit, we address an un accounted for case during the breach
remedy process. If the remote node actually went directly to the second
layer during a channel breach attempt, then we wouldn’t properly be
able to sweep with out justice transaction, as some HTLC inputs may
actually be spent at that point.
In order to address this case, we’ll now catch the transaction
rejection, then check to see which input was spent, promote that to a
second level spend, and repeat as necessary. At the end of this loop,
any inputs which have been spent to the second level will have had the
prevouts and witnesses updated.
In order to perform this transition, we now also store the second level
witness script in the database. This allow us to modify the sign desc
with the proper input value, as well as witness script.
2018-01-23 04:11:02 +03:00
|
|
|
addr := &net.TCPAddr{
|
|
|
|
IP: net.ParseIP("127.0.0.1"),
|
|
|
|
Port: 18556,
|
|
|
|
}
|
|
|
|
if err := channelAlice.State().SyncPending(addr, 101); err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
2017-11-21 10:57:08 +03:00
|
|
|
if err := channelAlice.State().FullSync(); err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
breacharbiter: properly account for second-level spends during breach remedy
In this commit, we address an un accounted for case during the breach
remedy process. If the remote node actually went directly to the second
layer during a channel breach attempt, then we wouldn’t properly be
able to sweep with out justice transaction, as some HTLC inputs may
actually be spent at that point.
In order to address this case, we’ll now catch the transaction
rejection, then check to see which input was spent, promote that to a
second level spend, and repeat as necessary. At the end of this loop,
any inputs which have been spent to the second level will have had the
prevouts and witnesses updated.
In order to perform this transition, we now also store the second level
witness script in the database. This allow us to modify the sign desc
with the proper input value, as well as witness script.
2018-01-23 04:11:02 +03:00
|
|
|
|
|
|
|
addr = &net.TCPAddr{
|
|
|
|
IP: net.ParseIP("127.0.0.1"),
|
|
|
|
Port: 18555,
|
|
|
|
}
|
|
|
|
if err := channelBob.State().SyncPending(addr, 101); err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
2017-11-21 10:57:08 +03:00
|
|
|
if err := channelBob.State().FullSync(); err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
cleanUpFunc := func() {
|
|
|
|
dbBob.Close()
|
|
|
|
dbAlice.Close()
|
|
|
|
os.RemoveAll(bobPath)
|
|
|
|
os.RemoveAll(alicePath)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now that the channel are open, simulate the start of a session by
|
|
|
|
// having Alice and Bob extend their revocation windows to each other.
|
|
|
|
err = initRevocationWindows(channelAlice, channelBob, revocationWindow)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return channelAlice, channelBob, cleanUpFunc, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// initRevocationWindows simulates a new channel being opened within the p2p
|
|
|
|
// network by populating the initial revocation windows of the passed
|
|
|
|
// commitment state machines.
|
|
|
|
//
|
|
|
|
// TODO(conner) remove code duplication
|
|
|
|
func initRevocationWindows(chanA, chanB *lnwallet.LightningChannel, windowSize int) error {
|
|
|
|
aliceNextRevoke, err := chanA.NextRevocationKey()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := chanB.InitNextRevocation(aliceNextRevoke); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
bobNextRevoke, err := chanB.NextRevocationKey()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := chanA.InitNextRevocation(bobNextRevoke); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// createHTLC is a utility function for generating an HTLC with a given
|
|
|
|
// preimage and a given amount.
|
|
|
|
// TODO(conner) remove code duplication
|
|
|
|
func createHTLC(data int, amount lnwire.MilliSatoshi) (*lnwire.UpdateAddHTLC, [32]byte) {
|
|
|
|
preimage := bytes.Repeat([]byte{byte(data)}, 32)
|
|
|
|
paymentHash := sha256.Sum256(preimage)
|
|
|
|
|
|
|
|
var returnPreimage [32]byte
|
|
|
|
copy(returnPreimage[:], preimage)
|
|
|
|
|
|
|
|
return &lnwire.UpdateAddHTLC{
|
|
|
|
ID: uint64(data),
|
|
|
|
PaymentHash: paymentHash,
|
|
|
|
Amount: amount,
|
|
|
|
Expiry: uint32(5),
|
|
|
|
}, returnPreimage
|
|
|
|
}
|
|
|
|
|
|
|
|
// forceStateTransition executes the necessary interaction between the two
// commitment state machines to transition to a new state locking in any
// pending updates.
//
// The exchange mirrors the commit/revoke dance of the protocol: A signs,
// B receives and revokes, B signs back, A receives the revocation and
// signature, A revokes, and finally B receives A's revocation. The statement
// order is significant and must not be rearranged.
//
// TODO(conner) remove code duplication
func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) error {
	// A proposes a new commitment to B.
	aliceSig, aliceHtlcSigs, err := chanA.SignNextCommitment()
	if err != nil {
		return err
	}
	if err = chanB.ReceiveNewCommitment(aliceSig, aliceHtlcSigs); err != nil {
		return err
	}

	// B revokes its prior state and counter-signs a commitment for A.
	bobRevocation, _, err := chanB.RevokeCurrentCommitment()
	if err != nil {
		return err
	}
	bobSig, bobHtlcSigs, err := chanB.SignNextCommitment()
	if err != nil {
		return err
	}

	// A absorbs B's revocation and new commitment signature.
	if _, _, _, err := chanA.ReceiveRevocation(bobRevocation); err != nil {
		return err
	}
	if err := chanA.ReceiveNewCommitment(bobSig, bobHtlcSigs); err != nil {
		return err
	}

	// A revokes its prior state, completing the transition once B
	// processes the revocation.
	aliceRevocation, _, err := chanA.RevokeCurrentCommitment()
	if err != nil {
		return err
	}
	if _, _, _, err := chanB.ReceiveRevocation(aliceRevocation); err != nil {
		return err
	}

	return nil
}
|
|
|
|
|
|
|
|
// calcStaticFee calculates appropriate fees for commitment transactions. This
|
|
|
|
// function provides a simple way to allow test balance assertions to take fee
|
|
|
|
// calculations into account.
|
|
|
|
//
|
|
|
|
// TODO(bvu): Refactor when dynamic fee estimation is added.
|
|
|
|
// TODO(conner) remove code duplication
|
|
|
|
func calcStaticFee(numHTLCs int) btcutil.Amount {
|
|
|
|
const (
|
|
|
|
commitWeight = btcutil.Amount(724)
|
|
|
|
htlcWeight = 172
|
|
|
|
feePerKw = btcutil.Amount(24/4) * 1000
|
|
|
|
)
|
|
|
|
return feePerKw * (commitWeight +
|
|
|
|
btcutil.Amount(htlcWeight*numHTLCs)) / 1000
|
|
|
|
}
|