package contractcourt

import (
	"crypto/rand"
	"io/ioutil"
	"os"
	"reflect"
	"testing"
	"time"

	prand "math/rand"

	"github.com/btcsuite/btcd/btcec"
	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/txscript"
	"github.com/btcsuite/btcd/wire"
	"github.com/coreos/bbolt"
	"github.com/davecgh/go-spew/spew"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
)
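// The fixtures below provide deterministic chain hashes, channel points, and
// a sign descriptor that are shared by all of the arbitrator log tests in
// this file.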
var (
	testChainHash = [chainhash.HashSize]byte{
		0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
		0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
		0x2d, 0xe7, 0x93, 0xe4,
	}

	testChanPoint1 = wire.OutPoint{
		Hash: chainhash.Hash{
			0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
			0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
			0x2d, 0xe7, 0x93, 0xe4,
		},
		Index: 1,
	}

	testChanPoint2 = wire.OutPoint{
		Hash: chainhash.Hash{
			0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
			0x51, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
			0x2d, 0xe7, 0x93, 0xe4,
		},
		Index: 2,
	}

	testPreimage = [32]byte{
		0x52, 0xb6, 0x37, 0xd8, 0xfc, 0xd2, 0xc6, 0xda,
		0x48, 0x59, 0xe6, 0x96, 0x31, 0x13, 0xa1, 0x17,
		0x2d, 0xe7, 0x93, 0xe4,
	}

	key1 = []byte{
		0x04, 0x11, 0xdb, 0x93, 0xe1, 0xdc, 0xdb, 0x8a,
		0x01, 0x6b, 0x49, 0x84, 0x0f, 0x8c, 0x53, 0xbc, 0x1e,
		0xb6, 0x8a, 0x38, 0x2e, 0x97, 0xb1, 0x48, 0x2e, 0xca,
		0xd7, 0xb1, 0x48, 0xa6, 0x90, 0x9a, 0x5c, 0xb2, 0xe0,
		0xea, 0xdd, 0xfb, 0x84, 0xcc, 0xf9, 0x74, 0x44, 0x64,
		0xf8, 0x2e, 0x16, 0x0b, 0xfa, 0x9b, 0x8b, 0x64, 0xf9,
		0xd4, 0xc0, 0x3f, 0x99, 0x9b, 0x86, 0x43, 0xf6, 0x56,
		0xb4, 0x12, 0xa3,
	}

	testSignDesc = input.SignDescriptor{
		SingleTweak: []byte{
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
			0x02, 0x02, 0x02, 0x02, 0x02,
		},
		WitnessScript: []byte{
			0x00, 0x14, 0xee, 0x91, 0x41, 0x7e, 0x85, 0x6c, 0xde,
			0x10, 0xa2, 0x91, 0x1e, 0xdc, 0xbd, 0xbd, 0x69, 0xe2,
			0xef, 0xb5, 0x71, 0x48,
		},
		Output: &wire.TxOut{
			Value: 5000000000,
			PkScript: []byte{
				0x41, // OP_DATA_65
				0x04, 0xd6, 0x4b, 0xdf, 0xd0, 0x9e, 0xb1, 0xc5,
				0xfe, 0x29, 0x5a, 0xbd, 0xeb, 0x1d, 0xca, 0x42,
				0x81, 0xbe, 0x98, 0x8e, 0x2d, 0xa0, 0xb6, 0xc1,
				0xc6, 0xa5, 0x9d, 0xc2, 0x26, 0xc2, 0x86, 0x24,
				0xe1, 0x81, 0x75, 0xe8, 0x51, 0xc9, 0x6b, 0x97,
				0x3d, 0x81, 0xb0, 0x1c, 0xc3, 0x1f, 0x04, 0x78,
				0x34, 0xbc, 0x06, 0xd6, 0xd6, 0xed, 0xf6, 0x20,
				0xd1, 0x84, 0x24, 0x1a, 0x6a, 0xed, 0x8b, 0x63,
				0xa6, // 65-byte uncompressed pubkey
				0xac, // OP_CHECKSIG
			},
		},
		HashType: txscript.SigHashAll,
	}
)
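// makeTestDB creates a temporary bbolt database for testing, returning the
// database handle along with a cleanup closure that closes the database and
// removes its backing directory.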
func makeTestDB() (*bbolt.DB, func(), error) {
	// First, create a temporary directory to be used for the duration of
	// this test.
	tempDirName, err := ioutil.TempDir("", "arblog")
	if err != nil {
		return nil, nil, err
	}

	db, err := bbolt.Open(tempDirName+"/test.db", 0600, nil)
	if err != nil {
		return nil, nil, err
	}

	cleanUp := func() {
		db.Close()
		os.RemoveAll(tempDirName)
	}

	return db, cleanUp, nil
}

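// newTestBoltArbLog creates an ArbitratorLog backed by a fresh test database,
// scoped to the passed chain hash and channel point.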
func newTestBoltArbLog(chainhash chainhash.Hash,
	op wire.OutPoint) (ArbitratorLog, func(), error) {

	testDB, cleanUp, err := makeTestDB()
	if err != nil {
		return nil, nil, err
	}

	testArbCfg := ChannelArbitratorConfig{}
	testLog, err := newBoltArbitratorLog(testDB, testArbCfg, chainhash, op)
	if err != nil {
		return nil, nil, err
	}

	return testLog, cleanUp, err
}

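// randOutPoint generates a fresh outpoint with a random transaction hash and
// output index.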
func randOutPoint() wire.OutPoint {
	var op wire.OutPoint
	rand.Read(op.Hash[:])
	op.Index = prand.Uint32()

	return op
}

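// assertResolversEqual asserts that the passed on-disk resolver matches the
// original resolver it was created from, comparing the fields relevant to
// each concrete resolver type.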
func assertResolversEqual(t *testing.T, originalResolver ContractResolver,
	diskResolver ContractResolver) {

	assertTimeoutResEqual := func(ogRes, diskRes *htlcTimeoutResolver) {
		if !reflect.DeepEqual(ogRes.htlcResolution, diskRes.htlcResolution) {
			t.Fatalf("resolution mismatch: expected %#v, got %#v",
				ogRes.htlcResolution, diskRes.htlcResolution)
		}
		if ogRes.outputIncubating != diskRes.outputIncubating {
			t.Fatalf("expected %v, got %v",
				ogRes.outputIncubating, diskRes.outputIncubating)
		}
		if ogRes.resolved != diskRes.resolved {
			t.Fatalf("expected %v, got %v", ogRes.resolved,
				diskRes.resolved)
		}
		if ogRes.broadcastHeight != diskRes.broadcastHeight {
			t.Fatalf("expected %v, got %v",
				ogRes.broadcastHeight, diskRes.broadcastHeight)
		}
		if ogRes.htlcIndex != diskRes.htlcIndex {
			t.Fatalf("expected %v, got %v", ogRes.htlcIndex,
				diskRes.htlcIndex)
		}
	}

	assertSuccessResEqual := func(ogRes, diskRes *htlcSuccessResolver) {
		if !reflect.DeepEqual(ogRes.htlcResolution, diskRes.htlcResolution) {
			t.Fatalf("resolution mismatch: expected %#v, got %#v",
				ogRes.htlcResolution, diskRes.htlcResolution)
		}
		if ogRes.outputIncubating != diskRes.outputIncubating {
			t.Fatalf("expected %v, got %v",
				ogRes.outputIncubating, diskRes.outputIncubating)
		}
		if ogRes.resolved != diskRes.resolved {
			t.Fatalf("expected %v, got %v", ogRes.resolved,
				diskRes.resolved)
		}
		if ogRes.broadcastHeight != diskRes.broadcastHeight {
			t.Fatalf("expected %v, got %v",
				ogRes.broadcastHeight, diskRes.broadcastHeight)
		}
		if ogRes.payHash != diskRes.payHash {
			t.Fatalf("expected %v, got %v", ogRes.payHash,
				diskRes.payHash)
		}
	}

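	// With the helpers defined above, we can dispatch on the concrete
	// resolver type and assert equality field by field.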
	switch ogRes := originalResolver.(type) {
	case *htlcTimeoutResolver:
		diskRes := diskResolver.(*htlcTimeoutResolver)
		assertTimeoutResEqual(ogRes, diskRes)

	case *htlcSuccessResolver:
		diskRes := diskResolver.(*htlcSuccessResolver)
		assertSuccessResEqual(ogRes, diskRes)

	case *htlcOutgoingContestResolver:
		diskRes := diskResolver.(*htlcOutgoingContestResolver)
		assertTimeoutResEqual(
			&ogRes.htlcTimeoutResolver, &diskRes.htlcTimeoutResolver,
		)

	case *htlcIncomingContestResolver:
		diskRes := diskResolver.(*htlcIncomingContestResolver)
		assertSuccessResEqual(
			&ogRes.htlcSuccessResolver, &diskRes.htlcSuccessResolver,
		)

		if ogRes.htlcExpiry != diskRes.htlcExpiry {
			t.Fatalf("expected %v, got %v", ogRes.htlcExpiry,
				diskRes.htlcExpiry)
		}

	case *commitSweepResolver:
		diskRes := diskResolver.(*commitSweepResolver)
		if !reflect.DeepEqual(ogRes.commitResolution, diskRes.commitResolution) {
			t.Fatalf("resolution mismatch: expected %v, got %v",
				ogRes.commitResolution, diskRes.commitResolution)
		}
		if ogRes.resolved != diskRes.resolved {
			t.Fatalf("expected %v, got %v", ogRes.resolved,
				diskRes.resolved)
		}
		if ogRes.broadcastHeight != diskRes.broadcastHeight {
			t.Fatalf("expected %v, got %v",
				ogRes.broadcastHeight, diskRes.broadcastHeight)
		}
		if ogRes.chanPoint != diskRes.chanPoint {
			t.Fatalf("expected %v, got %v", ogRes.chanPoint,
				diskRes.chanPoint)
		}
	}
}

// TestContractInsertionRetrieval tests that we're able to insert a set of
// unresolved contracts into the log, and retrieve the same set properly.
func TestContractInsertionRetrieval(t *testing.T) {
	t.Parallel()

	// First, we'll create a test instance of the ArbitratorLog
	// implementation backed by boltdb.
	testLog, cleanUp, err := newTestBoltArbLog(
		testChainHash, testChanPoint1,
	)
	if err != nil {
		t.Fatalf("unable to create test log: %v", err)
	}
	defer cleanUp()

	// With the log created, we'll create a series of resolvers, each
	// properly implementing the ContractResolver interface.
	timeoutResolver := htlcTimeoutResolver{
		htlcResolution: lnwallet.OutgoingHtlcResolution{
			Expiry:          99,
			SignedTimeoutTx: nil,
			CsvDelay:        99,
			ClaimOutpoint:   randOutPoint(),
			SweepSignDesc:   testSignDesc,
		},
		outputIncubating: true,
		resolved:         true,
		broadcastHeight:  102,
		htlcIndex:        12,
	}
	successResolver := htlcSuccessResolver{
		htlcResolution: lnwallet.IncomingHtlcResolution{
			Preimage:        testPreimage,
			SignedSuccessTx: nil,
			CsvDelay:        900,
			ClaimOutpoint:   randOutPoint(),
			SweepSignDesc:   testSignDesc,
		},
		outputIncubating: true,
		resolved:         true,
		broadcastHeight:  109,
		payHash:          testPreimage,
		sweepTx:          nil,
	}
	resolvers := []ContractResolver{
		&timeoutResolver,
		&successResolver,
		&commitSweepResolver{
			commitResolution: lnwallet.CommitOutputResolution{
				SelfOutPoint:       testChanPoint2,
				SelfOutputSignDesc: testSignDesc,
				MaturityDelay:      99,
			},
			resolved:        false,
			broadcastHeight: 109,
			chanPoint:       testChanPoint1,
		},
	}

	// All resolvers require a unique ResolverKey() output. To achieve this
	// for the composite resolvers, we'll mutate the underlying resolver
	// with a new outpoint.
	contestTimeout := timeoutResolver
	contestTimeout.htlcResolution.ClaimOutpoint = randOutPoint()
	resolvers = append(resolvers, &htlcOutgoingContestResolver{
		htlcTimeoutResolver: contestTimeout,
	})
	contestSuccess := successResolver
	contestSuccess.htlcResolution.ClaimOutpoint = randOutPoint()
	resolvers = append(resolvers, &htlcIncomingContestResolver{
		htlcExpiry:          100,
		htlcSuccessResolver: contestSuccess,
	})

	// For quick lookup during the test, we'll create this map, which
	// allows us to look up a resolver according to its unique resolver
	// key.
	resolverMap := make(map[string]ContractResolver)
	resolverMap[string(timeoutResolver.ResolverKey())] = resolvers[0]
	resolverMap[string(successResolver.ResolverKey())] = resolvers[1]
	resolverMap[string(resolvers[2].ResolverKey())] = resolvers[2]
	resolverMap[string(resolvers[3].ResolverKey())] = resolvers[3]
	resolverMap[string(resolvers[4].ResolverKey())] = resolvers[4]

	// Now, we'll insert the resolvers into the log.
	if err := testLog.InsertUnresolvedContracts(resolvers...); err != nil {
		t.Fatalf("unable to insert resolvers: %v", err)
	}

	// With the resolvers inserted, we'll now attempt to retrieve them from
	// the database, so we can compare them to the versions we created
	// above.
	diskResolvers, err := testLog.FetchUnresolvedContracts()
	if err != nil {
		t.Fatalf("unable to retrieve resolvers: %v", err)
	}

	if len(diskResolvers) != len(resolvers) {
		t.Fatalf("expected %v resolvers, instead got %v: %#v",
			len(resolvers), len(diskResolvers),
			diskResolvers)
	}

	// Now we'll run through each of the resolvers, and ensure that each
	// maps perfectly to a resolver that we inserted previously.
	for _, diskResolver := range diskResolvers {
		resKey := string(diskResolver.ResolverKey())
		originalResolver, ok := resolverMap[resKey]
		if !ok {
			t.Fatalf("unable to find resolver match for %T: %v",
				diskResolver, resKey)
		}

		assertResolversEqual(t, originalResolver, diskResolver)
	}

	// We'll now delete the state, then attempt to retrieve the set of
	// resolvers; no resolvers should be found.
	if err := testLog.WipeHistory(); err != nil {
		t.Fatalf("unable to wipe log: %v", err)
	}
	diskResolvers, _ = testLog.FetchUnresolvedContracts()
	if len(diskResolvers) != 0 {
		t.Fatalf("no resolvers should be found, instead %v were",
			len(diskResolvers))
	}
}

// TestContractResolution tests that once we mark a contract as resolved, it's
// properly removed from the database.
func TestContractResolution(t *testing.T) {
	t.Parallel()

	// First, we'll create a test instance of the ArbitratorLog
	// implementation backed by boltdb.
	testLog, cleanUp, err := newTestBoltArbLog(
		testChainHash, testChanPoint1,
	)
	if err != nil {
		t.Fatalf("unable to create test log: %v", err)
	}
	defer cleanUp()

	// We'll now create a timeout resolver that we'll be using for the
	// duration of this test.
	timeoutResolver := &htlcTimeoutResolver{
		htlcResolution: lnwallet.OutgoingHtlcResolution{
			Expiry:          991,
			SignedTimeoutTx: nil,
			CsvDelay:        992,
			ClaimOutpoint:   randOutPoint(),
			SweepSignDesc:   testSignDesc,
		},
		outputIncubating: true,
		resolved:         true,
		broadcastHeight:  192,
		htlcIndex:        9912,
	}

	// First, we'll insert the resolver into the database and ensure that
	// we get the same resolver out the other side.
	err = testLog.InsertUnresolvedContracts(timeoutResolver)
	if err != nil {
		t.Fatalf("unable to insert contract into db: %v", err)
	}
	dbContracts, err := testLog.FetchUnresolvedContracts()
	if err != nil {
		t.Fatalf("unable to fetch contracts from db: %v", err)
	}
	assertResolversEqual(t, timeoutResolver, dbContracts[0])

	// Now, we'll mark the contract as resolved within the database.
	if err := testLog.ResolveContract(timeoutResolver); err != nil {
		t.Fatalf("unable to resolve contract: %v", err)
	}

	// At this point, no contracts should exist within the log.
	dbContracts, err = testLog.FetchUnresolvedContracts()
	if err != nil {
		t.Fatalf("unable to fetch contracts from db: %v", err)
	}
	if len(dbContracts) != 0 {
		t.Fatalf("no contracts should be found in the db, instead %v "+
			"were", len(dbContracts))
	}
}

// TestContractSwapping ensures that callers are able to atomically swap two
// distinct contracts for one another.
func TestContractSwapping(t *testing.T) {
	t.Parallel()

	// First, we'll create a test instance of the ArbitratorLog
	// implementation backed by boltdb.
	testLog, cleanUp, err := newTestBoltArbLog(
		testChainHash, testChanPoint1,
	)
	if err != nil {
		t.Fatalf("unable to create test log: %v", err)
	}
	defer cleanUp()

	// We'll create two resolvers, a regular timeout resolver, and the
	// contest resolver that eventually turns into the timeout resolver.
	timeoutResolver := htlcTimeoutResolver{
		htlcResolution: lnwallet.OutgoingHtlcResolution{
			Expiry:          99,
			SignedTimeoutTx: nil,
			CsvDelay:        99,
			ClaimOutpoint:   randOutPoint(),
			SweepSignDesc:   testSignDesc,
		},
		outputIncubating: true,
		resolved:         true,
		broadcastHeight:  102,
		htlcIndex:        12,
	}
	contestResolver := &htlcOutgoingContestResolver{
		htlcTimeoutResolver: timeoutResolver,
	}

	// We'll first insert the contest resolver into the log.
	err = testLog.InsertUnresolvedContracts(contestResolver)
	if err != nil {
		t.Fatalf("unable to insert contract into db: %v", err)
	}

	// With the resolver inserted, we'll now attempt to atomically swap it
	// for its underlying timeout resolver.
	err = testLog.SwapContract(contestResolver, &timeoutResolver)
	if err != nil {
		t.Fatalf("unable to swap contracts: %v", err)
	}

	// At this point, there should now only be a single contract in the
	// database.
	dbContracts, err := testLog.FetchUnresolvedContracts()
	if err != nil {
		t.Fatalf("unable to fetch contracts from db: %v", err)
	}
	if len(dbContracts) != 1 {
		t.Fatalf("one contract should be found in the db, instead %v "+
			"were", len(dbContracts))
	}

	// That single contract should be the underlying timeout resolver.
	assertResolversEqual(t, &timeoutResolver, dbContracts[0])
}

// TestContractResolutionsStorage tests that we're able to properly store and
// retrieve contract resolutions written to disk.
func TestContractResolutionsStorage(t *testing.T) {
	t.Parallel()

	// First, we'll create a test instance of the ArbitratorLog
	// implementation backed by boltdb.
	testLog, cleanUp, err := newTestBoltArbLog(
		testChainHash, testChanPoint1,
	)
	if err != nil {
		t.Fatalf("unable to create test log: %v", err)
	}
	defer cleanUp()

	// With the test log created, we'll now craft a contract resolution
	// that we'll be using for the duration of this test.
	res := ContractResolutions{
		CommitHash: testChainHash,
		CommitResolution: &lnwallet.CommitOutputResolution{
			SelfOutPoint:       testChanPoint2,
			SelfOutputSignDesc: testSignDesc,
			MaturityDelay:      101,
		},
		HtlcResolutions: lnwallet.HtlcResolutions{
			IncomingHTLCs: []lnwallet.IncomingHtlcResolution{
				{
					Preimage:        testPreimage,
					SignedSuccessTx: nil,
					CsvDelay:        900,
					ClaimOutpoint:   randOutPoint(),
					SweepSignDesc:   testSignDesc,
				},
			},
			OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{
				{
					Expiry:          103,
					SignedTimeoutTx: nil,
					CsvDelay:        923923,
					ClaimOutpoint:   randOutPoint(),
					SweepSignDesc:   testSignDesc,
				},
			},
		},
	}

	// First make sure that fetching unlogged contract resolutions will
	// fail.
	_, err = testLog.FetchContractResolutions()
	if err == nil {
		t.Fatalf("expected reading unlogged resolution from db to fail")
	}

	// Insert the resolutions into the database, then immediately retrieve
	// them so we can compare equality against the original version.
	if err := testLog.LogContractResolutions(&res); err != nil {
		t.Fatalf("unable to insert resolutions into db: %v", err)
	}
	diskRes, err := testLog.FetchContractResolutions()
	if err != nil {
		t.Fatalf("unable to read resolution from db: %v", err)
	}

	if !reflect.DeepEqual(&res, diskRes) {
		t.Fatalf("resolution mismatch: expected %#v, got %#v",
			&res, diskRes)
	}

	// We'll now delete the state, then attempt to retrieve the contract
	// resolutions; none should be found.
	if err := testLog.WipeHistory(); err != nil {
		t.Fatalf("unable to wipe log: %v", err)
	}
	_, err = testLog.FetchContractResolutions()
	if err != errScopeBucketNoExist {
		t.Fatalf("unexpected error: %v", err)
	}
}

// TestChainActionStorage tests that we're able to properly store a set of
// chain actions, and then retrieve the same set of chain actions from disk.
func TestChainActionStorage(t *testing.T) {
	t.Parallel()

	// First, we'll create a test instance of the ArbitratorLog
	// implementation backed by boltdb.
	testLog, cleanUp, err := newTestBoltArbLog(
		testChainHash, testChanPoint2,
	)
	if err != nil {
		t.Fatalf("unable to create test log: %v", err)
	}
	defer cleanUp()

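	// We'll construct a chain action map that assigns a single randomized
	// HTLC to each type of chain action.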
	chainActions := ChainActionMap{
		NoAction: []channeldb.HTLC{
			{
				RHash:         testPreimage,
				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
				RefundTimeout: prand.Uint32(),
				OutputIndex:   int32(prand.Uint32()),
				Incoming:      true,
				HtlcIndex:     prand.Uint64(),
				LogIndex:      prand.Uint64(),
				OnionBlob:     make([]byte, 0),
				Signature:     make([]byte, 0),
			},
		},
		HtlcTimeoutAction: []channeldb.HTLC{
			{
				RHash:         testPreimage,
				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
				RefundTimeout: prand.Uint32(),
				OutputIndex:   int32(prand.Uint32()),
				Incoming:      true,
				HtlcIndex:     prand.Uint64(),
				LogIndex:      prand.Uint64(),
				OnionBlob:     make([]byte, 0),
				Signature:     make([]byte, 0),
			},
		},
		HtlcClaimAction: []channeldb.HTLC{
			{
				RHash:         testPreimage,
				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
				RefundTimeout: prand.Uint32(),
				OutputIndex:   int32(prand.Uint32()),
				Incoming:      true,
				HtlcIndex:     prand.Uint64(),
				LogIndex:      prand.Uint64(),
				OnionBlob:     make([]byte, 0),
				Signature:     make([]byte, 0),
			},
		},
		HtlcFailNowAction: []channeldb.HTLC{
			{
				RHash:         testPreimage,
				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
				RefundTimeout: prand.Uint32(),
				OutputIndex:   int32(prand.Uint32()),
				Incoming:      true,
				HtlcIndex:     prand.Uint64(),
				LogIndex:      prand.Uint64(),
				OnionBlob:     make([]byte, 0),
				Signature:     make([]byte, 0),
			},
		},
		HtlcOutgoingWatchAction: []channeldb.HTLC{
			{
				RHash:         testPreimage,
				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
				RefundTimeout: prand.Uint32(),
				OutputIndex:   int32(prand.Uint32()),
				Incoming:      true,
				HtlcIndex:     prand.Uint64(),
				LogIndex:      prand.Uint64(),
				OnionBlob:     make([]byte, 0),
				Signature:     make([]byte, 0),
			},
		},
		HtlcIncomingWatchAction: []channeldb.HTLC{
			{
				RHash:         testPreimage,
				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
				RefundTimeout: prand.Uint32(),
				OutputIndex:   int32(prand.Uint32()),
				Incoming:      true,
				HtlcIndex:     prand.Uint64(),
				LogIndex:      prand.Uint64(),
				OnionBlob:     make([]byte, 0),
				Signature:     make([]byte, 0),
			},
		},
	}

	// With our set of test chain actions constructed, we'll now insert
	// them into the database, retrieve them, then assert equality with the
	// set of chain actions created above.
	if err := testLog.LogChainActions(chainActions); err != nil {
		t.Fatalf("unable to write chain actions: %v", err)
	}
	diskActions, err := testLog.FetchChainActions()
	if err != nil {
		t.Fatalf("unable to read chain actions: %v", err)
	}

	for k, contracts := range chainActions {
		diskContracts := diskActions[k]
		if !reflect.DeepEqual(contracts, diskContracts) {
			t.Fatalf("chain action mismatch: expected %v, got %v",
				spew.Sdump(contracts), spew.Sdump(diskContracts))
		}
	}

	// We'll now delete the state, then attempt to retrieve the set of
	// chain actions; none should be found.
	if err := testLog.WipeHistory(); err != nil {
		t.Fatalf("unable to wipe log: %v", err)
	}
	actions, _ := testLog.FetchChainActions()
	if len(actions) != 0 {
		t.Fatalf("expected no chain actions, instead found: %v",
			len(actions))
	}
}

// TestStateMutation tests that we're able to properly mutate the state of the
// log, then retrieve that same mutated state from disk.
func TestStateMutation(t *testing.T) {
	t.Parallel()

	testLog, cleanUp, err := newTestBoltArbLog(
		testChainHash, testChanPoint1,
	)
	if err != nil {
		t.Fatalf("unable to create test log: %v", err)
	}
	defer cleanUp()

	// The default state of an arbitrator should be StateDefault.
	arbState, err := testLog.CurrentState()
	if err != nil {
		t.Fatalf("unable to read arb state: %v", err)
	}
	if arbState != StateDefault {
		t.Fatalf("state mismatch: expected %v, got %v", StateDefault,
			arbState)
	}

	// We should now be able to mutate the state to an arbitrary one of our
	// choosing, then read that same state back from disk.
	if err := testLog.CommitState(StateFullyResolved); err != nil {
		t.Fatalf("unable to write state: %v", err)
	}
	arbState, err = testLog.CurrentState()
	if err != nil {
		t.Fatalf("unable to read arb state: %v", err)
	}
	if arbState != StateFullyResolved {
		t.Fatalf("state mismatch: expected %v, got %v",
			StateFullyResolved, arbState)
	}

	// Next, we'll wipe our state to reset the log.
	err = testLog.WipeHistory()
	if err != nil {
		t.Fatalf("unable to wipe history: %v", err)
	}

	// If we try to query for the state again, we should get the default
	// state again.
	arbState, _ = testLog.CurrentState()
	if arbState != StateDefault {
		t.Fatalf("state mismatch: expected %v, got %v", StateDefault,
			arbState)
	}
}

// TestScopeIsolation tests that two distinct ArbitratorLog instances with two
// distinct scopes don't overwrite the state of one another.
func TestScopeIsolation(t *testing.T) {
	t.Parallel()

	// We'll create two distinct test logs. Each log will have a unique
	// scope key, and therefore should be isolated from the other on disk.
	testLog1, cleanUp1, err := newTestBoltArbLog(
		testChainHash, testChanPoint1,
	)
	if err != nil {
		t.Fatalf("unable to create test log: %v", err)
	}
	defer cleanUp1()

	testLog2, cleanUp2, err := newTestBoltArbLog(
		testChainHash, testChanPoint2,
	)
	if err != nil {
		t.Fatalf("unable to create test log: %v", err)
	}
	defer cleanUp2()

	// We'll now update the current state of both the logs to a unique
	// state.
	if err := testLog1.CommitState(StateWaitingFullResolution); err != nil {
		t.Fatalf("unable to write state: %v", err)
	}
	if err := testLog2.CommitState(StateContractClosed); err != nil {
		t.Fatalf("unable to write state: %v", err)
	}

	// Querying each log, the states should be the prior one we set, and be
	// disjoint.
	log1State, err := testLog1.CurrentState()
	if err != nil {
		t.Fatalf("unable to read arb state: %v", err)
	}
	log2State, err := testLog2.CurrentState()
	if err != nil {
		t.Fatalf("unable to read arb state: %v", err)
	}

	if log1State == log2State {
		t.Fatalf("log states are the same: %v", log1State)
	}

	if log1State != StateWaitingFullResolution {
		t.Fatalf("state mismatch: expected %v, got %v",
			StateWaitingFullResolution, log1State)
	}
	if log2State != StateContractClosed {
		t.Fatalf("state mismatch: expected %v, got %v",
			StateContractClosed, log2State)
	}
}

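// init populates the public key of the test sign descriptor and seeds the
// pseudo-random number generator used throughout the tests.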
func init() {
	testSignDesc.KeyDesc.PubKey, _ = btcec.ParsePubKey(key1, btcec.S256())

	prand.Seed(time.Now().Unix())
}