package contractcourt

import (
	"fmt"
	"sync"
	"testing"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/lntypes"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
)
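
// mockArbitratorLog is an in-memory implementation of the ArbitratorLog
// interface used by the tests in this file. The fail* fields let individual
// tests inject failures at specific points of the arbitrator's state machine,
// and newStates exposes every committed state so tests can assert on the
// exact sequence of transitions.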
type mockArbitratorLog struct {
	state           ArbitratorState
	newStates       chan ArbitratorState
	failLog         bool
	failFetch       error
	failCommit      bool
	failCommitState ArbitratorState
	resolutions     *ContractResolutions
	chainActions    ChainActionMap
	resolvers       map[ContractResolver]struct{}

	sync.Mutex
}

// A compile time check to ensure mockArbitratorLog meets the ArbitratorLog
// interface.
var _ ArbitratorLog = (*mockArbitratorLog)(nil)

func (b *mockArbitratorLog) CurrentState() (ArbitratorState, error) {
	return b.state, nil
}
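
// CommitState records the requested state transition and forwards it to the
// newStates channel. If the test has set failCommit and the target state
// equals failCommitState, the commit fails instead, letting tests exercise
// recovery from a failed state commit.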
func (b *mockArbitratorLog) CommitState(s ArbitratorState) error {
	if b.failCommit && s == b.failCommitState {
		return fmt.Errorf("intentional commit error at state %v",
			b.failCommitState)
	}
	b.state = s
	b.newStates <- s
	return nil
}

func (b *mockArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver,
	error) {

	b.Lock()
	v := make([]ContractResolver, len(b.resolvers))
	idx := 0
	for resolver := range b.resolvers {
		v[idx] = resolver
		idx++
	}
	b.Unlock()

	return v, nil
}

func (b *mockArbitratorLog) InsertUnresolvedContracts(
	resolvers ...ContractResolver) error {

	b.Lock()
	for _, resolver := range resolvers {
		b.resolvers[resolver] = struct{}{}
	}
	b.Unlock()
	return nil
}

func (b *mockArbitratorLog) SwapContract(oldContract,
	newContract ContractResolver) error {

	b.Lock()
	delete(b.resolvers, oldContract)
	b.resolvers[newContract] = struct{}{}
	b.Unlock()

	return nil
}

func (b *mockArbitratorLog) ResolveContract(res ContractResolver) error {
	b.Lock()
	delete(b.resolvers, res)
	b.Unlock()

	return nil
}
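
// LogContractResolutions stores the given resolutions, or fails if the test
// has set failLog, so tests can exercise the arbitrator's handling of a
// failed log write.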
func (b *mockArbitratorLog) LogContractResolutions(c *ContractResolutions) error {
	if b.failLog {
		return fmt.Errorf("intentional log failure")
	}
	b.resolutions = c
	return nil
}

func (b *mockArbitratorLog) FetchContractResolutions() (*ContractResolutions, error) {
	if b.failFetch != nil {
		return nil, b.failFetch
	}

	return b.resolutions, nil
}

func (b *mockArbitratorLog) LogChainActions(actions ChainActionMap) error {
	b.chainActions = actions
	return nil
}

func (b *mockArbitratorLog) FetchChainActions() (ChainActionMap, error) {
	actionsMap := b.chainActions
	return actionsMap, nil
}

func (b *mockArbitratorLog) WipeHistory() error {
	return nil
}
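
// mockChainIO is a no-op chain backend whose methods simply return empty
// values. It is plugged into the ChainIO field of the ChainArbitratorConfig
// below so the tests never have to talk to a real chain backend.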
type mockChainIO struct{}

func (*mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) {
	return nil, 0, nil
}

func (*mockChainIO) GetUtxo(op *wire.OutPoint, _ []byte,
	heightHint uint32) (*wire.TxOut, error) {

	return nil, nil
}

func (*mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
	return nil, nil
}

func (*mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
	return nil, nil
}
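
// createTestChannelArbitrator returns a ChannelArbitrator wired up with mocked
// dependencies and backed by the given ArbitratorLog, along with a channel
// that is signalled whenever MarkChannelResolved is called. The tests below
// drive it roughly as follows (a sketch of the pattern used throughout this
// file):
//
//	chanArb, resolved, err := createTestChannelArbitrator(log)
//	if err := chanArb.Start(); err != nil { ... }
//	defer chanArb.Stop()
//	// ...send a chain event, then wait on <-resolved.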
func createTestChannelArbitrator(log ArbitratorLog) (*ChannelArbitrator,
	chan struct{}, error) {

	blockEpoch := &chainntnfs.BlockEpochEvent{
		Cancel: func() {},
	}

	chanPoint := wire.OutPoint{}
	shortChanID := lnwire.ShortChannelID{}
	chanEvents := &ChainEventSubscription{
		RemoteUnilateralClosure: make(chan *lnwallet.UnilateralCloseSummary, 1),
		LocalUnilateralClosure:  make(chan *LocalUnilateralCloseInfo, 1),
		CooperativeClosure:      make(chan *CooperativeCloseInfo, 1),
		ContractBreach:          make(chan *lnwallet.BreachRetribution, 1),
	}

	chainIO := &mockChainIO{}
	chainArbCfg := ChainArbitratorConfig{
		ChainIO: chainIO,
		PublishTx: func(*wire.MsgTx) error {
			return nil
		},
		DeliverResolutionMsg: func(...ResolutionMsg) error {
			return nil
		},
		BroadcastDelta: 5,
		Notifier: &mockNotifier{
			epochChan: make(chan *chainntnfs.BlockEpoch),
			spendChan: make(chan *chainntnfs.SpendDetail),
			confChan:  make(chan *chainntnfs.TxConfirmation),
		},
		IncubateOutputs: func(wire.OutPoint, *lnwallet.CommitOutputResolution,
			*lnwallet.OutgoingHtlcResolution,
			*lnwallet.IncomingHtlcResolution, uint32) error {
			return nil
		},
		SettleInvoice: func(lntypes.Hash, lnwire.MilliSatoshi) error {
			return nil
		},
	}

	// We'll use the resolvedChan to synchronize on calls to
	// MarkChannelResolved.
	resolvedChan := make(chan struct{}, 1)

	// Next we'll create the matching configuration struct that contains
	// all interfaces and methods the arbitrator needs to do its job.
	arbCfg := ChannelArbitratorConfig{
		ChanPoint:   chanPoint,
		ShortChanID: shortChanID,
		BlockEpochs: blockEpoch,
		MarkChannelResolved: func() error {
			resolvedChan <- struct{}{}
			return nil
		},
		ForceCloseChan: func() (*lnwallet.LocalForceCloseSummary, error) {
			summary := &lnwallet.LocalForceCloseSummary{
				CloseTx:         &wire.MsgTx{},
				HtlcResolutions: &lnwallet.HtlcResolutions{},
			}
			return summary, nil
		},
		MarkCommitmentBroadcasted: func() error {
			return nil
		},
		MarkChannelClosed: func(*channeldb.ChannelCloseSummary) error {
			return nil
		},
		IsPendingClose:        false,
		ChainArbitratorConfig: chainArbCfg,
		ChainEvents:           chanEvents,
	}

	return NewChannelArbitrator(arbCfg, nil, log), resolvedChan, nil
}

// assertState checks that the ChannelArbitrator is in the state we expect it
// to be.
func assertState(t *testing.T, c *ChannelArbitrator, expected ArbitratorState) {
	if c.state != expected {
		t.Fatalf("expected state %v, was %v", expected, c.state)
	}
}

// TestChannelArbitratorCooperativeClose tests that the ChannelArbitrator
// correctly marks the channel resolved in case a cooperative close is
// confirmed.
func TestChannelArbitratorCooperativeClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	assertState(t, chanArb, StateDefault)

	// We set up a channel to detect when MarkChannelClosed is called.
	closeInfos := make(chan *channeldb.ChannelCloseSummary)
	chanArb.cfg.MarkChannelClosed = func(
		closeInfo *channeldb.ChannelCloseSummary) error {
		closeInfos <- closeInfo
		return nil
	}

	// A cooperative close should trigger a MarkChannelClosed +
	// MarkChannelResolved.
	closeInfo := &CooperativeCloseInfo{
		&channeldb.ChannelCloseSummary{},
	}
	chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo

	select {
	case c := <-closeInfos:
		if c.CloseType != channeldb.CooperativeClose {
			t.Fatalf("expected cooperative close, got %v", c.CloseType)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout waiting for channel close")
	}

	// It should mark the channel as resolved.
	select {
	case <-resolved:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}
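
// assertStateTransitions reads from newStates and asserts that the state
// machine steps through the expected states in order, failing the test if a
// transition does not arrive within a timeout.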
func assertStateTransitions(t *testing.T, newStates <-chan ArbitratorState,
	expectedStates ...ArbitratorState) {

	t.Helper()

	for _, exp := range expectedStates {
		var state ArbitratorState
		select {
		case state = <-newStates:
		case <-time.After(5 * time.Second):
			t.Fatalf("new state not received")
		}

		if state != exp {
			t.Fatalf("expected new state %v, got %v", exp, state)
		}
	}
}

// TestChannelArbitratorRemoteForceClose checks that the ChannelArbitrator goes
// through the expected states if a remote force close is observed in the
// chain.
func TestChannelArbitratorRemoteForceClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	assertState(t, chanArb, StateDefault)

	// Send a remote force close event.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}

	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose

	// It should transition StateDefault -> StateContractClosed ->
	// StateFullyResolved.
	assertStateTransitions(
		t, log.newStates, StateContractClosed, StateFullyResolved,
	)

	// It should also mark the channel as resolved.
	select {
	case <-resolved:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorLocalForceClose tests that the ChannelArbitrator goes
// through the expected states in case we request it to force close the
// channel, and the local force close event is observed in chain.
func TestChannelArbitratorLocalForceClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	assertState(t, chanArb, StateDefault)

	// We create a channel we can use to pause the ChannelArbitrator at the
	// point where it broadcasts the close tx, and check its state.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return nil
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	assertStateTransitions(t, log.newStates, StateBroadcastCommit)

	// When it is broadcasting the force close, its state should be
	// StateBroadcastCommit.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(15 * time.Second):
		t.Fatalf("did not get state update")
	}

	// After broadcasting, the state should transition to
	// StateCommitmentBroadcasted.
	assertStateTransitions(t, log.newStates, StateCommitmentBroadcasted)

	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// After broadcasting the close tx, it should be in state
	// StateCommitmentBroadcasted.
	assertState(t, chanArb, StateCommitmentBroadcasted)

	// Now notify about the local force close getting confirmed.
	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		&chainntnfs.SpendDetail{},
		&lnwallet.LocalForceCloseSummary{
			CloseTx:         &wire.MsgTx{},
			HtlcResolutions: &lnwallet.HtlcResolutions{},
		},
		&channeldb.ChannelCloseSummary{},
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	assertStateTransitions(t, log.newStates, StateContractClosed,
		StateFullyResolved)

	// It should also mark the channel as resolved.
	select {
	case <-resolved:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorLocalForceClosePendingHtlc tests that the
// ChannelArbitrator goes through the expected states in case we request it to
// force close a channel that still has an HTLC pending.
func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
	arbLog := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
		resolvers: make(map[ContractResolver]struct{}),
	}

	chanArb, resolved, err := createTestChannelArbitrator(arbLog)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	incubateChan := make(chan struct{})
	chanArb.cfg.IncubateOutputs = func(_ wire.OutPoint,
		_ *lnwallet.CommitOutputResolution,
		_ *lnwallet.OutgoingHtlcResolution,
		_ *lnwallet.IncomingHtlcResolution, _ uint32) error {

		incubateChan <- struct{}{}

		return nil
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// Create the htlcUpdates channel.
	htlcUpdates := make(chan []channeldb.HTLC)

	signals := &ContractSignals{
		HtlcUpdates: htlcUpdates,
		ShortChanID: lnwire.ShortChannelID{},
	}
	chanArb.UpdateContractSignals(signals)

	// Add an HTLC to the channel arbitrator.
	htlc := channeldb.HTLC{
		Incoming:  false,
		Amt:       10000,
		HtlcIndex: 0,
	}

	htlcUpdates <- []channeldb.HTLC{
		htlc,
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// The force close request should trigger broadcast of the commitment
	// transaction.
	assertStateTransitions(t, arbLog.newStates,
		StateBroadcastCommit, StateCommitmentBroadcasted)
	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// Now notify about the local force close getting confirmed.
	closeTx := &wire.MsgTx{}

	htlcOp := wire.OutPoint{
		Hash:  closeTx.TxHash(),
		Index: 0,
	}

	// Set up the outgoing resolution. Populate SignedTimeoutTx because
	// our commitment transaction got confirmed.
	outgoingRes := lnwallet.OutgoingHtlcResolution{
		Expiry: 10,
		SweepSignDesc: input.SignDescriptor{
			Output: &wire.TxOut{},
		},
		SignedTimeoutTx: &wire.MsgTx{
			TxIn: []*wire.TxIn{
				{
					PreviousOutPoint: htlcOp,
					Witness:          [][]byte{{}},
				},
			},
			TxOut: []*wire.TxOut{
				{},
			},
		},
	}

	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		&chainntnfs.SpendDetail{},
		&lnwallet.LocalForceCloseSummary{
			CloseTx: closeTx,
			HtlcResolutions: &lnwallet.HtlcResolutions{
				OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{
					outgoingRes,
				},
			},
		},
		&channeldb.ChannelCloseSummary{},
	}

	assertStateTransitions(t, arbLog.newStates, StateContractClosed,
		StateWaitingFullResolution)

	// htlcOutgoingContestResolver is now active and waiting for the HTLC
	// to expire. It should not yet have passed it on for incubation.
	select {
	case <-incubateChan:
		t.Fatalf("contract should not be incubated yet")
	default:
	}

	// Send a notification that the expiry height has been reached.
	notifier := chanArb.cfg.Notifier.(*mockNotifier)
	notifier.epochChan <- &chainntnfs.BlockEpoch{Height: 10}

	// htlcOutgoingContestResolver is now transforming into a
	// htlcTimeoutResolver and should send the contract off for incubation.
	select {
	case <-incubateChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// Notify the resolver that the output of the commitment has been
	// spent.
	notifier.confChan <- &chainntnfs.TxConfirmation{}

	// As this is our own commitment transaction, the HTLC will go through
	// to the second level. The channel arbitrator should still not be
	// marked as resolved.
	select {
	case <-resolved:
		t.Fatalf("channel resolved prematurely")
	default:
	}

	// Notify the resolver that the second level transaction is spent.
	notifier.spendChan <- &chainntnfs.SpendDetail{}

	// At this point the channel should be marked as resolved.
	assertStateTransitions(t, arbLog.newStates, StateFullyResolved)

	select {
	case <-resolved:
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorLocalForceCloseRemoteConfirmed tests that the
// ChannelArbitrator behaves as expected in the case where we request a local
// force close, but a remote commitment ends up being confirmed in chain.
func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	assertState(t, chanArb, StateDefault)

	// Create a channel we can use to assert the state when it publishes
	// the close tx.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return nil
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	assertStateTransitions(t, log.newStates, StateBroadcastCommit)

	// We expect it to be in state StateBroadcastCommit when publishing
	// the force close.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(15 * time.Second):
		t.Fatalf("no state update received")
	}

	// After broadcasting, the state should transition to
	// StateCommitmentBroadcasted.
	assertStateTransitions(t, log.newStates, StateCommitmentBroadcasted)

	// Wait for a response to the force close.
	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// The state should be StateCommitmentBroadcasted.
	assertState(t, chanArb, StateCommitmentBroadcasted)

	// Now notify about the _REMOTE_ commitment getting confirmed.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}
	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose

	// It should transition StateContractClosed -> StateFullyResolved.
	assertStateTransitions(t, log.newStates, StateContractClosed,
		StateFullyResolved)

	// It should resolve.
	select {
	case <-resolved:
		// Expected.
	case <-time.After(15 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorLocalForceDoubleSpend tests that the
// ChannelArbitrator behaves as expected in the case where we request a local
// force close, but we fail broadcasting our commitment because a remote
// commitment has already been published.
func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	assertState(t, chanArb, StateDefault)

	// Return ErrDoubleSpend when attempting to publish the tx.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return lnwallet.ErrDoubleSpend
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	assertStateTransitions(t, log.newStates, StateBroadcastCommit)

	// We expect it to be in state StateBroadcastCommit when publishing
	// the force close.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(15 * time.Second):
		t.Fatalf("no state update received")
	}

	// After broadcasting, the state should transition to
	// StateCommitmentBroadcasted.
	assertStateTransitions(t, log.newStates, StateCommitmentBroadcasted)

	// Wait for a response to the force close.
	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// The state should be StateCommitmentBroadcasted.
	assertState(t, chanArb, StateCommitmentBroadcasted)

	// Now notify about the _REMOTE_ commitment getting confirmed.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}
	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose

	// It should transition StateContractClosed -> StateFullyResolved.
	assertStateTransitions(t, log.newStates, StateContractClosed,
		StateFullyResolved)

	// It should resolve.
	select {
	case <-resolved:
		// Expected.
	case <-time.After(15 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorPersistence tests that the ChannelArbitrator is able to
// keep advancing the state machine from various states after restart.
func TestChannelArbitratorPersistence(t *testing.T) {
	// Start out with a log that will fail writing the set of resolutions.
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
		failLog:   true,
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// It should start in StateDefault.
	assertState(t, chanArb, StateDefault)

	// Send a remote force close event.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}

	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose

	// Since writing the resolutions fails, the arbitrator should not
	// advance to the next state.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateDefault {
		t.Fatalf("expected to stay in StateDefault")
	}
	chanArb.Stop()

	// Create a new arbitrator with the same log.
	chanArb, resolved, err = createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// Again, it should start up in the default state.
	assertState(t, chanArb, StateDefault)

	// Now we make the log succeed writing the resolutions, but fail when
	// attempting to close the channel.
	log.failLog = false
	chanArb.cfg.MarkChannelClosed = func(*channeldb.ChannelCloseSummary) error {
		return fmt.Errorf("intentional close error")
	}

	// Send a new remote force close event.
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose

	// Since closing the channel failed, the arbitrator should stay in the
	// default state.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateDefault {
		t.Fatalf("expected to stay in StateDefault")
	}
	chanArb.Stop()

	// Create yet another arbitrator with the same log.
	chanArb, resolved, err = createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// Starts out in StateDefault.
	assertState(t, chanArb, StateDefault)

	// Now make fetching the resolutions fail.
	log.failFetch = fmt.Errorf("intentional fetch failure")
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose

	// Since logging the resolutions and closing the channel now succeeds,
	// it should advance to StateContractClosed.
	assertStateTransitions(
		t, log.newStates, StateContractClosed,
	)

	// It should not advance further, however, as fetching resolutions
	// failed.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateContractClosed {
		t.Fatalf("expected to stay in StateContractClosed")
	}
	chanArb.Stop()

	// Create a new arbitrator, and now make fetching resolutions succeed.
	log.failFetch = nil
	chanArb, resolved, err = createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// Finally it should advance to StateFullyResolved.
	assertStateTransitions(
		t, log.newStates, StateFullyResolved,
	)

	// It should also mark the channel as resolved.
	select {
	case <-resolved:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorCommitFailure tests that the channel arbitrator is able
// to recover from a failed CommitState call at restart.
func TestChannelArbitratorCommitFailure(t *testing.T) {

	testCases := []struct {

		// closeType is the type of channel close we want to test.
		closeType channeldb.ClosureType

		// sendEvent is a function that will send the event
		// corresponding to this test's closeType to the passed
		// ChannelArbitrator.
		sendEvent func(chanArb *ChannelArbitrator)

		// expectedStates are the states we expect the state machine to
		// go through after a restart and successful log commit.
		expectedStates []ArbitratorState
	}{
		{
			closeType: channeldb.CooperativeClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				closeInfo := &CooperativeCloseInfo{
					&channeldb.ChannelCloseSummary{},
				}
				chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo
			},
			expectedStates: []ArbitratorState{StateFullyResolved},
		},
		{
			closeType: channeldb.RemoteForceClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				commitSpend := &chainntnfs.SpendDetail{
					SpenderTxHash: &chainhash.Hash{},
				}

				uniClose := &lnwallet.UnilateralCloseSummary{
					SpendDetail:     commitSpend,
					HtlcResolutions: &lnwallet.HtlcResolutions{},
				}
				chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose
			},
			expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
		},
		{
			closeType: channeldb.LocalForceClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
					&chainntnfs.SpendDetail{},
					&lnwallet.LocalForceCloseSummary{
						CloseTx:         &wire.MsgTx{},
						HtlcResolutions: &lnwallet.HtlcResolutions{},
					},
					&channeldb.ChannelCloseSummary{},
				}
			},
			expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
		},
	}

	for _, test := range testCases {
		log := &mockArbitratorLog{
			state:      StateDefault,
			newStates:  make(chan ArbitratorState, 5),
			failCommit: true,

			// Set the log to fail on the first expected state
			// after state machine progress for this test case.
			failCommitState: test.expectedStates[0],
		}

		chanArb, resolved, err := createTestChannelArbitrator(log)
		if err != nil {
			t.Fatalf("unable to create ChannelArbitrator: %v", err)
		}

		if err := chanArb.Start(); err != nil {
			t.Fatalf("unable to start ChannelArbitrator: %v", err)
		}

		// It should start in StateDefault.
		assertState(t, chanArb, StateDefault)

		closed := make(chan struct{})
		chanArb.cfg.MarkChannelClosed = func(
			*channeldb.ChannelCloseSummary) error {
			close(closed)
			return nil
		}

		// Send the test event to trigger the state machine.
		test.sendEvent(chanArb)

		select {
		case <-closed:
		case <-time.After(5 * time.Second):
			t.Fatalf("channel was not marked closed")
		}

		// Since the channel was marked closed in the database, but the
		// commit to the next state failed, the state should still be
		// StateDefault.
		time.Sleep(100 * time.Millisecond)
		if log.state != StateDefault {
			t.Fatalf("expected to stay in StateDefault, instead "+
				"has %v", log.state)
		}
		chanArb.Stop()

		// Start the arbitrator again, with IsPendingClose reporting
		// the channel closed in the database.
		chanArb, resolved, err = createTestChannelArbitrator(log)
		if err != nil {
			t.Fatalf("unable to create ChannelArbitrator: %v", err)
		}

		log.failCommit = false

		chanArb.cfg.IsPendingClose = true
		chanArb.cfg.ClosingHeight = 100
		chanArb.cfg.CloseType = test.closeType

		if err := chanArb.Start(); err != nil {
			t.Fatalf("unable to start ChannelArbitrator: %v", err)
		}

		// Since the channel is marked closed in the database, it
		// should advance to the expected states.
		assertStateTransitions(
			t, log.newStates, test.expectedStates...,
		)

		// It should also mark the channel as resolved.
		select {
		case <-resolved:
			// Expected.
		case <-time.After(5 * time.Second):
			t.Fatalf("contract was not resolved")
		}
	}
}

// TestChannelArbitratorEmptyResolutions makes sure that a channel that is
// pending close in the database, but hasn't had any resolutions logged, will
// not be marked resolved. This situation must be handled to avoid closing
// channels from earlier versions of the ChannelArbitrator, which didn't have a
// proper handoff from the ChainWatcher, and we could risk ending up in a state
// where the channel was closed in the DB, but the resolutions weren't properly
// written.
func TestChannelArbitratorEmptyResolutions(t *testing.T) {
	// Start out with a log that returns errNoResolutions when fetching the
	// set of resolutions.
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
		failFetch: errNoResolutions,
	}

	chanArb, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	chanArb.cfg.IsPendingClose = true
	chanArb.cfg.ClosingHeight = 100
	chanArb.cfg.CloseType = channeldb.RemoteForceClose

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// It should not advance its state beyond StateContractClosed, since
	// fetching resolutions fails.
	assertStateTransitions(
		t, log.newStates, StateContractClosed,
	)

	// It should not advance further, however, as fetching resolutions
	// failed.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateContractClosed {
		t.Fatalf("expected to stay in StateContractClosed")
	}
	chanArb.Stop()
}

// TestChannelArbitratorAlreadyForceClosed ensures that we cannot force close a
// channel that is already in the process of doing so.
func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
	t.Parallel()

	// We'll create the arbitrator and its backing log to signal that it's
	// already in the process of being force closed.
	log := &mockArbitratorLog{
		state: StateCommitmentBroadcasted,
	}
	chanArb, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// Then, we'll create a request to signal a force close request to the
	// channel arbitrator.
	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	select {
	case chanArb.forceCloseReqs <- &forceCloseReq{
		closeTx: respChan,
		errResp: errChan,
	}:
	case <-chanArb.quit:
	}

	// Finally, we should ensure that we are not able to do so by seeing
	// the expected errAlreadyForceClosed error.
	select {
	case err = <-errChan:
		if err != errAlreadyForceClosed {
			t.Fatalf("expected errAlreadyForceClosed, got %v", err)
		}
	case <-time.After(time.Second):
		t.Fatal("expected to receive error response")
	}
}