package contractcourt

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcd/wire"
	"github.com/coreos/bbolt"
	"github.com/lightningnetwork/lnd/chainntnfs"
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
)
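
// mockArbitratorLog is an in-memory implementation of the ArbitratorLog
// interface used to drive the ChannelArbitrator's state machine in tests.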
type mockArbitratorLog struct {
	state           ArbitratorState
	newStates       chan ArbitratorState
	failLog         bool
	failFetch       error
	failCommit      bool
	failCommitState ArbitratorState
	resolutions     *ContractResolutions
	resolvers       map[ContractResolver]struct{}

	commitSet *CommitSet

	sync.Mutex
}

// A compile time check to ensure mockArbitratorLog meets the ArbitratorLog
// interface.
var _ ArbitratorLog = (*mockArbitratorLog)(nil)
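
// CurrentState returns the mock log's current arbitrator state.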
func (b *mockArbitratorLog) CurrentState() (ArbitratorState, error) {
	return b.state, nil
}
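
// CommitState records the new state and signals it on the newStates channel.
// If failCommit is set, committing failCommitState returns an error instead.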
func (b *mockArbitratorLog) CommitState(s ArbitratorState) error {
	if b.failCommit && s == b.failCommitState {
		return fmt.Errorf("intentional commit error at state %v",
			b.failCommitState)
	}
	b.state = s
	b.newStates <- s
	return nil
}
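
// FetchUnresolvedContracts returns the set of resolvers currently tracked by
// the mock log.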
func (b *mockArbitratorLog) FetchUnresolvedContracts() ([]ContractResolver,
	error) {

	b.Lock()
	v := make([]ContractResolver, len(b.resolvers))
	idx := 0
	for resolver := range b.resolvers {
		v[idx] = resolver
		idx++
	}
	b.Unlock()

	return v, nil
}
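
// InsertUnresolvedContracts adds the given resolvers to the mock log's
// resolver set.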
func (b *mockArbitratorLog) InsertUnresolvedContracts(
	resolvers ...ContractResolver) error {

	b.Lock()
	for _, resolver := range resolvers {
		b.resolvers[resolver] = struct{}{}
	}
	b.Unlock()
	return nil
}
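
// SwapContract replaces oldContract with newContract in the tracked resolver
// set.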
func (b *mockArbitratorLog) SwapContract(oldContract,
	newContract ContractResolver) error {

	b.Lock()
	delete(b.resolvers, oldContract)
	b.resolvers[newContract] = struct{}{}
	b.Unlock()

	return nil
}
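
// ResolveContract removes the given resolver from the tracked set, marking it
// as resolved.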
func (b *mockArbitratorLog) ResolveContract(res ContractResolver) error {
	b.Lock()
	delete(b.resolvers, res)
	b.Unlock()

	return nil
}
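
// LogContractResolutions stores the passed contract resolutions, or fails if
// failLog is set.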
func (b *mockArbitratorLog) LogContractResolutions(c *ContractResolutions) error {
	if b.failLog {
		return fmt.Errorf("intentional log failure")
	}
	b.resolutions = c
	return nil
}
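
// FetchContractResolutions returns the stored contract resolutions, or the
// configured failFetch error if one is set.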
func (b *mockArbitratorLog) FetchContractResolutions() (*ContractResolutions, error) {
	if b.failFetch != nil {
		return nil, b.failFetch
	}

	return b.resolutions, nil
}
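
// FetchChainActions is a no-op for the mock log.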
func (b *mockArbitratorLog) FetchChainActions() (ChainActionMap, error) {
	return nil, nil
}
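
// InsertConfirmedCommitSet stores the confirmed commit set.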
func (b *mockArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error {
	b.commitSet = c
	return nil
}
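
// FetchConfirmedCommitSet returns the stored confirmed commit set.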
func (b *mockArbitratorLog) FetchConfirmedCommitSet() (*CommitSet, error) {
	return b.commitSet, nil
}
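
// WipeHistory is a no-op for the mock log.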
func (b *mockArbitratorLog) WipeHistory() error {
	return nil
}

// testArbLog is a wrapper around an existing (ideally fully concrete
// ArbitratorLog) that lets us intercept certain calls like transitioning to a
// new state.
type testArbLog struct {
	ArbitratorLog

	newStates chan ArbitratorState
}
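
// CommitState commits the state to the wrapped log, then signals the new
// state on the newStates channel.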
func (t *testArbLog) CommitState(s ArbitratorState) error {
	if err := t.ArbitratorLog.CommitState(s); err != nil {
		return err
	}

	t.newStates <- s

	return nil
}
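
// mockChainIO is a no-op implementation of lnwallet.BlockChainIO used to
// satisfy the chain arbitrator config in tests.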
type mockChainIO struct{}

var _ lnwallet.BlockChainIO = (*mockChainIO)(nil)

func (*mockChainIO) GetBestBlock() (*chainhash.Hash, int32, error) {
	return nil, 0, nil
}

func (*mockChainIO) GetUtxo(op *wire.OutPoint, _ []byte,
	heightHint uint32, _ <-chan struct{}) (*wire.TxOut, error) {
	return nil, nil
}

func (*mockChainIO) GetBlockHash(blockHeight int64) (*chainhash.Hash, error) {
	return nil, nil
}

func (*mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error) {
	return nil, nil
}
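
// chanArbTestCtx bundles a ChannelArbitrator under test together with the
// channels used to observe its side effects.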
type chanArbTestCtx struct {
	t *testing.T

	chanArb *ChannelArbitrator

	cleanUp func()

	resolvedChan chan struct{}

	blockEpochs chan *chainntnfs.BlockEpoch

	incubationRequests chan struct{}

	resolutions chan []ResolutionMsg

	log ArbitratorLog
}
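
// CleanUp stops the channel arbitrator and runs any additional cleanup
// registered for the test context.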
func (c *chanArbTestCtx) CleanUp() {
	if err := c.chanArb.Stop(); err != nil {
		c.t.Fatalf("unable to stop chan arb: %v", err)
	}

	if c.cleanUp != nil {
		c.cleanUp()
	}
}

// AssertStateTransitions asserts that the state machine steps through the
// passed states in order.
func (c *chanArbTestCtx) AssertStateTransitions(expectedStates ...ArbitratorState) {
	c.t.Helper()

	var newStatesChan chan ArbitratorState
	switch log := c.log.(type) {
	case *mockArbitratorLog:
		newStatesChan = log.newStates

	case *testArbLog:
		newStatesChan = log.newStates

	default:
		c.t.Fatalf("unable to assert state transitions with %T", log)
	}

	for _, exp := range expectedStates {
		var state ArbitratorState
		select {
		case state = <-newStatesChan:
		case <-time.After(5 * time.Second):
			c.t.Fatalf("new state not received")
		}

		if state != exp {
			c.t.Fatalf("expected new state %v, got %v", exp, state)
		}
	}
}

// AssertState checks that the ChannelArbitrator is in the state we expect it
// to be.
func (c *chanArbTestCtx) AssertState(expected ArbitratorState) {
	if c.chanArb.state != expected {
		c.t.Fatalf("expected state %v, was %v", expected, c.chanArb.state)
	}
}

// Restart simulates a clean restart of the channel arbitrator, forcing it to
// walk through its recovery logic. If this function returns nil, then a
// restart was successful. Note that the restart process keeps the log in
// place, in order to simulate proper persistence of the log. The caller can
// optionally provide a restart closure which will be executed before the
// resolver is started again, but after it is created.
func (c *chanArbTestCtx) Restart(restartClosure func(*chanArbTestCtx)) (*chanArbTestCtx, error) {
	if err := c.chanArb.Stop(); err != nil {
		return nil, err
	}

	newCtx, err := createTestChannelArbitrator(c.t, c.log)
	if err != nil {
		return nil, err
	}

	if restartClosure != nil {
		restartClosure(newCtx)
	}

	if err := newCtx.chanArb.Start(); err != nil {
		return nil, err
	}

	return newCtx, nil
}
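
// createTestChannelArbitrator returns a channel arbitrator test context wired
// up to mocked dependencies. If log is nil, a bolt-backed arbitrator log in a
// temporary database is created and wrapped in a testArbLog so that state
// transitions can still be observed.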
func createTestChannelArbitrator(t *testing.T, log ArbitratorLog) (*chanArbTestCtx, error) {
	blockEpochs := make(chan *chainntnfs.BlockEpoch)
	blockEpoch := &chainntnfs.BlockEpochEvent{
		Epochs: blockEpochs,
		Cancel: func() {},
	}

	chanPoint := wire.OutPoint{}
	shortChanID := lnwire.ShortChannelID{}
	chanEvents := &ChainEventSubscription{
		RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1),
		LocalUnilateralClosure:  make(chan *LocalUnilateralCloseInfo, 1),
		CooperativeClosure:      make(chan *CooperativeCloseInfo, 1),
		ContractBreach:          make(chan *lnwallet.BreachRetribution, 1),
	}

	resolutionChan := make(chan []ResolutionMsg, 1)
	incubateChan := make(chan struct{})

	chainIO := &mockChainIO{}
	chainArbCfg := ChainArbitratorConfig{
		ChainIO: chainIO,
		PublishTx: func(*wire.MsgTx) error {
			return nil
		},
		DeliverResolutionMsg: func(msgs ...ResolutionMsg) error {
			resolutionChan <- msgs
			return nil
		},
		OutgoingBroadcastDelta: 5,
		IncomingBroadcastDelta: 5,
		Notifier: &mockNotifier{
			epochChan: make(chan *chainntnfs.BlockEpoch),
			spendChan: make(chan *chainntnfs.SpendDetail),
			confChan:  make(chan *chainntnfs.TxConfirmation),
		},
		IncubateOutputs: func(wire.OutPoint, *lnwallet.CommitOutputResolution,
			*lnwallet.OutgoingHtlcResolution,
			*lnwallet.IncomingHtlcResolution, uint32) error {

			incubateChan <- struct{}{}
			return nil
		},
	}

	// We'll use the resolvedChan to synchronize on calls to
	// MarkChannelResolved.
	resolvedChan := make(chan struct{}, 1)

	// Next we'll create the matching configuration struct that contains
	// all interfaces and methods the arbitrator needs to do its job.
	arbCfg := ChannelArbitratorConfig{
		ChanPoint:   chanPoint,
		ShortChanID: shortChanID,
		BlockEpochs: blockEpoch,
		MarkChannelResolved: func() error {
			resolvedChan <- struct{}{}
			return nil
		},
		ForceCloseChan: func() (*lnwallet.LocalForceCloseSummary, error) {
			summary := &lnwallet.LocalForceCloseSummary{
				CloseTx:         &wire.MsgTx{},
				HtlcResolutions: &lnwallet.HtlcResolutions{},
			}
			return summary, nil
		},
		MarkCommitmentBroadcasted: func(_ *wire.MsgTx) error {
			return nil
		},
		MarkChannelClosed: func(*channeldb.ChannelCloseSummary) error {
			return nil
		},
		IsPendingClose:        false,
		ChainArbitratorConfig: chainArbCfg,
		ChainEvents:           chanEvents,
	}

	var cleanUp func()
	if log == nil {
		dbDir, err := ioutil.TempDir("", "chanArb")
		if err != nil {
			return nil, err
		}
		dbPath := filepath.Join(dbDir, "testdb")
		db, err := bbolt.Open(dbPath, 0600, nil)
		if err != nil {
			return nil, err
		}

		backingLog, err := newBoltArbitratorLog(
			db, arbCfg, chainhash.Hash{}, chanPoint,
		)
		if err != nil {
			return nil, err
		}
		cleanUp = func() {
			db.Close()
			os.RemoveAll(dbDir)
		}

		log = &testArbLog{
			ArbitratorLog: backingLog,
			newStates:     make(chan ArbitratorState),
		}
	}

	htlcSets := make(map[HtlcSetKey]htlcSet)

	chanArb := NewChannelArbitrator(arbCfg, htlcSets, log)

	return &chanArbTestCtx{
		t:                  t,
		chanArb:            chanArb,
		cleanUp:            cleanUp,
		resolvedChan:       resolvedChan,
		resolutions:        resolutionChan,
		blockEpochs:        blockEpochs,
		log:                log,
		incubationRequests: incubateChan,
	}, nil
}

// TestChannelArbitratorCooperativeClose tests that the ChannelArbitrator
// correctly marks the channel resolved in case a cooperative close is
// confirmed.
func TestChannelArbitratorCooperativeClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	if err := chanArbCtx.chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer func() {
		if err := chanArbCtx.chanArb.Stop(); err != nil {
			t.Fatalf("unable to stop chan arb: %v", err)
		}
	}()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// We set up a channel to detect when MarkChannelClosed is called.
	closeInfos := make(chan *channeldb.ChannelCloseSummary)
	chanArbCtx.chanArb.cfg.MarkChannelClosed = func(
		closeInfo *channeldb.ChannelCloseSummary) error {

		closeInfos <- closeInfo
		return nil
	}

	// A cooperative close should trigger a MarkChannelClosed +
	// MarkChannelResolved.
	closeInfo := &CooperativeCloseInfo{
		&channeldb.ChannelCloseSummary{},
	}
	chanArbCtx.chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo

	select {
	case c := <-closeInfos:
		if c.CloseType != channeldb.CooperativeClose {
			t.Fatalf("expected cooperative close, got %v", c.CloseType)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("timeout waiting for channel close")
	}

	// It should mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorRemoteForceClose checks that the ChannelArbitrator goes
// through the expected states if a remote force close is observed in the
// chain.
func TestChannelArbitratorRemoteForceClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Send a remote force close event.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}

	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
		CommitSet: CommitSet{
			ConfCommitKey: &RemoteHtlcSet,
			HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
		},
	}

	// It should transition StateDefault -> StateContractClosed ->
	// StateFullyResolved.
	chanArbCtx.AssertStateTransitions(
		StateContractClosed, StateFullyResolved,
	)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorLocalForceClose tests that the ChannelArbitrator goes
// through the expected states in case we request it to force close the
// channel, and the local force close event is observed in chain.
func TestChannelArbitratorLocalForceClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// We create a channel we can use to pause the ChannelArbitrator at the
	// point where it broadcasts the close tx, and check its state.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return nil
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)

	// When it is broadcasting the force close, its state should be
	// StateBroadcastCommit.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(15 * time.Second):
		t.Fatalf("did not get state update")
	}

	// After broadcasting, transition should be to
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)

	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// After broadcasting the close tx, it should be in state
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertState(StateCommitmentBroadcasted)

	// Now notify about the local force close getting confirmed.
	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		SpendDetail: &chainntnfs.SpendDetail{},
		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
			CloseTx:         &wire.MsgTx{},
			HtlcResolutions: &lnwallet.HtlcResolutions{},
		},
		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorBreachClose tests that the ChannelArbitrator goes
// through the expected states in case we notice a breach in the chain, and
// gracefully exits.
func TestChannelArbitratorBreachClose(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer func() {
		if err := chanArb.Stop(); err != nil {
			t.Fatal(err)
		}
	}()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Send a breach close event.
	chanArb.cfg.ChainEvents.ContractBreach <- &lnwallet.BreachRetribution{}

	// It should transition StateDefault -> StateFullyResolved.
	chanArbCtx.AssertStateTransitions(
		StateFullyResolved,
	)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorLocalForceClosePendingHtlc tests that the
// ChannelArbitrator goes through the expected states in case we request it to
// force close a channel that still has an HTLC pending.
func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
	// We create a new test context for this channel arb, notice that we
	// pass in a nil ArbitratorLog which means that a default one backed by
	// a real DB will be created. We need this for our test as we want to
	// test proper restart recovery and resolver population.
	chanArbCtx, err := createTestChannelArbitrator(t, nil)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb
	chanArb.cfg.PreimageDB = newMockWitnessBeacon()
	chanArb.cfg.Registry = &mockRegistry{}

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// Create htlcUpdates channel.
	htlcUpdates := make(chan *ContractUpdate)

	signals := &ContractSignals{
		HtlcUpdates: htlcUpdates,
		ShortChanID: lnwire.ShortChannelID{},
	}
	chanArb.UpdateContractSignals(signals)

	// Add HTLC to channel arbitrator.
	htlcAmt := 10000
	htlc := channeldb.HTLC{
		Incoming:  false,
		Amt:       lnwire.MilliSatoshi(htlcAmt),
		HtlcIndex: 99,
	}

	outgoingDustHtlc := channeldb.HTLC{
		Incoming:    false,
		Amt:         100,
		HtlcIndex:   100,
		OutputIndex: -1,
	}

	incomingDustHtlc := channeldb.HTLC{
		Incoming:    true,
		Amt:         105,
		HtlcIndex:   101,
		OutputIndex: -1,
	}

	htlcSet := []channeldb.HTLC{
		htlc, outgoingDustHtlc, incomingDustHtlc,
	}

	htlcUpdates <- &ContractUpdate{
		HtlcKey: LocalHtlcSet,
		Htlcs:   htlcSet,
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// The force close request should trigger broadcast of the commitment
	// transaction.
	chanArbCtx.AssertStateTransitions(
		StateBroadcastCommit,
		StateCommitmentBroadcasted,
	)
	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// Now notify about the local force close getting confirmed.
	closeTx := &wire.MsgTx{
		TxIn: []*wire.TxIn{
			{
				PreviousOutPoint: wire.OutPoint{},
				Witness: [][]byte{
					{0x1},
					{0x2},
				},
			},
		},
	}

	htlcOp := wire.OutPoint{
		Hash:  closeTx.TxHash(),
		Index: 0,
	}

	// Set up the outgoing resolution. Populate SignedTimeoutTx because our
	// commitment transaction got confirmed.
	outgoingRes := lnwallet.OutgoingHtlcResolution{
		Expiry: 10,
		SweepSignDesc: input.SignDescriptor{
			Output: &wire.TxOut{},
		},
		SignedTimeoutTx: &wire.MsgTx{
			TxIn: []*wire.TxIn{
				{
					PreviousOutPoint: htlcOp,
					Witness:          [][]byte{{}},
				},
			},
			TxOut: []*wire.TxOut{
				{},
			},
		},
	}

	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		SpendDetail: &chainntnfs.SpendDetail{},
		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
			CloseTx: closeTx,
			HtlcResolutions: &lnwallet.HtlcResolutions{
				OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{
					outgoingRes,
				},
			},
		},
		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
		CommitSet: CommitSet{
			ConfCommitKey: &LocalHtlcSet,
			HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
				LocalHtlcSet: htlcSet,
			},
		},
	}

	chanArbCtx.AssertStateTransitions(
		StateContractClosed,
		StateWaitingFullResolution,
	)

	// We expect an immediate resolution message for the outgoing dust
	// htlc. It is not resolvable on-chain.
	select {
	case msgs := <-chanArbCtx.resolutions:
		if len(msgs) != 1 {
			t.Fatalf("expected 1 message, instead got %v", len(msgs))
		}

		if msgs[0].HtlcIndex != outgoingDustHtlc.HtlcIndex {
			t.Fatalf("wrong htlc index: expected %v, got %v",
				outgoingDustHtlc.HtlcIndex, msgs[0].HtlcIndex)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("resolution msgs not sent")
	}

	// We'll grab the old notifier here as our resolvers are still holding
	// a reference to this instance, and a new one will be created when we
	// restart the channel arb below.
	oldNotifier := chanArb.cfg.Notifier.(*mockNotifier)

	// At this point, in order to simulate a restart, we'll re-create the
	// channel arbitrator. We do this to ensure that all information
	// required to properly resolve this HTLC is populated.
	if err := chanArb.Stop(); err != nil {
		t.Fatalf("unable to stop chan arb: %v", err)
	}

	// We'll now re-create the resolver, noting that we use the existing
	// arbLog so it carries over the same on-disk state.
	chanArbCtxNew, err := chanArbCtx.Restart(nil)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb = chanArbCtxNew.chanArb
	defer chanArbCtxNew.CleanUp()

	// Post restart, it should be the case that our resolver was properly
	// supplemented, and we only have a single resolver in the final set.
	if len(chanArb.activeResolvers) != 1 {
		t.Fatalf("expected single resolver, instead got: %v",
			len(chanArb.activeResolvers))
	}

	// We'll now examine the in-memory state of the active resolvers to
	// ensure they were populated properly.
	resolver := chanArb.activeResolvers[0]
	outgoingResolver, ok := resolver.(*htlcOutgoingContestResolver)
	if !ok {
		t.Fatalf("expected outgoing contest resolver, got %T",
			resolver)
	}

	// The resolver should have its htlcAmt field populated as well.
	if int64(outgoingResolver.htlcAmt) != int64(htlcAmt) {
		t.Fatalf("wrong htlc amount: expected %v, got %v,",
			htlcAmt, int64(outgoingResolver.htlcAmt))
	}

	// htlcOutgoingContestResolver is now active and waiting for the HTLC
	// to expire. It should not yet have passed it on for incubation.
	select {
	case <-chanArbCtx.incubationRequests:
		t.Fatalf("contract should not be incubated yet")
	default:
	}

	// Send a notification that the expiry height has been reached.
	oldNotifier.epochChan <- &chainntnfs.BlockEpoch{Height: 10}

	// htlcOutgoingContestResolver is now transforming into a
	// htlcTimeoutResolver and should send the contract off for incubation.
	select {
	case <-chanArbCtx.incubationRequests:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// Notify resolver that the HTLC output of the commitment has been
	// spent.
	oldNotifier.spendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}

	// Finally, we should also receive a resolution message instructing the
	// switch to cancel back the HTLC.
	select {
	case msgs := <-chanArbCtx.resolutions:
		if len(msgs) != 1 {
			t.Fatalf("expected 1 message, instead got %v", len(msgs))
		}

		if msgs[0].HtlcIndex != htlc.HtlcIndex {
			t.Fatalf("wrong htlc index: expected %v, got %v",
				htlc.HtlcIndex, msgs[0].HtlcIndex)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("resolution msgs not sent")
	}

	// As this is our own commitment transaction, the HTLC will go through
	// to the second level. Channel arbitrator should still not be marked
	// as resolved.
	select {
	case <-chanArbCtxNew.resolvedChan:
		t.Fatalf("channel resolved prematurely")
	default:
	}

	// Notify resolver that the second level transaction is spent.
	oldNotifier.spendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}

	// At this point channel should be marked as resolved.
	chanArbCtxNew.AssertStateTransitions(StateFullyResolved)
	select {
	case <-chanArbCtxNew.resolvedChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}

// TestChannelArbitratorLocalForceCloseRemoteConfirmed tests that the
// ChannelArbitrator behaves as expected in the case where we request a local
// force close, but a remote commitment ends up being confirmed in chain.
func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Create a channel we can use to assert the state when it publishes
	// the close tx.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return nil
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)

	// We expect it to be in state StateBroadcastCommit when publishing
	// the force close.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(15 * time.Second):
		t.Fatalf("no state update received")
	}

	// After broadcasting, the transition should be to
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)

	// Wait for a response to the force close.
	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// The state should be StateCommitmentBroadcasted.
	chanArbCtx.AssertState(StateCommitmentBroadcasted)

	// Now notify about the _REMOTE_ commitment getting confirmed.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}
	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)

	// It should resolve.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(15 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}
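
// The PublishTx hook wired up above is repeated, with only the returned error
// varying, in several of the tests below. As a minimal sketch of how it could
// be factored out (assertStateOnPublish is a hypothetical helper name, not
// part of the original suite), assuming the same chanArb fields the tests
// already touch:
func assertStateOnPublish(chanArb *ChannelArbitrator,
	stateChan chan ArbitratorState, pubErr error) func(*wire.MsgTx) error {

	return func(*wire.MsgTx) error {
		// Report the state the arbitrator was in at broadcast time so
		// the test can assert on it, then return the configured
		// publication result.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return pubErr
	}
}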

// TestChannelArbitratorLocalForceDoubleSpend tests that the ChannelArbitrator
// behaves as expected in the case where we request a local force close, but
// we fail broadcasting our commitment because a remote commitment has already
// been published.
func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// It should start out in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Return ErrDoubleSpend when attempting to publish the tx.
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return lnwallet.ErrDoubleSpend
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)

	// We expect it to be in state StateBroadcastCommit when publishing
	// the force close.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(15 * time.Second):
		t.Fatalf("no state update received")
	}

	// After broadcasting, the transition should be to
	// StateCommitmentBroadcasted.
	chanArbCtx.AssertStateTransitions(StateCommitmentBroadcasted)

	// Wait for a response to the force close.
	select {
	case <-respChan:
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	select {
	case err := <-errChan:
		if err != nil {
			t.Fatalf("error force closing channel: %v", err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// The state should be StateCommitmentBroadcasted.
	chanArbCtx.AssertState(StateCommitmentBroadcasted)

	// Now notify about the _REMOTE_ commitment getting confirmed.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}
	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateContractClosed, StateFullyResolved)

	// It should resolve.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(15 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}
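
// The double-spend case above relies on the arbitrator treating
// lnwallet.ErrDoubleSpend from PublishTx as success: when the commitment
// cannot be broadcast because a commitment is already in the chain, the state
// machine still proceeds to StateCommitmentBroadcasted. A minimal sketch of
// that decision (isAcceptablePublishErr is a hypothetical name, not a helper
// used by the arbitrator itself):
func isAcceptablePublishErr(pubErr error) bool {
	switch pubErr {
	case nil, lnwallet.ErrDoubleSpend:
		// Either the broadcast worked, or a commitment was already
		// confirmed/spent, which the test above expects to be treated
		// as equivalent to a successful publish.
		return true
	default:
		return false
	}
}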

// TestChannelArbitratorPersistence tests that the ChannelArbitrator is able to
// keep advancing the state machine from various states after restart.
func TestChannelArbitratorPersistence(t *testing.T) {
	// Start out with a log that will fail writing the set of resolutions.
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
		failLog:   true,
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	chanArb := chanArbCtx.chanArb
	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// It should start in StateDefault.
	chanArbCtx.AssertState(StateDefault)

	// Send a remote force close event.
	commitSpend := &chainntnfs.SpendDetail{
		SpenderTxHash: &chainhash.Hash{},
	}

	uniClose := &lnwallet.UnilateralCloseSummary{
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since writing the resolutions fails, the arbitrator should not
	// advance to the next state.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateDefault {
		t.Fatalf("expected to stay in StateDefault")
	}

	// Restart the channel arb, this'll use the same log and prior
	// context.
	chanArbCtx, err = chanArbCtx.Restart(nil)
	if err != nil {
		t.Fatalf("unable to restart channel arb: %v", err)
	}
	chanArb = chanArbCtx.chanArb

	// Again, it should start up in the default state.
	chanArbCtx.AssertState(StateDefault)

	// Now we make the log succeed writing the resolutions, but fail when
	// attempting to close the channel.
	log.failLog = false
	chanArb.cfg.MarkChannelClosed = func(*channeldb.ChannelCloseSummary) error {
		return fmt.Errorf("intentional close error")
	}

	// Send a new remote force close event.
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since closing the channel failed, the arbitrator should stay in the
	// default state.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateDefault {
		t.Fatalf("expected to stay in StateDefault")
	}

	// Restart once again to simulate yet another restart.
	chanArbCtx, err = chanArbCtx.Restart(nil)
	if err != nil {
		t.Fatalf("unable to restart channel arb: %v", err)
	}
	chanArb = chanArbCtx.chanArb

	// Starts out in StateDefault.
	chanArbCtx.AssertState(StateDefault)

	// Now make fetching the resolutions fail.
	log.failFetch = fmt.Errorf("intentional fetch failure")
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since logging the resolutions and closing the channel now succeeds,
	// it should advance to StateContractClosed.
	chanArbCtx.AssertStateTransitions(StateContractClosed)

	// It should not advance further, however, as fetching resolutions
	// failed.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateContractClosed {
		t.Fatalf("expected to stay in StateContractClosed")
	}
	chanArb.Stop()

	// Create a new arbitrator, and now make fetching resolutions succeed.
	log.failFetch = nil
	chanArbCtx, err = chanArbCtx.Restart(nil)
	if err != nil {
		t.Fatalf("unable to restart channel arb: %v", err)
	}
	defer chanArbCtx.CleanUp()

	// Finally it should advance to StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateFullyResolved)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}
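
// The persistence test above restarts the arbitrator several times with
// Restart(nil), reusing the same mock log so whatever state was persisted
// before the injected failure survives the restart. A compact usage sketch of
// that pattern for the restarts where nothing beyond StateDefault was
// persisted; the wrapper name restartAndAssertDefault is hypothetical:
func restartAndAssertDefault(t *testing.T,
	chanArbCtx *chanArbTestCtx) *chanArbTestCtx {

	newCtx, err := chanArbCtx.Restart(nil)
	if err != nil {
		t.Fatalf("unable to restart channel arb: %v", err)
	}

	// With only StateDefault persisted so far, the restarted arbitrator
	// should come back up in StateDefault until its log lets it advance.
	newCtx.AssertState(StateDefault)
	return newCtx
}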

// TestChannelArbitratorForceCloseBreachedChannel tests that the channel
// arbitrator is able to handle a channel that is breached by the remote node
// while it is in the process of being force closed. In these cases we expect
// the ChannelArbitrator to gracefully exit, as the breach is handled by other
// subsystems.
func TestChannelArbitratorForceCloseBreachedChannel(t *testing.T) {
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	chanArb := chanArbCtx.chanArb
	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// It should start in StateDefault.
	chanArbCtx.AssertState(StateDefault)

	// We start by attempting a local force close. We'll return an
	// unexpected publication error, causing the state machine to halt.
	expErr := errors.New("intentional publication error")
	stateChan := make(chan ArbitratorState)
	chanArb.cfg.PublishTx = func(*wire.MsgTx) error {
		// When the force close tx is being broadcasted, check that the
		// state is correct at that point.
		select {
		case stateChan <- chanArb.state:
		case <-chanArb.quit:
			return fmt.Errorf("exiting")
		}
		return expErr
	}

	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	// With the channel found, and the request crafted, we'll send over a
	// force close request to the arbitrator that watches this channel.
	chanArb.forceCloseReqs <- &forceCloseReq{
		errResp: errChan,
		closeTx: respChan,
	}

	// It should transition to StateBroadcastCommit.
	chanArbCtx.AssertStateTransitions(StateBroadcastCommit)

	// We expect it to be in state StateBroadcastCommit when attempting
	// the force close.
	select {
	case state := <-stateChan:
		if state != StateBroadcastCommit {
			t.Fatalf("state during PublishTx was %v", state)
		}
	case <-time.After(15 * time.Second):
		t.Fatalf("no state update received")
	}

	// Make sure we get the expected error.
	select {
	case err := <-errChan:
		if err != expErr {
			t.Fatalf("unexpected error force closing channel: %v",
				err)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("no response received")
	}

	// We mimic that the channel is breached while the channel arbitrator
	// is down. This means that on restart it will be started with a
	// pending close channel, of type BreachClose.
	chanArbCtx, err = chanArbCtx.Restart(func(c *chanArbTestCtx) {
		c.chanArb.cfg.IsPendingClose = true
		c.chanArb.cfg.ClosingHeight = 100
		c.chanArb.cfg.CloseType = channeldb.BreachClose
	})
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	defer chanArbCtx.CleanUp()

	// Finally it should advance to StateFullyResolved.
	chanArbCtx.AssertStateTransitions(StateFullyResolved)

	// It should also mark the channel as resolved.
	select {
	case <-chanArbCtx.resolvedChan:
		// Expected.
	case <-time.After(5 * time.Second):
		t.Fatalf("contract was not resolved")
	}
}
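
// Both the breach test above and the commit-failure test below restart the
// arbitrator with the channel already marked as pending close in the
// database. A minimal sketch of that restart mutation as a reusable option;
// the name pendingCloseOpt is hypothetical, while the three config fields it
// sets are exactly the ones the tests touch:
func pendingCloseOpt(closeType channeldb.ClosureType) func(*chanArbTestCtx) {
	return func(c *chanArbTestCtx) {
		// Report the channel as already closed in the database at a
		// fixed height, with the caller choosing the closure type.
		c.chanArb.cfg.IsPendingClose = true
		c.chanArb.cfg.ClosingHeight = 100
		c.chanArb.cfg.CloseType = closeType
	}
}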

// TestChannelArbitratorCommitFailure tests that the channel arbitrator is able
// to recover from a failed CommitState call at restart.
func TestChannelArbitratorCommitFailure(t *testing.T) {

	testCases := []struct {

		// closeType is the type of channel close we want to test.
		closeType channeldb.ClosureType

		// sendEvent is a function that will send the event
		// corresponding to this test's closeType to the passed
		// ChannelArbitrator.
		sendEvent func(chanArb *ChannelArbitrator)

		// expectedStates is the states we expect the state machine to
		// go through after a restart and successful log commit.
		expectedStates []ArbitratorState
	}{
		{
			closeType: channeldb.CooperativeClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				closeInfo := &CooperativeCloseInfo{
					&channeldb.ChannelCloseSummary{},
				}
				chanArb.cfg.ChainEvents.CooperativeClosure <- closeInfo
			},
			expectedStates: []ArbitratorState{StateFullyResolved},
		},
		{
			closeType: channeldb.RemoteForceClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				commitSpend := &chainntnfs.SpendDetail{
					SpenderTxHash: &chainhash.Hash{},
				}

				uniClose := &lnwallet.UnilateralCloseSummary{
					SpendDetail:     commitSpend,
					HtlcResolutions: &lnwallet.HtlcResolutions{},
				}
				chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
					UnilateralCloseSummary: uniClose,
				}
			},
			expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
		},
		{
			closeType: channeldb.LocalForceClose,
			sendEvent: func(chanArb *ChannelArbitrator) {
				chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
					SpendDetail: &chainntnfs.SpendDetail{},
					LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
						CloseTx:         &wire.MsgTx{},
						HtlcResolutions: &lnwallet.HtlcResolutions{},
					},
					ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
				}
			},
			expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
		},
	}

	for _, test := range testCases {
		test := test

		log := &mockArbitratorLog{
			state:      StateDefault,
			newStates:  make(chan ArbitratorState, 5),
			failCommit: true,

			// Set the log to fail on the first expected state
			// after state machine progress for this test case.
			failCommitState: test.expectedStates[0],
		}

		chanArbCtx, err := createTestChannelArbitrator(t, log)
		if err != nil {
			t.Fatalf("unable to create ChannelArbitrator: %v", err)
		}

		chanArb := chanArbCtx.chanArb
		if err := chanArb.Start(); err != nil {
			t.Fatalf("unable to start ChannelArbitrator: %v", err)
		}

		// It should start in StateDefault.
		chanArbCtx.AssertState(StateDefault)

		closed := make(chan struct{})
		chanArb.cfg.MarkChannelClosed = func(
			*channeldb.ChannelCloseSummary) error {
			close(closed)
			return nil
		}

		// Send the test event to trigger the state machine.
		test.sendEvent(chanArb)

		select {
		case <-closed:
		case <-time.After(5 * time.Second):
			t.Fatalf("channel was not marked closed")
		}

		// Since the channel was marked closed in the database, but the
		// commit to the next state failed, the state should still be
		// StateDefault.
		time.Sleep(100 * time.Millisecond)
		if log.state != StateDefault {
			t.Fatalf("expected to stay in StateDefault, instead "+
				"has %v", log.state)
		}
		chanArb.Stop()

		// Start the arbitrator again, with IsPendingClose reporting
		// the channel closed in the database.
		log.failCommit = false
		chanArbCtx, err = chanArbCtx.Restart(func(c *chanArbTestCtx) {
			c.chanArb.cfg.IsPendingClose = true
			c.chanArb.cfg.ClosingHeight = 100
			c.chanArb.cfg.CloseType = test.closeType
		})
		if err != nil {
			t.Fatalf("unable to create ChannelArbitrator: %v", err)
		}

		// Since the channel is marked closed in the database, it
		// should advance to the expected states.
		chanArbCtx.AssertStateTransitions(test.expectedStates...)

		// It should also mark the channel as resolved.
		select {
		case <-chanArbCtx.resolvedChan:
			// Expected.
		case <-time.After(5 * time.Second):
			t.Fatalf("contract was not resolved")
		}
	}
}
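
// For reference, the per-case restart above, which marks the channel as
// pending close with the case's closure type, is the same mutation as the
// pendingCloseOpt sketch shown earlier (still hypothetical, not part of the
// suite):
//
//	chanArbCtx, err = chanArbCtx.Restart(pendingCloseOpt(test.closeType))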

// TestChannelArbitratorEmptyResolutions makes sure that a channel that is
// pending close in the database, but hasn't had any resolutions logged, will
// not be marked resolved. This situation must be handled to avoid closing
// channels from earlier versions of the ChannelArbitrator, which didn't have a
// proper handoff from the ChainWatcher, and we could risk ending up in a state
// where the channel was closed in the DB, but the resolutions weren't properly
// written.
func TestChannelArbitratorEmptyResolutions(t *testing.T) {
	// Start out with a log that will fail fetching the set of resolutions.
	log := &mockArbitratorLog{
		state:     StateDefault,
		newStates: make(chan ArbitratorState, 5),
		failFetch: errNoResolutions,
	}

	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}

	chanArb := chanArbCtx.chanArb
	chanArb.cfg.IsPendingClose = true
	chanArb.cfg.ClosingHeight = 100
	chanArb.cfg.CloseType = channeldb.RemoteForceClose

	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}

	// It should advance to StateContractClosed, since the channel is
	// already marked closed in the database.
	chanArbCtx.AssertStateTransitions(StateContractClosed)

	// It should not advance further, however, as fetching resolutions
	// failed.
	time.Sleep(100 * time.Millisecond)
	if log.state != StateContractClosed {
		t.Fatalf("expected to stay in StateContractClosed")
	}
	chanArb.Stop()
}

// TestChannelArbitratorAlreadyForceClosed ensures that we cannot force close a
// channel that is already in the process of doing so.
func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
	t.Parallel()

	// We'll create the arbitrator and its backing log to signal that it's
	// already in the process of being force closed.
	log := &mockArbitratorLog{
		state: StateCommitmentBroadcasted,
	}
	chanArbCtx, err := createTestChannelArbitrator(t, log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
	chanArb := chanArbCtx.chanArb
	if err := chanArb.Start(); err != nil {
		t.Fatalf("unable to start ChannelArbitrator: %v", err)
	}
	defer chanArb.Stop()

	// Then, we'll create a request to signal a force close request to the
	// channel arbitrator.
	errChan := make(chan error, 1)
	respChan := make(chan *wire.MsgTx, 1)

	select {
	case chanArb.forceCloseReqs <- &forceCloseReq{
		closeTx: respChan,
		errResp: errChan,
	}:
	case <-chanArb.quit:
	}

	// Finally, we should ensure that we are not able to do so by seeing
	// the expected errAlreadyForceClosed error.
	select {
	case err = <-errChan:
		if err != errAlreadyForceClosed {
			t.Fatalf("expected errAlreadyForceClosed, got %v", err)
		}
	case <-time.After(time.Second):
		t.Fatal("expected to receive error response")
	}
}

// TestChannelArbitratorDanglingCommitForceClose tests that if there're HTLCs
// on the remote party's commitment, but not ours, and they're about to time
// out, then we'll go on chain so we can cancel back the HTLCs on the incoming
// commitment.
func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
	t.Parallel()

	type testCase struct {
		htlcExpired       bool
		remotePendingHTLC bool
		confCommit        HtlcSetKey
	}
	var testCases []testCase

	testOptions := []bool{true, false}
	confOptions := []HtlcSetKey{
		LocalHtlcSet, RemoteHtlcSet, RemotePendingHtlcSet,
	}
	for _, htlcExpired := range testOptions {
		for _, remotePendingHTLC := range testOptions {
			for _, commitConf := range confOptions {
				switch {
				// If the HTLC is on the remote commitment, and
				// that one confirms, then there's no special
				// behavior, we should play all the HTLCs on
				// that remote commitment as normal.
				case !remotePendingHTLC && commitConf == RemoteHtlcSet:
					fallthrough

				// If the HTLC is on the remote pending, and
				// that confirms, then we don't have any
				// special actions.
				case remotePendingHTLC && commitConf == RemotePendingHtlcSet:
					continue
				}

				testCases = append(testCases, testCase{
					htlcExpired:       htlcExpired,
					remotePendingHTLC: remotePendingHTLC,
					confCommit:        commitConf,
				})
			}
		}
	}

	for _, testCase := range testCases {
		testCase := testCase
		testName := fmt.Sprintf("testCase: htlcExpired=%v,"+
			"remotePendingHTLC=%v,remotePendingCommitConf=%v",
			testCase.htlcExpired, testCase.remotePendingHTLC,
			testCase.confCommit)

		t.Run(testName, func(t *testing.T) {
			t.Parallel()

			arbLog := &mockArbitratorLog{
				state:     StateDefault,
				newStates: make(chan ArbitratorState, 5),
				resolvers: make(map[ContractResolver]struct{}),
			}

			chanArbCtx, err := createTestChannelArbitrator(
				t, arbLog,
			)
			if err != nil {
				t.Fatalf("unable to create ChannelArbitrator: %v", err)
			}
			chanArb := chanArbCtx.chanArb
			if err := chanArb.Start(); err != nil {
				t.Fatalf("unable to start ChannelArbitrator: %v", err)
			}
			defer chanArb.Stop()

			// Now that our channel arb has started, we'll set up
			// its contract signals channel so we can send it
			// various HTLC updates for this test.
			htlcUpdates := make(chan *ContractUpdate)
			signals := &ContractSignals{
				HtlcUpdates: htlcUpdates,
				ShortChanID: lnwire.ShortChannelID{},
			}
			chanArb.UpdateContractSignals(signals)

			htlcKey := RemoteHtlcSet
			if testCase.remotePendingHTLC {
				htlcKey = RemotePendingHtlcSet
			}

			// Next, we'll send it a new HTLC that is set to expire
			// in 10 blocks. This HTLC will only appear on the
			// commitment transaction of the _remote_ party.
			htlcIndex := uint64(99)
			htlcExpiry := uint32(10)
			danglingHTLC := channeldb.HTLC{
				Incoming:      false,
				Amt:           10000,
				HtlcIndex:     htlcIndex,
				RefundTimeout: htlcExpiry,
			}
			htlcUpdates <- &ContractUpdate{
				HtlcKey: htlcKey,
				Htlcs:   []channeldb.HTLC{danglingHTLC},
			}

			// At this point, we now have a split commitment state
			// from the PoV of the channel arb. There's now an HTLC
			// that only exists on the commitment transaction of
			// the remote party.
			errChan := make(chan error, 1)
			respChan := make(chan *wire.MsgTx, 1)
			switch {
			// If we want an HTLC expiration trigger, then we'll
			// now mine a block (height 5), which is 5 blocks away
			// (our grace delta) from the expiry of that HTLC.
			case testCase.htlcExpired:
				chanArbCtx.blockEpochs <- &chainntnfs.BlockEpoch{Height: 5}

			// Otherwise, we'll just trigger a regular force close
			// request.
			case !testCase.htlcExpired:
				chanArb.forceCloseReqs <- &forceCloseReq{
					errResp: errChan,
					closeTx: respChan,
				}
			}

			// At this point, the resolver should now have
			// determined that it needs to go to chain in order to
			// block off the redemption path so it can cancel the
			// incoming HTLC.
			chanArbCtx.AssertStateTransitions(
				StateBroadcastCommit,
				StateCommitmentBroadcasted,
			)

			// Next we'll craft a fake commitment transaction to
			// send to signal that the channel has closed out on
			// chain.
			closeTx := &wire.MsgTx{
				TxIn: []*wire.TxIn{
					{
						PreviousOutPoint: wire.OutPoint{},
						Witness: [][]byte{
							{0x9},
						},
					},
				},
			}

			// We'll now signal to the channel arb that the HTLC
			// has fully closed on chain. Our local commit set
			// shows no HTLC on our commitment, but one on the
			// remote commitment. This should result in the HTLC
			// being canceled back. Also note that there're no HTLC
			// resolutions sent since we have none on our
			// commitment transaction.
			uniCloseInfo := &LocalUnilateralCloseInfo{
				SpendDetail: &chainntnfs.SpendDetail{},
				LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
					CloseTx:         closeTx,
					HtlcResolutions: &lnwallet.HtlcResolutions{},
				},
				ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
				CommitSet: CommitSet{
					ConfCommitKey: &testCase.confCommit,
					HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
				},
			}

			// If the HTLC was meant to expire, then we'll mark the
			// closing transaction at the proper expiry height,
			// since our "need to timeout" comparison is based on
			// the confirmation height.
			if testCase.htlcExpired {
				uniCloseInfo.SpendDetail.SpendingHeight = 5
			}

			// Depending on if we're testing the remote pending
			// commitment or not, we'll populate either a fake
			// dangling remote commitment, or a regular locked in
			// one.
			htlcs := []channeldb.HTLC{danglingHTLC}
			if testCase.remotePendingHTLC {
				uniCloseInfo.CommitSet.HtlcSets[RemotePendingHtlcSet] = htlcs
			} else {
				uniCloseInfo.CommitSet.HtlcSets[RemoteHtlcSet] = htlcs
			}

			chanArb.cfg.ChainEvents.LocalUnilateralClosure <- uniCloseInfo

			// The channel arb should now transition to waiting
			// until the HTLCs have been fully resolved.
			chanArbCtx.AssertStateTransitions(
				StateContractClosed,
				StateWaitingFullResolution,
			)

			// Now that we've sent this signal, we should have that
			// HTLC be canceled back immediately.
			select {
			case msgs := <-chanArbCtx.resolutions:
				if len(msgs) != 1 {
					t.Fatalf("expected 1 message, "+
						"instead got %v", len(msgs))
				}

				if msgs[0].HtlcIndex != htlcIndex {
					t.Fatalf("wrong htlc index: expected %v, got %v",
						htlcIndex, msgs[0].HtlcIndex)
				}
			case <-time.After(5 * time.Second):
				t.Fatalf("resolution msgs not sent")
			}

			// There's no contract to send a fully resolve message,
			// so instead, we'll mine another block which'll cause
			// it to re-examine its state and realize there're no
			// more HTLCs.
			chanArbCtx.blockEpochs <- &chainntnfs.BlockEpoch{Height: 6}
			chanArbCtx.AssertStateTransitions(StateFullyResolved)
		})
	}
}
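
// The expiry trigger in the test above leans on the arbitrator's grace delta:
// with the dangling HTLC timing out at height 10 and a delta of 5 blocks, a
// block at height 5 is close enough to force a broadcast. A minimal sketch of
// that comparison, with the delta passed in explicitly rather than read from
// the real arbitrator config (htlcCloseToExpiry is an illustrative name, not
// a helper used by the arbitrator):
func htlcCloseToExpiry(currentHeight, htlcExpiry, graceDelta uint32) bool {
	// Broadcast once the HTLC's refund timeout is within graceDelta
	// blocks of the current height, e.g. 5+5 >= 10 in the test above.
	return currentHeight+graceDelta >= htlcExpiry
}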