Merge pull request #3095 from Roasbeef/cnct-timeout-fix
multi: always fail pending htlcs on commitment confirmation
commit b8690d504d

@@ -1946,7 +1946,7 @@ func createHTLC(data int, amount lnwire.MilliSatoshi) (*lnwire.UpdateAddHTLC, [3
// pending updates.
// TODO(conner) remove code duplication
func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) error {
-	aliceSig, aliceHtlcSigs, err := chanA.SignNextCommitment()
+	aliceSig, aliceHtlcSigs, _, err := chanA.SignNextCommitment()
	if err != nil {
		return err
	}

@@ -1958,12 +1958,13 @@ func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) error {
	if err != nil {
		return err
	}
-	bobSig, bobHtlcSigs, err := chanB.SignNextCommitment()
+	bobSig, bobHtlcSigs, _, err := chanB.SignNextCommitment()
	if err != nil {
		return err
	}

-	if _, _, _, err := chanA.ReceiveRevocation(bobRevocation); err != nil {
+	_, _, _, _, err = chanA.ReceiveRevocation(bobRevocation)
+	if err != nil {
		return err
	}
	if err := chanA.ReceiveNewCommitment(bobSig, bobHtlcSigs); err != nil {

@@ -1974,7 +1975,8 @@ func forceStateTransition(chanA, chanB *lnwallet.LightningChannel) error {
	if err != nil {
		return err
	}
-	if _, _, _, err := chanB.ReceiveRevocation(aliceRevocation); err != nil {
+	_, _, _, _, err = chanB.ReceiveRevocation(aliceRevocation)
+	if err != nil {
		return err
	}

@@ -82,15 +82,22 @@ type ArbitratorLog interface {
	// contract resolutions from persistent storage.
	FetchContractResolutions() (*ContractResolutions, error)

	// LogChainActions stores a set of chain actions which are derived from
	// our set of active contracts, and the on-chain state. We'll write
	// this set of actions when: we decide to go on-chain to resolve a
	// contract, or we detect that the remote party has gone on-chain.
	LogChainActions(ChainActionMap) error

+	// InsertConfirmedCommitSet stores the known set of active HTLCs at
+	// the time of channel closure. We'll use this to reconstruct our set
+	// of chain actions anew based on the confirmed and pending commitment
+	// state.
+	InsertConfirmedCommitSet(c *CommitSet) error
+
+	// FetchConfirmedCommitSet fetches the known confirmed active HTLC set
+	// from the database.
+	FetchConfirmedCommitSet() (*CommitSet, error)

	// FetchChainActions attempts to fetch the set of previously stored
	// chain actions. We'll use this upon restart to properly advance our
	// state machine forward.
	//
	// NOTE: This method only exists in order to be able to serve nodes
	// that had channels in the process of closing before the CommitSet
	// struct was introduced.
	FetchChainActions() (ChainActionMap, error)

	// WipeHistory is to be called ONLY once *all* contracts have been

@@ -259,6 +266,11 @@ var (
	// actionsBucketKey is the key under the logScope that we'll use to
	// store all chain actions once they're determined.
	actionsBucketKey = []byte("chain-actions")

+	// commitSetKey is the primary key under the logScope that we'll use
+	// to store the confirmed active HTLC sets once we learn that a
+	// channel has closed out on chain.
+	commitSetKey = []byte("commit-set")
)

var (

@@ -277,6 +289,11 @@ var (
	// errNoActions is returned when the log doesn't contain any stored
	// chain actions.
	errNoActions = fmt.Errorf("no chain actions exist")

+	// errNoCommitSet is returned when the log doesn't contain a CommitSet.
+	// This can happen if the channel hasn't closed yet, or a client is
+	// running an older version that didn't yet write this state.
+	errNoCommitSet = fmt.Errorf("no commit set exists")
)

// boltArbitratorLog is an implementation of the ArbitratorLog interface backed

@@ -720,44 +737,6 @@ func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, er
	return c, err
}

-// LogChainActions stores a set of chain actions which are derived from our set
-// of active contracts, and the on-chain state. We'll write this set of actions
-// when: we decide to go on-chain to resolve a contract, or we detect that the
-// remote party has gone on-chain.
-//
-// NOTE: Part of the ContractResolver interface.
-func (b *boltArbitratorLog) LogChainActions(actions ChainActionMap) error {
-	return b.db.Batch(func(tx *bbolt.Tx) error {
-		scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:])
-		if err != nil {
-			return err
-		}
-
-		actionsBucket, err := scopeBucket.CreateBucketIfNotExists(
-			actionsBucketKey,
-		)
-		if err != nil {
-			return err
-		}
-
-		for chainAction, htlcs := range actions {
-			var htlcBuf bytes.Buffer
-			err := channeldb.SerializeHtlcs(&htlcBuf, htlcs...)
-			if err != nil {
-				return err
-			}
-
-			actionKey := []byte{byte(chainAction)}
-			err = actionsBucket.Put(actionKey, htlcBuf.Bytes())
-			if err != nil {
-				return err
-			}
-		}
-
-		return nil
-	})
-}
-
// FetchChainActions attempts to fetch the set of previously stored chain
// actions. We'll use this upon restart to properly advance our state machine
// forward.

@@ -802,6 +781,59 @@ func (b *boltArbitratorLog) FetchChainActions() (ChainActionMap, error) {
	return actionsMap, nil
}

+// InsertConfirmedCommitSet stores the known set of active HTLCs at the time
+// of channel closure. We'll use this to reconstruct our set of chain actions
+// anew based on the confirmed and pending commitment state.
+//
+// NOTE: Part of the ContractResolver interface.
+func (b *boltArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error {
+	return b.db.Update(func(tx *bbolt.Tx) error {
+		scopeBucket, err := tx.CreateBucketIfNotExists(b.scopeKey[:])
+		if err != nil {
+			return err
+		}
+
+		var b bytes.Buffer
+		if err := encodeCommitSet(&b, c); err != nil {
+			return err
+		}
+
+		return scopeBucket.Put(commitSetKey, b.Bytes())
+	})
+}
+
+// FetchConfirmedCommitSet fetches the known confirmed active HTLC set from
+// the database.
+//
+// NOTE: Part of the ContractResolver interface.
+func (b *boltArbitratorLog) FetchConfirmedCommitSet() (*CommitSet, error) {
+	var c *CommitSet
+	err := b.db.View(func(tx *bbolt.Tx) error {
+		scopeBucket := tx.Bucket(b.scopeKey[:])
+		if scopeBucket == nil {
+			return errScopeBucketNoExist
+		}
+
+		commitSetBytes := scopeBucket.Get(commitSetKey)
+		if commitSetBytes == nil {
+			return errNoCommitSet
+		}
+
+		commitSet, err := decodeCommitSet(bytes.NewReader(commitSetBytes))
+		if err != nil {
+			return err
+		}
+
+		c = commitSet
+		return nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}

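Taken together, these two methods give the arbitrator log a write-once/read-many snapshot of the HTLC state at closure time: written when a commitment confirms, read back on restart so the chain actions can be rebuilt from the confirmed snapshot. Below is a minimal, self-contained sketch of that round trip and the errNoCommitSet fallback; the in-memory memArbLog type and its raw []byte payload are invented for illustration and stand in for the bolt-backed implementation above:

package main

import (
	"errors"
	"fmt"
)

// errNoCommitSet mirrors the sentinel above: fetching before any insert
// means the channel hasn't closed yet, or an older node never wrote one.
var errNoCommitSet = errors.New("no commit set exists")

// memArbLog is a toy, in-memory stand-in for boltArbitratorLog that
// models only the commit-set round trip.
type memArbLog struct {
	commitSet []byte // encoded CommitSet, nil until a channel closes
}

func (m *memArbLog) InsertConfirmedCommitSet(encoded []byte) error {
	m.commitSet = encoded
	return nil
}

func (m *memArbLog) FetchConfirmedCommitSet() ([]byte, error) {
	if m.commitSet == nil {
		return nil, errNoCommitSet
	}
	return m.commitSet, nil
}

func main() {
	log := &memArbLog{}

	// Before closure: callers must treat errNoCommitSet as benign.
	if _, err := log.FetchConfirmedCommitSet(); errors.Is(err, errNoCommitSet) {
		fmt.Println("no commit set yet; fall back to stored chain actions")
	}

	// At closure time, the chain watcher persists the snapshot once.
	_ = log.InsertConfirmedCommitSet([]byte{0x01})

	got, _ := log.FetchConfirmedCommitSet()
	fmt.Printf("restored %d byte(s) after restart\n", len(got))
}
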
// WipeHistory is to be called ONLY once *all* contracts have been fully
// resolved, and the channel closure is finalized. This method will delete all
// on-disk state within the persistent log.

@@ -835,7 +867,6 @@ func (b *boltArbitratorLog) WipeHistory() error {
		return err
	}
	if err := scopeBucket.DeleteBucket(contractsBucketKey); err != nil {
-		fmt.Println("nah")
		return err
	}

@@ -1052,3 +1083,76 @@ func decodeCommitResolution(r io.Reader,

	return binary.Read(r, endian, &c.MaturityDelay)
}

+func encodeHtlcSetKey(w io.Writer, h *HtlcSetKey) error {
+	err := binary.Write(w, endian, h.IsRemote)
+	if err != nil {
+		return err
+	}
+	return binary.Write(w, endian, h.IsPending)
+}
+
+func encodeCommitSet(w io.Writer, c *CommitSet) error {
+	if err := encodeHtlcSetKey(w, c.ConfCommitKey); err != nil {
+		return err
+	}
+
+	numSets := uint8(len(c.HtlcSets))
+	if err := binary.Write(w, endian, numSets); err != nil {
+		return err
+	}
+
+	for htlcSetKey, htlcs := range c.HtlcSets {
+		htlcSetKey := htlcSetKey
+		if err := encodeHtlcSetKey(w, &htlcSetKey); err != nil {
+			return err
+		}
+
+		if err := channeldb.SerializeHtlcs(w, htlcs...); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func decodeHtlcSetKey(r io.Reader, h *HtlcSetKey) error {
+	err := binary.Read(r, endian, &h.IsRemote)
+	if err != nil {
+		return err
+	}
+
+	return binary.Read(r, endian, &h.IsPending)
+}
+
+func decodeCommitSet(r io.Reader) (*CommitSet, error) {
+	c := &CommitSet{
+		ConfCommitKey: &HtlcSetKey{},
+		HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
+	}
+
+	if err := decodeHtlcSetKey(r, c.ConfCommitKey); err != nil {
+		return nil, err
+	}
+
+	var numSets uint8
+	if err := binary.Read(r, endian, &numSets); err != nil {
+		return nil, err
+	}
+
+	for i := uint8(0); i < numSets; i++ {
+		var htlcSetKey HtlcSetKey
+		if err := decodeHtlcSetKey(r, &htlcSetKey); err != nil {
+			return nil, err
+		}
+
+		htlcs, err := channeldb.DeserializeHtlcs(r)
+		if err != nil {
+			return nil, err
+		}
+
+		c.HtlcSets[htlcSetKey] = htlcs
+	}
+
+	return c, nil
+}

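For reference, the resulting on-disk layout is: two bool bytes for the confirmed HtlcSetKey, a uint8 count of HTLC sets, then each set's key followed by its serialized HTLCs. Below is a small sketch of just the fixed-size header, assuming (as this package's `endian` appears to be) big-endian byte order; the types here are local stand-ins, not imports from lnd:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// HtlcSetKey mirrors the two-bool key introduced by this PR.
type HtlcSetKey struct {
	IsRemote  bool
	IsPending bool
}

var endian = binary.BigEndian

func encodeHtlcSetKey(w *bytes.Buffer, h *HtlcSetKey) error {
	if err := binary.Write(w, endian, h.IsRemote); err != nil {
		return err
	}
	return binary.Write(w, endian, h.IsPending)
}

func main() {
	var b bytes.Buffer

	// Header: the confirmed key, then a uint8 count of HTLC sets.
	conf := HtlcSetKey{IsRemote: true, IsPending: true}
	_ = encodeHtlcSetKey(&b, &conf)
	_ = binary.Write(&b, endian, uint8(2))

	// binary.Write encodes each bool as a single byte, so the header
	// is exactly three bytes: 01 01 02.
	fmt.Printf("% x\n", b.Bytes())
}
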
@@ -19,7 +19,6 @@ import (
	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/input"
	"github.com/lightningnetwork/lnd/lnwallet"
	"github.com/lightningnetwork/lnd/lnwire"
)

var (

@@ -565,133 +564,6 @@ func TestContractResolutionsStorage(t *testing.T) {
	}
}

-// TestChainActionStorage tests that we're able to properly store a set of
-// chain actions, and then retrieve the same set of chain actions from disk.
-func TestChainActionStorage(t *testing.T) {
-	t.Parallel()
-
-	// First, we'll create a test instance of the ArbitratorLog
-	// implementation backed by boltdb.
-	testLog, cleanUp, err := newTestBoltArbLog(
-		testChainHash, testChanPoint2,
-	)
-	if err != nil {
-		t.Fatalf("unable to create test log: %v", err)
-	}
-	defer cleanUp()
-
-	chainActions := ChainActionMap{
-		NoAction: []channeldb.HTLC{
-			{
-				RHash:         testPreimage,
-				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
-				RefundTimeout: prand.Uint32(),
-				OutputIndex:   int32(prand.Uint32()),
-				Incoming:      true,
-				HtlcIndex:     prand.Uint64(),
-				LogIndex:      prand.Uint64(),
-				OnionBlob:     make([]byte, 0),
-				Signature:     make([]byte, 0),
-			},
-		},
-		HtlcTimeoutAction: []channeldb.HTLC{
-			{
-				RHash:         testPreimage,
-				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
-				RefundTimeout: prand.Uint32(),
-				OutputIndex:   int32(prand.Uint32()),
-				Incoming:      true,
-				HtlcIndex:     prand.Uint64(),
-				LogIndex:      prand.Uint64(),
-				OnionBlob:     make([]byte, 0),
-				Signature:     make([]byte, 0),
-			},
-		},
-		HtlcClaimAction: []channeldb.HTLC{
-			{
-				RHash:         testPreimage,
-				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
-				RefundTimeout: prand.Uint32(),
-				OutputIndex:   int32(prand.Uint32()),
-				Incoming:      true,
-				HtlcIndex:     prand.Uint64(),
-				LogIndex:      prand.Uint64(),
-				OnionBlob:     make([]byte, 0),
-				Signature:     make([]byte, 0),
-			},
-		},
-		HtlcFailNowAction: []channeldb.HTLC{
-			{
-				RHash:         testPreimage,
-				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
-				RefundTimeout: prand.Uint32(),
-				OutputIndex:   int32(prand.Uint32()),
-				Incoming:      true,
-				HtlcIndex:     prand.Uint64(),
-				LogIndex:      prand.Uint64(),
-				OnionBlob:     make([]byte, 0),
-				Signature:     make([]byte, 0),
-			},
-		},
-		HtlcOutgoingWatchAction: []channeldb.HTLC{
-			{
-				RHash:         testPreimage,
-				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
-				RefundTimeout: prand.Uint32(),
-				OutputIndex:   int32(prand.Uint32()),
-				Incoming:      true,
-				HtlcIndex:     prand.Uint64(),
-				LogIndex:      prand.Uint64(),
-				OnionBlob:     make([]byte, 0),
-				Signature:     make([]byte, 0),
-			},
-		},
-		HtlcIncomingWatchAction: []channeldb.HTLC{
-			{
-				RHash:         testPreimage,
-				Amt:           lnwire.MilliSatoshi(prand.Uint64()),
-				RefundTimeout: prand.Uint32(),
-				OutputIndex:   int32(prand.Uint32()),
-				Incoming:      true,
-				HtlcIndex:     prand.Uint64(),
-				LogIndex:      prand.Uint64(),
-				OnionBlob:     make([]byte, 0),
-				Signature:     make([]byte, 0),
-			},
-		},
-	}
-
-	// With our set of test chain actions constructed, we'll now insert
-	// them into the database, retrieve them, then assert equality with
-	// the set of chain actions created above.
-	if err := testLog.LogChainActions(chainActions); err != nil {
-		t.Fatalf("unable to write chain actions: %v", err)
-	}
-	diskActions, err := testLog.FetchChainActions()
-	if err != nil {
-		t.Fatalf("unable to read chain actions: %v", err)
-	}
-
-	for k, contracts := range chainActions {
-		diskContracts := diskActions[k]
-		if !reflect.DeepEqual(contracts, diskContracts) {
-			t.Fatalf("chain action mismatch: expected %v, got %v",
-				spew.Sdump(contracts), spew.Sdump(diskContracts))
-		}
-	}
-
-	// We'll now delete the state, then attempt to retrieve the set of
-	// chain actions; no resolutions should be found.
-	if err := testLog.WipeHistory(); err != nil {
-		t.Fatalf("unable to wipe log: %v", err)
-	}
-	actions, err := testLog.FetchChainActions()
-	if len(actions) != 0 {
-		t.Fatalf("expected no chain actions, instead found: %v",
-			len(actions))
-	}
-}

// TestStateMutation tests that we're able to properly mutate the state of the
// log, then retrieve that same mutated state from disk.
func TestStateMutation(t *testing.T) {

@@ -802,6 +674,62 @@ func TestScopeIsolation(t *testing.T) {
	}
}

+// TestCommitSetStorage tests that we're able to properly read/write active
+// commitment sets.
+func TestCommitSetStorage(t *testing.T) {
+	t.Parallel()
+
+	testLog, cleanUp, err := newTestBoltArbLog(
+		testChainHash, testChanPoint1,
+	)
+	if err != nil {
+		t.Fatalf("unable to create test log: %v", err)
+	}
+	defer cleanUp()
+
+	activeHTLCs := []channeldb.HTLC{
+		{
+			Amt:       1000,
+			OnionBlob: make([]byte, 0),
+			Signature: make([]byte, 0),
+		},
+	}
+
+	confTypes := []HtlcSetKey{
+		LocalHtlcSet, RemoteHtlcSet, RemotePendingHtlcSet,
+	}
+	for _, pendingRemote := range []bool{true, false} {
+		for _, confType := range confTypes {
+			commitSet := &CommitSet{
+				ConfCommitKey: &confType,
+				HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
+			}
+			commitSet.HtlcSets[LocalHtlcSet] = activeHTLCs
+			commitSet.HtlcSets[RemoteHtlcSet] = activeHTLCs
+
+			if pendingRemote {
+				commitSet.HtlcSets[RemotePendingHtlcSet] = activeHTLCs
+			}
+
+			err := testLog.InsertConfirmedCommitSet(commitSet)
+			if err != nil {
+				t.Fatalf("unable to write commit set: %v", err)
+			}
+
+			diskCommitSet, err := testLog.FetchConfirmedCommitSet()
+			if err != nil {
+				t.Fatalf("unable to read commit set: %v", err)
+			}
+
+			if !reflect.DeepEqual(commitSet, diskCommitSet) {
+				t.Fatalf("commit set mismatch: expected %v, got %v",
+					spew.Sdump(commitSet), spew.Sdump(diskCommitSet))
+			}
+		}
+	}
+}

func init() {
	testSignDesc.KeyDesc.PubKey, _ = btcec.ParsePubKey(key1, btcec.S256())

@@ -296,8 +296,25 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel,
		return c.resolveContract(chanPoint, chanLog)
	}

+	// Finally, we'll need to construct a series of htlc sets based on all
+	// currently known valid commitments.
+	htlcSets := make(map[HtlcSetKey]htlcSet)
+	htlcSets[LocalHtlcSet] = newHtlcSet(channel.LocalCommitment.Htlcs)
+	htlcSets[RemoteHtlcSet] = newHtlcSet(channel.RemoteCommitment.Htlcs)
+
+	pendingRemoteCommitment, err := channel.RemoteCommitChainTip()
+	if err != nil && err != channeldb.ErrNoPendingCommit {
+		blockEpoch.Cancel()
+		return nil, err
+	}
+	if pendingRemoteCommitment != nil {
+		htlcSets[RemotePendingHtlcSet] = newHtlcSet(
+			pendingRemoteCommitment.Commitment.Htlcs,
+		)
+	}
+
	return NewChannelArbitrator(
-		arbCfg, channel.LocalCommitment.Htlcs, chanLog,
+		arbCfg, htlcSets, chanLog,
	), nil
}

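The map built here is what replaces the old single slice of starting HTLCs: one htlcSet per currently valid commitment, keyed by HtlcSetKey. A reduced, runnable sketch of the same construction follows, with invented stand-in types and bare HTLC indexes in place of channeldb.HTLC:

package main

import "fmt"

// Minimal stand-ins for the real types; names mirror the PR but the
// HTLC payload is reduced to an index for illustration.
type HtlcSetKey struct{ IsRemote, IsPending bool }

var (
	LocalHtlcSet         = HtlcSetKey{false, false}
	RemoteHtlcSet        = HtlcSetKey{true, false}
	RemotePendingHtlcSet = HtlcSetKey{true, true}
)

type htlcSet struct{ indexes []uint64 }

func main() {
	// Mirrors newActiveChannelArbitrator: the local and remote views are
	// always present; the dangling remote view exists only if an
	// unrevoked remote commitment is in flight.
	htlcSets := make(map[HtlcSetKey]htlcSet)
	htlcSets[LocalHtlcSet] = htlcSet{indexes: []uint64{0, 1}}
	htlcSets[RemoteHtlcSet] = htlcSet{indexes: []uint64{0, 1}}

	havePendingRemote := true // e.g. RemoteCommitChainTip returned a tip
	if havePendingRemote {
		htlcSets[RemotePendingHtlcSet] = htlcSet{indexes: []uint64{0, 1, 2}}
	}

	fmt.Println("tracked commitment views:", len(htlcSets))
}
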
@@ -446,7 +463,7 @@ func (c *ChainArbitrator) Start() error {

		// We can also leave off the set of HTLC's here as since the
		// channel is already in the process of being fully resolved,
-		// no new HTLC's we be added.
+		// no new HTLC's will be added.
		c.activeChannels[chanPoint] = NewChannelArbitrator(
			arbCfg, nil, chanLog,
		)

@@ -557,15 +574,27 @@ func (c *ChainArbitrator) Stop() error {
	return nil
}

+// ContractUpdate is a message that packages the latest set of active HTLCs on
+// a commitment, and also identifies which commitment received a new set of
+// HTLCs.
+type ContractUpdate struct {
+	// HtlcKey identifies which commitment the HTLCs below are present on.
+	HtlcKey HtlcSetKey
+
+	// Htlcs are the set of active HTLCs on the commitment identified by
+	// the above HtlcKey.
+	Htlcs []channeldb.HTLC
+}
+
// ContractSignals wraps the two signals that affect the state of a channel
// being watched by an arbitrator. The two signals we care about are: the
// channel has a new set of HTLC's, and the remote party has just broadcast
// their version of the commitment transaction.
type ContractSignals struct {
-	// HtlcUpdates is a channel that once we new commitment updates takes
-	// place, the later set of HTLC's on the commitment transaction should
-	// be sent over.
-	HtlcUpdates chan []channeldb.HTLC
+	// HtlcUpdates is a channel that the link will use to update the
+	// designated channel arbitrator when the set of HTLCs on any valid
+	// commitment changes.
+	HtlcUpdates chan *ContractUpdate

	// ShortChanID is the up to date short channel ID for a contract. This
	// can change either if when the contract was added it didn't yet have

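The new ContractUpdate message lets the link tag each HTLC snapshot with the commitment it belongs to, rather than implying a single commitment. A hedged, self-contained sketch of that producer/consumer flow, with stand-in types and the HTLC payload reduced to bare indexes:

package main

import "fmt"

type HtlcSetKey struct{ IsRemote, IsPending bool }

// ContractUpdate mirrors the struct above, with the HTLC slice reduced
// to indexes for illustration.
type ContractUpdate struct {
	HtlcKey HtlcSetKey
	Htlcs   []uint64
}

func main() {
	htlcUpdates := make(chan *ContractUpdate, 1)

	// Link side: a new remote commitment was signed, so ship the full
	// HTLC set for that specific commitment, tagged with its key.
	htlcUpdates <- &ContractUpdate{
		HtlcKey: HtlcSetKey{IsRemote: true, IsPending: true},
		Htlcs:   []uint64{7, 9},
	}

	// Arbitrator side: replace the tracked set for exactly that key,
	// leaving the other commitment views untouched.
	upd := <-htlcUpdates
	activeHTLCs := map[HtlcSetKey][]uint64{}
	activeHTLCs[upd.HtlcKey] = upd.Htlcs
	fmt.Println("updated set for", upd.HtlcKey, "->", activeHTLCs[upd.HtlcKey])
}
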
@@ -30,20 +30,77 @@ const (
	maxCommitPointPollTimeout = 10 * time.Minute
)

-// LocalUnilateralCloseInfo encapsulates all the informnation we need to act
-// on a local force close that gets confirmed.
+// LocalUnilateralCloseInfo encapsulates all the information we need to act on
+// a local force close that gets confirmed.
type LocalUnilateralCloseInfo struct {
	*chainntnfs.SpendDetail
	*lnwallet.LocalForceCloseSummary
	*channeldb.ChannelCloseSummary

+	// CommitSet is the set of known valid commitments at the time the
+	// remote party's commitment hit the chain.
+	CommitSet CommitSet
}

-// CooperativeCloseInfo encapsulates all the informnation we need to act
-// on a cooperative close that gets confirmed.
+// CooperativeCloseInfo encapsulates all the information we need to act on a
+// cooperative close that gets confirmed.
type CooperativeCloseInfo struct {
	*channeldb.ChannelCloseSummary
}

+// RemoteUnilateralCloseInfo wraps the normal UnilateralCloseSummary to couple
+// the CommitSet at the time of channel closure.
+type RemoteUnilateralCloseInfo struct {
+	*lnwallet.UnilateralCloseSummary
+
+	// CommitSet is the set of known valid commitments at the time the
+	// remote party's commitment hit the chain.
+	CommitSet CommitSet
+}
+
+// CommitSet is a collection of the set of known valid commitments at a given
+// instant. If ConfCommitKey is set, then the commitment identified by the
+// HtlcSetKey has hit the chain. This struct will be used to examine all live
+// HTLCs to determine if any additional actions need to be made based on the
+// remote party's commitments.
+type CommitSet struct {
+	// ConfCommitKey, if non-nil, identifies the commitment that was
+	// confirmed in the chain.
+	ConfCommitKey *HtlcSetKey
+
+	// HtlcSets stores the set of all known active HTLCs for each active
+	// commitment at the time of channel closure.
+	HtlcSets map[HtlcSetKey][]channeldb.HTLC
+}
+
+// IsEmpty returns true if there are no HTLCs at all within all commitments
+// that are a part of this commitment diff.
+func (c *CommitSet) IsEmpty() bool {
+	if c == nil {
+		return true
+	}
+
+	for _, htlcs := range c.HtlcSets {
+		if len(htlcs) != 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+// toActiveHTLCSets returns the set of all active HTLCs across all commitment
+// transactions.
+func (c *CommitSet) toActiveHTLCSets() map[HtlcSetKey]htlcSet {
+	htlcSets := make(map[HtlcSetKey]htlcSet)
+
+	for htlcSetKey, htlcs := range c.HtlcSets {
+		htlcSets[htlcSetKey] = newHtlcSet(htlcs)
+	}
+
+	return htlcSets
+}

// ChainEventSubscription is a struct that houses a subscription to be notified
// for any on-chain events related to a channel. There are three types of
// possible on-chain events: a cooperative channel closure, a unilateral

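Note that IsEmpty is deliberately written with a nil-receiver guard, which is what lets later code call confCommitSet.IsEmpty() without first checking whether an older node ever stored a commit set. A small stand-alone illustration, with stand-in types and HTLCs reduced to indexes:

package main

import "fmt"

type HtlcSetKey struct{ IsRemote, IsPending bool }

type CommitSet struct {
	ConfCommitKey *HtlcSetKey
	HtlcSets      map[HtlcSetKey][]uint64
}

// IsEmpty is safe on a nil *CommitSet, so callers holding the nil
// returned for legacy channels need no nil check of their own.
func (c *CommitSet) IsEmpty() bool {
	if c == nil {
		return true
	}
	for _, htlcs := range c.HtlcSets {
		if len(htlcs) != 0 {
			return false
		}
	}
	return true
}

func main() {
	var none *CommitSet
	fmt.Println(none.IsEmpty()) // true: safe on a nil pointer

	withHTLC := &CommitSet{HtlcSets: map[HtlcSetKey][]uint64{
		{IsRemote: true, IsPending: false}: {42},
	}}
	fmt.Println(withHTLC.IsEmpty()) // false
}
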
@@ -55,7 +112,7 @@ type ChainEventSubscription struct {

	// RemoteUnilateralClosure is a channel that will be sent upon in the
	// event that the remote party's commitment transaction is confirmed.
-	RemoteUnilateralClosure chan *lnwallet.UnilateralCloseSummary
+	RemoteUnilateralClosure chan *RemoteUnilateralCloseInfo

	// LocalUnilateralClosure is a channel that will be sent upon in the
	// event that our commitment transaction is confirmed.

@@ -249,7 +306,7 @@ func (c *chainWatcher) SubscribeChannelEvents() *ChainEventSubscription {

	sub := &ChainEventSubscription{
		ChanPoint:               c.cfg.chanState.FundingOutpoint,
-		RemoteUnilateralClosure: make(chan *lnwallet.UnilateralCloseSummary, 1),
+		RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1),
		LocalUnilateralClosure:  make(chan *LocalUnilateralCloseInfo, 1),
		CooperativeClosure:      make(chan *CooperativeCloseInfo, 1),
		ContractBreach:          make(chan *lnwallet.BreachRetribution, 1),

@@ -373,6 +430,30 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
			return
		}

+		// Fetch the current known commit height for the remote party,
+		// and their pending commitment chain tip if it exists.
+		remoteStateNum := remoteCommit.CommitHeight
+		remoteChainTip, err := c.cfg.chanState.RemoteCommitChainTip()
+		if err != nil && err != channeldb.ErrNoPendingCommit {
+			log.Errorf("unable to obtain chain tip for "+
+				"ChannelPoint(%v): %v",
+				c.cfg.chanState.FundingOutpoint, err)
+			return
+		}
+
+		// Now that we have all the possible valid commitments, we'll
+		// make the CommitSet that the ChannelArbitrator will need in
+		// order to carry out its duty.
+		commitSet := CommitSet{
+			HtlcSets: make(map[HtlcSetKey][]channeldb.HTLC),
+		}
+		commitSet.HtlcSets[LocalHtlcSet] = localCommit.Htlcs
+		commitSet.HtlcSets[RemoteHtlcSet] = remoteCommit.Htlcs
+		if remoteChainTip != nil {
+			htlcs := remoteChainTip.Commitment.Htlcs
+			commitSet.HtlcSets[RemotePendingHtlcSet] = htlcs
+		}
+
		// We'll now retrieve the latest state of the revocation store
		// so we can populate the information within the channel state
		// object that we have.

@@ -411,8 +492,10 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
		// as we don't have any further processing we need to do (we
		// can't cheat ourselves :p).
		if isOurCommit {
+			commitSet.ConfCommitKey = &LocalHtlcSet
+
			if err := c.dispatchLocalForceClose(
-				commitSpend, *localCommit,
+				commitSpend, *localCommit, commitSet,
			); err != nil {
				log.Errorf("unable to handle local "+
					"close for chan_point=%v: %v",

@@ -439,17 +522,6 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
		log.Warnf("Unprompted commitment broadcast for "+
			"ChannelPoint(%v) ", c.cfg.chanState.FundingOutpoint)

-		// Fetch the current known commit height for the remote party,
-		// and their pending commitment chain tip if it exist.
-		remoteStateNum := remoteCommit.CommitHeight
-		remoteChainTip, err := c.cfg.chanState.RemoteCommitChainTip()
-		if err != nil && err != channeldb.ErrNoPendingCommit {
-			log.Errorf("unable to obtain chain tip for "+
-				"ChannelPoint(%v): %v",
-				c.cfg.chanState.FundingOutpoint, err)
-			return
-		}
-
		// If this channel has been recovered, then we'll modify our
		// behavior as it isn't possible for us to close out the
		// channel off-chain ourselves. It can only be the remote party

@@ -465,9 +537,10 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
		// we'll trigger the unilateral close signal so subscribers can
		// clean up the state as necessary.
		case broadcastStateNum == remoteStateNum && !isRecoveredChan:
+			commitSet.ConfCommitKey = &RemoteHtlcSet
+
			err := c.dispatchRemoteForceClose(
-				commitSpend, *remoteCommit,
+				commitSpend, *remoteCommit, commitSet,
				c.cfg.chanState.RemoteCurrentRevocation,
			)
			if err != nil {

@@ -484,8 +557,10 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
		case broadcastStateNum == remoteStateNum+1 &&
			remoteChainTip != nil && !isRecoveredChan:

+			commitSet.ConfCommitKey = &RemotePendingHtlcSet
+
			err := c.dispatchRemoteForceClose(
-				commitSpend, remoteChainTip.Commitment,
+				commitSpend, *remoteCommit, commitSet,
				c.cfg.chanState.RemoteNextRevocation,
			)
			if err != nil {

@@ -553,14 +628,15 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) {
				c.cfg.chanState.FundingOutpoint)

			// Since we don't have the commitment stored for this
-			// state, we'll just pass an empty commitment. Note
-			// that this means we won't be able to recover any HTLC
-			// funds.
+			// state, we'll just pass an empty commitment within
+			// the commitment set. Note that this means we won't be
+			// able to recover any HTLC funds.
			//
			// TODO(halseth): can we try to recover some HTLCs?
+			commitSet.ConfCommitKey = &RemoteHtlcSet
			err = c.dispatchRemoteForceClose(
				commitSpend, channeldb.ChannelCommitment{},
-				commitPoint,
+				commitSet, commitPoint,
			)
			if err != nil {
				log.Errorf("unable to handle remote "+

@@ -691,7 +767,7 @@ func (c *chainWatcher) dispatchCooperativeClose(commitSpend *chainntnfs.SpendDet
// dispatchLocalForceClose processes a unilateral close by us being confirmed.
func (c *chainWatcher) dispatchLocalForceClose(
	commitSpend *chainntnfs.SpendDetail,
-	localCommit channeldb.ChannelCommitment) error {
+	localCommit channeldb.ChannelCommitment, commitSet CommitSet) error {

	log.Infof("Local unilateral close of ChannelPoint(%v) "+
		"detected", c.cfg.chanState.FundingOutpoint)

@@ -749,7 +825,10 @@ func (c *chainWatcher) dispatchLocalForceClose(
	// With the event processed, we'll now notify all subscribers of the
	// event.
	closeInfo := &LocalUnilateralCloseInfo{
-		commitSpend, forceClose, closeSummary,
+		SpendDetail:            commitSpend,
+		LocalForceCloseSummary: forceClose,
+		ChannelCloseSummary:    closeSummary,
+		CommitSet:              commitSet,
	}
	c.Lock()
	for _, sub := range c.clientSubscriptions {

@@ -781,7 +860,7 @@ func (c *chainWatcher) dispatchLocalForceClose(
func (c *chainWatcher) dispatchRemoteForceClose(
	commitSpend *chainntnfs.SpendDetail,
	remoteCommit channeldb.ChannelCommitment,
-	commitPoint *btcec.PublicKey) error {
+	commitSet CommitSet, commitPoint *btcec.PublicKey) error {

	log.Infof("Unilateral close of ChannelPoint(%v) "+
		"detected", c.cfg.chanState.FundingOutpoint)

@@ -802,7 +881,10 @@ func (c *chainWatcher) dispatchRemoteForceClose(
	c.Lock()
	for _, sub := range c.clientSubscriptions {
		select {
-		case sub.RemoteUnilateralClosure <- uniClose:
+		case sub.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
+			UnilateralCloseSummary: uniClose,
+			CommitSet:              commitSet,
+		}:
		case <-c.quit:
			c.Unlock()
			return fmt.Errorf("exiting")

@@ -104,7 +104,7 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) {

	// We should get a new spend event over the remote unilateral close
	// event channel.
-	var uniClose *lnwallet.UnilateralCloseSummary
+	var uniClose *RemoteUnilateralCloseInfo
	select {
	case uniClose = <-chanEvents.RemoteUnilateralClosure:
	case <-time.After(time.Second * 15):

@@ -186,7 +186,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {

	// With the HTLC added, we'll now manually initiate a state transition
	// from Alice to Bob.
-	_, _, err = aliceChannel.SignNextCommitment()
+	_, _, _, err = aliceChannel.SignNextCommitment()
	if err != nil {
		t.Fatal(err)
	}

@@ -211,7 +211,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {

	// We should get a new spend event over the remote unilateral close
	// event channel.
-	var uniClose *lnwallet.UnilateralCloseSummary
+	var uniClose *RemoteUnilateralCloseInfo
	select {
	case uniClose = <-chanEvents.RemoteUnilateralClosure:
	case <-time.After(time.Second * 15):

@@ -343,7 +343,7 @@ func TestChainWatcherDataLossProtect(t *testing.T) {

	// We should get a new uni close resolution that indicates we
	// processed the DLP scenario.
-	var uniClose *lnwallet.UnilateralCloseSummary
+	var uniClose *RemoteUnilateralCloseInfo
	select {
	case uniClose = <-chanEvents.RemoteUnilateralClosure:
		// If we processed this as a DLP case, then the remote

@@ -3,6 +3,7 @@ package contractcourt

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"

@@ -161,14 +162,14 @@ type ContractReport struct {
// htlcSet represents the set of active HTLCs on a given commitment
// transaction.
type htlcSet struct {
-	// incomingHTLCs is a map of all incoming HTLCs on our commitment
-	// transaction. We may potentially go onchain to claim the funds sent
-	// to us within this set.
+	// incomingHTLCs is a map of all incoming HTLCs on the target
+	// commitment transaction. We may potentially go onchain to claim the
+	// funds sent to us within this set.
	incomingHTLCs map[uint64]channeldb.HTLC

-	// outgoingHTLCs is a map of all outgoing HTLCs on our commitment
-	// transaction. We may potentially go onchain to reclaim the funds that
-	// are currently in limbo.
+	// outgoingHTLCs is a map of all outgoing HTLCs on the target
+	// commitment transaction. We may potentially go onchain to reclaim the
+	// funds that are currently in limbo.
	outgoingHTLCs map[uint64]channeldb.HTLC
}

@@ -191,6 +192,44 @@ func newHtlcSet(htlcs []channeldb.HTLC) htlcSet {
	}
}

+// HtlcSetKey is a two-tuple that uniquely identifies a set of HTLCs on a
+// commitment transaction.
+type HtlcSetKey struct {
+	// IsRemote denotes if the HTLCs are on the remote commitment
+	// transaction.
+	IsRemote bool
+
+	// IsPending denotes if the commitment transaction that the HTLCs are
+	// on is pending (the higher of two unrevoked commitments).
+	IsPending bool
+}
+
+var (
+	// LocalHtlcSet is the HtlcSetKey used for local commitments.
+	LocalHtlcSet = HtlcSetKey{IsRemote: false, IsPending: false}
+
+	// RemoteHtlcSet is the HtlcSetKey used for remote commitments.
+	RemoteHtlcSet = HtlcSetKey{IsRemote: true, IsPending: false}
+
+	// RemotePendingHtlcSet is the HtlcSetKey used for dangling remote
+	// commitment transactions.
+	RemotePendingHtlcSet = HtlcSetKey{IsRemote: true, IsPending: true}
+)
+
+// String returns a human-readable string describing the target HtlcSetKey.
+func (h HtlcSetKey) String() string {
+	switch h {
+	case LocalHtlcSet:
+		return "LocalHtlcSet"
+	case RemoteHtlcSet:
+		return "RemoteHtlcSet"
+	case RemotePendingHtlcSet:
+		return "RemotePendingHtlcSet"
+	default:
+		return "unknown HtlcSetKey"
+	}
+}

// ChannelArbitrator is the on-chain arbitrator for a particular channel. The
// struct will keep in sync with the current set of HTLCs on the commitment
// transaction. The job of the attendant is to go on-chain to either settle or

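Because HtlcSetKey contains only two comparable bools, it can be used directly as a Go map key, and only three of the four combinations are meaningful (only the remote side can have a dangling commitment). A self-contained illustration of that usage; the String method mirrors the shape of the code above:

package main

import "fmt"

type HtlcSetKey struct{ IsRemote, IsPending bool }

var (
	LocalHtlcSet         = HtlcSetKey{false, false}
	RemoteHtlcSet        = HtlcSetKey{true, false}
	RemotePendingHtlcSet = HtlcSetKey{true, true}
)

func (h HtlcSetKey) String() string {
	switch h {
	case LocalHtlcSet:
		return "LocalHtlcSet"
	case RemoteHtlcSet:
		return "RemoteHtlcSet"
	case RemotePendingHtlcSet:
		return "RemotePendingHtlcSet"
	default:
		return "unknown HtlcSetKey"
	}
}

func main() {
	// The struct is comparable, so it keys the per-commitment sets
	// directly; {IsRemote: false, IsPending: true} simply never occurs.
	counts := map[HtlcSetKey]int{
		LocalHtlcSet:         2,
		RemotePendingHtlcSet: 1,
	}
	for k, n := range counts {
		fmt.Printf("%v has %d HTLC(s)\n", k, n)
	}
}
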
@@ -207,9 +246,9 @@ type ChannelArbitrator struct {
	// its next action, and the state of any unresolved contracts.
	log ArbitratorLog

-	// activeHTLCs is the set of active incoming/outgoing HTLC's on the
-	// commitment transaction.
-	activeHTLCs htlcSet
+	// activeHTLCs is the set of active incoming/outgoing HTLC's on all
+	// currently valid commitment transactions.
+	activeHTLCs map[HtlcSetKey]htlcSet

	// cfg contains all the functionality that the ChannelArbitrator requires
	// to do its duty.

@@ -222,7 +261,7 @@ type ChannelArbitrator struct {
	// htlcUpdates is a channel that is sent upon with new updates from the
	// active channel. Each time a new commitment state is accepted, the
	// set of HTLC's on the new state should be sent across this channel.
-	htlcUpdates <-chan []channeldb.HTLC
+	htlcUpdates <-chan *ContractUpdate

	// activeResolvers is a slice of any active resolvers. This is used to
	// be able to signal them for shutdown in the case that we shutdown.

@@ -252,15 +291,15 @@ type ChannelArbitrator struct {
// NewChannelArbitrator returns a new instance of a ChannelArbitrator backed by
// the passed config struct.
func NewChannelArbitrator(cfg ChannelArbitratorConfig,
-	startingHTLCs []channeldb.HTLC, log ArbitratorLog) *ChannelArbitrator {
+	htlcSets map[HtlcSetKey]htlcSet, log ArbitratorLog) *ChannelArbitrator {

	return &ChannelArbitrator{
		log:              log,
		signalUpdates:    make(chan *signalUpdateMsg),
-		htlcUpdates:      make(<-chan []channeldb.HTLC),
+		htlcUpdates:      make(<-chan *ContractUpdate),
		resolutionSignal: make(chan struct{}),
		forceCloseReqs:   make(chan *forceCloseReq),
-		activeHTLCs:      newHtlcSet(startingHTLCs),
+		activeHTLCs:      htlcSets,
		cfg:              cfg,
		quit:             make(chan struct{}),
	}

@@ -332,10 +371,22 @@ func (c *ChannelArbitrator) Start() error {
		}
	}

+	// Next, we'll fetch our confirmed commitment set. This will only
+	// exist if the channel has been closed out on chain for modern nodes.
+	// For older nodes, this won't be found at all, and we'll rely on the
+	// existing written chain actions. Additionally, if this channel hasn't
+	// logged any actions in the log, then this field won't be present.
+	commitSet, err := c.log.FetchConfirmedCommitSet()
+	if err != nil && err != errNoCommitSet && err != errScopeBucketNoExist {
+		return err
+	}
+
	// We'll now attempt to advance our state forward based on the current
	// on-chain state, and our set of active contracts.
	startingState := c.state
-	nextState, _, err := c.advanceState(triggerHeight, trigger)
+	nextState, _, err := c.advanceState(
+		triggerHeight, trigger, commitSet,
+	)
	if err != nil {
		switch err {

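The tolerated sentinel errors here are what keep older channels working: a missing scope bucket or a missing commit set is expected, not fatal. A minimal sketch of the same error-filtering pattern, with a stubbed fetch function invented for illustration:

package main

import (
	"errors"
	"fmt"
)

var (
	errScopeBucketNoExist = errors.New("scope bucket does not exist")
	errNoCommitSet        = errors.New("no commit set exists")
)

// fetchConfirmedCommitSet stands in for c.log.FetchConfirmedCommitSet;
// here it always reports the "never stored" case.
func fetchConfirmedCommitSet() ([]byte, error) {
	return nil, errNoCommitSet
}

func main() {
	// Mirrors Start(): both sentinels are expected for legacy channels
	// or channels that never closed, so only other errors abort.
	commitSet, err := fetchConfirmedCommitSet()
	if err != nil && err != errNoCommitSet && err != errScopeBucketNoExist {
		fmt.Println("fatal:", err)
		return
	}
	fmt.Printf("commitSet=%v (nil is fine; advanceState handles it)\n", commitSet)
}
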
@@ -398,18 +449,24 @@ func (c *ChannelArbitrator) relaunchResolvers() error {
	commitHash := contractResolutions.CommitHash

	// Reconstruct the htlc outpoints and data from the chain action log.
-	// The purpose of the constructed htlc map is to supplement to resolvers
-	// restored from database with extra data. Ideally this data is stored
-	// as part of the resolver in the log. This is a workaround to prevent a
-	// db migration.
+	// The purpose of the constructed htlc map is to supplement the
+	// resolvers restored from database with extra data. Ideally this data
+	// is stored as part of the resolver in the log. This is a workaround
+	// to prevent a db migration. We use all available htlc sets here in
+	// order to ensure we have complete coverage.
	htlcMap := make(map[wire.OutPoint]*channeldb.HTLC)
-	chainActions, err := c.log.FetchChainActions()
-	if err != nil {
-		log.Errorf("unable to fetch chain actions: %v", err)
-		return err
-	}
-	for _, htlcs := range chainActions {
-		for _, htlc := range htlcs {
+	for _, htlcs := range c.activeHTLCs {
+		for _, htlc := range htlcs.incomingHTLCs {
+			htlc := htlc
+			outpoint := wire.OutPoint{
+				Hash:  commitHash,
+				Index: uint32(htlc.OutputIndex),
+			}
+			htlcMap[outpoint] = &htlc
+		}
+
+		for _, htlc := range htlcs.outgoingHTLCs {
			htlc := htlc
			outpoint := wire.OutPoint{
				Hash:  commitHash,
				Index: uint32(htlc.OutputIndex),

@@ -600,8 +657,9 @@ func (t transitionTrigger) String() string {
// the appropriate state transition if necessary. The next state we transition
// to is returned. Additionally, if the next transition results in a commitment
// broadcast, the commitment transaction itself is returned.
-func (c *ChannelArbitrator) stateStep(triggerHeight uint32,
-	trigger transitionTrigger) (ArbitratorState, *wire.MsgTx, error) {
+func (c *ChannelArbitrator) stateStep(
+	triggerHeight uint32, trigger transitionTrigger,
+	confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {

	var (
		nextState ArbitratorState

@@ -619,10 +677,20 @@ func (c *ChannelArbitrator) stateStep(triggerHeight uint32,
		// As a new block has been connected to the end of the main
		// chain, we'll check to see if we need to make any on-chain
		// claims on behalf of the channel contract that we're
-		// arbitrating for.
-		chainActions, err := c.checkChainActions(triggerHeight, trigger)
+		// arbitrating for. If a commitment has confirmed, then we'll
+		// use the set snapshot from the chain, otherwise we'll use our
+		// current set.
+		var htlcs map[HtlcSetKey]htlcSet
+		if confCommitSet != nil {
+			htlcs = confCommitSet.toActiveHTLCSets()
+		} else {
+			htlcs = c.activeHTLCs
+		}
+		chainActions, err := c.checkLocalChainActions(
+			triggerHeight, trigger, htlcs, false,
+		)
		if err != nil {
-			return StateError, closeTx, err
+			return StateDefault, nil, err
		}

		// If there are no actions to be made, then we'll remain in the

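The branch above chooses between two sources of truth: the snapshot persisted at confirmation time, or the live in-memory sets. A reduced, runnable sketch of that selection, using stand-in types:

package main

import "fmt"

type HtlcSetKey struct{ IsRemote, IsPending bool }
type htlcSet struct{ n int }

type CommitSet struct{ HtlcSets map[HtlcSetKey]htlcSet }

func (c *CommitSet) toActiveHTLCSets() map[HtlcSetKey]htlcSet {
	return c.HtlcSets
}

// pickHTLCs mirrors the branch in stateStep: prefer the confirmed
// snapshot when a commitment already hit the chain, else the live set.
func pickHTLCs(confCommitSet *CommitSet,
	active map[HtlcSetKey]htlcSet) map[HtlcSetKey]htlcSet {

	if confCommitSet != nil {
		return confCommitSet.toActiveHTLCSets()
	}
	return active
}

func main() {
	live := map[HtlcSetKey]htlcSet{{}: {n: 3}}
	fmt.Println(len(pickHTLCs(nil, live)))                       // 1: live set wins
	fmt.Println(len(pickHTLCs(&CommitSet{HtlcSets: nil}, live))) // 0: snapshot wins
}
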
@@ -642,9 +710,6 @@ func (c *ChannelArbitrator) stateStep(triggerHeight uint32,
			newLogClosure(func() string {
				return spew.Sdump(chainActions)
			}))
-		if err := c.log.LogChainActions(chainActions); err != nil {
-			return StateError, closeTx, err
-		}

		// Depending on the type of trigger, we'll either "tunnel"
		// through to a farther state, or just proceed linearly to the

@@ -787,11 +852,6 @@ func (c *ChannelArbitrator) stateStep(triggerHeight uint32,
	case StateContractClosed:
		// First, we'll fetch our chain actions, and both sets of
		// resolutions so we can process them.
-		chainActions, err := c.log.FetchChainActions()
-		if err != nil {
-			log.Errorf("unable to fetch chain actions: %v", err)
-			return StateError, closeTx, err
-		}
		contractResolutions, err := c.log.FetchContractResolutions()
		if err != nil {
			log.Errorf("unable to fetch contract resolutions: %v",

@@ -799,10 +859,10 @@ func (c *ChannelArbitrator) stateStep(triggerHeight uint32,
			return StateError, closeTx, err
		}

-		// If the resolution is empty, then we're done here. We don't
-		// need to launch any resolvers, and can go straight to our
-		// final state.
-		if contractResolutions.IsEmpty() {
+		// If the resolution is empty, and we have no HTLCs at all to
+		// tend to, then we're done here. We don't need to launch any
+		// resolvers, and can go straight to our final state.
+		if contractResolutions.IsEmpty() && confCommitSet.IsEmpty() {
			log.Infof("ChannelArbitrator(%v): contract "+
				"resolutions empty, marking channel as fully resolved!",
				c.cfg.ChanPoint)

@@ -835,7 +895,8 @@ func (c *ChannelArbitrator) stateStep(triggerHeight uint32,
		// actions, we create the structures we need to resolve all
		// outstanding contracts.
		htlcResolvers, pktsToSend, err := c.prepContractResolutions(
-			chainActions, contractResolutions, triggerHeight,
+			contractResolutions, triggerHeight, trigger,
+			confCommitSet,
		)
		if err != nil {
			log.Errorf("ChannelArbitrator(%v): unable to "+

@@ -851,11 +912,14 @@ func (c *ChannelArbitrator) stateStep(triggerHeight uint32,

		// With the commitment broadcast, we'll then send over all
		// messages we can send immediately.
-		err = c.cfg.DeliverResolutionMsg(pktsToSend...)
-		if err != nil {
-			// TODO(roasbeef): make sure packet sends are idempotent
-			log.Errorf("unable to send pkts: %v", err)
-			return StateError, closeTx, err
+		if len(pktsToSend) != 0 {
+			err := c.cfg.DeliverResolutionMsg(pktsToSend...)
+			if err != nil {
+				// TODO(roasbeef): make sure packet sends are
+				// idempotent
+				log.Errorf("unable to send pkts: %v", err)
+				return StateError, closeTx, err
+			}
		}

		log.Debugf("ChannelArbitrator(%v): inserting %v contract "+

@@ -932,8 +996,9 @@ func (c *ChannelArbitrator) launchResolvers(resolvers []ContractResolver) {
// redundant transition, meaning that the state transition is a noop. The final
// param is a callback that allows the caller to execute an arbitrary action
// after each state transition.
-func (c *ChannelArbitrator) advanceState(triggerHeight uint32,
-	trigger transitionTrigger) (ArbitratorState, *wire.MsgTx, error) {
+func (c *ChannelArbitrator) advanceState(
+	triggerHeight uint32, trigger transitionTrigger,
+	confCommitSet *CommitSet) (ArbitratorState, *wire.MsgTx, error) {

	var (
		priorState ArbitratorState

@@ -949,7 +1014,7 @@ func (c *ChannelArbitrator) advanceState(triggerHeight uint32,
			priorState)

		nextState, closeTx, err := c.stateStep(
-			triggerHeight, trigger,
+			triggerHeight, trigger, confCommitSet,
		)
		if err != nil {
			log.Errorf("ChannelArbitrator(%v): unable to advance "+

@@ -1050,6 +1115,13 @@ func (c ChainAction) String() string {
// be acted upon for a given action type. The channel
type ChainActionMap map[ChainAction][]channeldb.HTLC

+// Merge merges the passed chain actions with the target chain action map.
+func (c ChainActionMap) Merge(actions ChainActionMap) {
+	for chainAction, htlcs := range actions {
+		c[chainAction] = append(c[chainAction], htlcs...)
+	}
+}

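Merge uses append semantics, so actions for the same ChainAction key accumulate across the local and dangling-remote passes instead of overwriting one another. A self-contained illustration, with HTLC indexes standing in for channeldb.HTLC:

package main

import "fmt"

type ChainAction uint8

const HtlcFailNowAction ChainAction = 3 // value is illustrative

type ChainActionMap map[ChainAction][]uint64

// Merge appends rather than overwrites, so two passes over different
// commitment views can contribute to the same action bucket.
func (c ChainActionMap) Merge(actions ChainActionMap) {
	for chainAction, htlcs := range actions {
		c[chainAction] = append(c[chainAction], htlcs...)
	}
}

func main() {
	local := ChainActionMap{HtlcFailNowAction: {1}}
	remote := ChainActionMap{HtlcFailNowAction: {7, 9}}

	local.Merge(remote)
	fmt.Println(local[HtlcFailNowAction]) // [1 7 9]
}
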
// shouldGoOnChain takes into account the absolute timeout of the HTLC, if the
// confirmation delta that we need is close, and returns a bool indicating if
// we should go on chain to claim. We do this rather than waiting up until the

@@ -1077,15 +1149,15 @@ func (c *ChannelArbitrator) shouldGoOnChain(htlcExpiry, broadcastDelta,
	return currentHeight >= broadcastCutOff
}

-// checkChainActions is called for each new block connected to the end of the
-// main chain. Given the new block height, this new method will examine all
+// checkCommitChainActions is called for each new block connected to the end of
+// the main chain. Given the new block height, this new method will examine all
// active HTLC's, and determine if we need to go on-chain to claim any of them.
// A map of action -> []htlc is returned, detailing what action (if any) should
// be performed for each HTLC. For timed out HTLC's, once the commitment has
// been sufficiently confirmed, the HTLC's should be canceled backwards. For
// redeemed HTLC's, we should send the pre-image back to the incoming link.
-func (c *ChannelArbitrator) checkChainActions(height uint32,
-	trigger transitionTrigger) (ChainActionMap, error) {
+func (c *ChannelArbitrator) checkCommitChainActions(height uint32,
+	trigger transitionTrigger, htlcs htlcSet) (ChainActionMap, error) {

	// TODO(roasbeef): would need to lock channel? channel totem?
	//  * race condition if adding and we broadcast, etc

@@ -1099,7 +1171,7 @@ func (c *ChannelArbitrator) checkChainActions(height uint32,
	// First, we'll make an initial pass over the set of incoming and
	// outgoing HTLC's to decide if we need to go on chain at all.
	haveChainActions := false
-	for _, htlc := range c.activeHTLCs.outgoingHTLCs {
+	for _, htlc := range htlcs.outgoingHTLCs {
		// We'll need to go on-chain for an outgoing HTLC if it was
		// never resolved downstream, and it's "close" to timing out.
		toChain := c.shouldGoOnChain(

@@ -1120,7 +1192,7 @@ func (c *ChannelArbitrator) checkChainActions(height uint32,
		haveChainActions = haveChainActions || toChain
	}

-	for _, htlc := range c.activeHTLCs.incomingHTLCs {
+	for _, htlc := range htlcs.incomingHTLCs {
		// We'll need to go on-chain to pull an incoming HTLC iff we
		// know the pre-image and it's close to timing out. We need to
		// ensure that we claim the funds that are rightfully ours

@@ -1154,7 +1226,7 @@ func (c *ChannelArbitrator) checkChainActions(height uint32,

	// If we don't have any actions to make, then we'll return an empty
	// action map. We only do this if this was a chain trigger though, as
-	// if we're going to broadcast the commitment (or the remote party) did
+	// if we're going to broadcast the commitment (or the remote party did)
	// we're *forced* to act on each HTLC.
	if !haveChainActions && trigger == chainTrigger {
		log.Tracef("ChannelArbitrator(%v): no actions to take at "+

@@ -1166,7 +1238,7 @@ func (c *ChannelArbitrator) checkChainActions(height uint32,
	// active outgoing HTLC's to see if we either need to: sweep them after
	// a timeout (then cancel backwards), cancel them backwards
	// immediately, or watch them as they're still active contracts.
-	for _, htlc := range c.activeHTLCs.outgoingHTLCs {
+	for _, htlc := range htlcs.outgoingHTLCs {
		switch {
		// If the HTLC is dust, then we can cancel it backwards
		// immediately as there's no matching contract to arbitrate

@@ -1221,7 +1293,7 @@ func (c *ChannelArbitrator) checkChainActions(height uint32,
	// observe the output on-chain if we don't. In this last case, we'll
	// either learn of it eventually from the outgoing HTLC, or the sender
	// will timeout the HTLC.
-	for _, htlc := range c.activeHTLCs.incomingHTLCs {
+	for _, htlc := range htlcs.incomingHTLCs {
		log.Tracef("ChannelArbitrator(%v): watching chain to decide "+
			"action for incoming htlc=%x", c.cfg.ChanPoint,
			htlc.RHash[:])

@@ -1269,15 +1341,254 @@ func (c *ChannelArbitrator) isPreimageAvailable(hash lntypes.Hash) (bool,
	return preimageAvailable, nil
}

+// checkLocalChainActions is similar to checkCommitChainActions, but it also
+// examines the set of HTLCs on the remote party's commitment. This allows us
+// to ensure we're able to satisfy the HTLC timeout constraints for incoming
+// vs outgoing HTLCs.
+func (c *ChannelArbitrator) checkLocalChainActions(
+	height uint32, trigger transitionTrigger,
+	activeHTLCs map[HtlcSetKey]htlcSet,
+	commitsConfirmed bool) (ChainActionMap, error) {
+
+	// First, we'll check our local chain actions as normal. This will
+	// only examine HTLCs on our local commitment (timeout or settle).
+	localCommitActions, err := c.checkCommitChainActions(
+		height, trigger, activeHTLCs[LocalHtlcSet],
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// Next, we'll examine the remote commitment (and maybe a dangling
+	// one) to see if the set difference of our HTLCs is non-empty. If so,
+	// then we may need to cancel back some HTLCs if we decide to go to
+	// chain.
+	remoteDanglingActions := c.checkRemoteDanglingActions(
+		height, activeHTLCs, commitsConfirmed,
+	)
+
+	// Finally, we'll merge the two sets of chain actions.
+	localCommitActions.Merge(remoteDanglingActions)
+
+	return localCommitActions, nil
+}

+// checkRemoteDanglingActions examines the set of remote commitments for any
+// HTLCs that are close to timing out. If we find any, then we'll return a set
+// of chain actions for HTLCs that are on our commitment, but not theirs, to
+// cancel immediately.
+func (c *ChannelArbitrator) checkRemoteDanglingActions(
+	height uint32, activeHTLCs map[HtlcSetKey]htlcSet,
+	commitsConfirmed bool) ChainActionMap {
+
+	var (
+		pendingRemoteHTLCs []channeldb.HTLC
+		localHTLCs         = make(map[uint64]struct{})
+		remoteHTLCs        = make(map[uint64]channeldb.HTLC)
+		actionMap          = make(ChainActionMap)
+	)
+
+	// First, we'll construct two sets of the outgoing HTLCs: those on our
+	// local commitment, and those that are on the remote commitment(s).
+	for htlcSetKey, htlcs := range activeHTLCs {
+		if htlcSetKey.IsRemote {
+			for _, htlc := range htlcs.outgoingHTLCs {
+				remoteHTLCs[htlc.HtlcIndex] = htlc
+			}
+		} else {
+			for _, htlc := range htlcs.outgoingHTLCs {
+				localHTLCs[htlc.HtlcIndex] = struct{}{}
+			}
+		}
+	}
+
+	// With both sets constructed, we'll now compute the set difference of
+	// our two sets of HTLCs. This'll give us the HTLCs that exist on the
+	// remote commitment transaction, but not on ours.
+	for htlcIndex, htlc := range remoteHTLCs {
+		if _, ok := localHTLCs[htlcIndex]; ok {
+			continue
+		}
+
+		pendingRemoteHTLCs = append(pendingRemoteHTLCs, htlc)
+	}
+
+	// Finally, we'll examine all the pending remote HTLCs for those that
+	// have expired. If we find any, then we'll recommend that they be
+	// failed now so we can free up the incoming HTLC.
+	for _, htlc := range pendingRemoteHTLCs {
+		// We'll now check if we need to go to chain in order to
+		// cancel the incoming HTLC.
+		goToChain := c.shouldGoOnChain(
+			htlc.RefundTimeout, c.cfg.OutgoingBroadcastDelta,
+			height,
+		)
+
+		// If we don't need to go to chain, and no commitments have
+		// been confirmed, then we can move on. Otherwise, if
+		// commitments have been confirmed, then we need to cancel
+		// back *all* of the pending remote HTLCS.
+		if !goToChain && !commitsConfirmed {
+			continue
+		}
+
+		log.Tracef("ChannelArbitrator(%v): immediately failing "+
+			"htlc=%x from remote commitment",
+			c.cfg.ChanPoint, htlc.RHash[:])
+
+		actionMap[HtlcFailNowAction] = append(
+			actionMap[HtlcFailNowAction], htlc,
+		)
+	}
+
+	return actionMap
+}

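The core of checkRemoteDanglingActions is a set difference over outgoing HTLC indexes: anything a remote commitment carries that the local commitment does not can be failed back immediately once a commitment confirms. A reduced, runnable sketch of just that difference, with indexes standing in for channeldb.HTLC:

package main

import "fmt"

// danglingOutgoing computes the same set difference as
// checkRemoteDanglingActions: outgoing HTLC indexes present on a remote
// commitment but absent from the local one.
func danglingOutgoing(local, remote []uint64) []uint64 {
	onLocal := make(map[uint64]struct{}, len(local))
	for _, idx := range local {
		onLocal[idx] = struct{}{}
	}

	var diff []uint64
	for _, idx := range remote {
		if _, ok := onLocal[idx]; ok {
			continue
		}
		diff = append(diff, idx)
	}
	return diff
}

func main() {
	local := []uint64{1, 2}
	remote := []uint64{1, 2, 3} // HTLC 3 was never locked in locally

	// HTLC 3 can be failed back right away: it will never appear in the
	// confirmed local commitment, so there is nothing to wait for.
	fmt.Println(danglingOutgoing(local, remote)) // [3]
}
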
+// checkRemoteChainActions examines the two possible remote commitment chains
+// and returns the set of chain actions we need to carry out if the remote
+// commitment (non pending) confirms. The pendingConf indicates if the pending
+// remote commitment confirmed. This is similar to checkCommitChainActions,
+// but we'll immediately fail any HTLCs on the pending remote commit, but not
+// the remote commit (or the other way around).
+func (c *ChannelArbitrator) checkRemoteChainActions(
+	height uint32, trigger transitionTrigger,
+	activeHTLCs map[HtlcSetKey]htlcSet,
+	pendingConf bool) (ChainActionMap, error) {
+
+	// First, we'll examine all the normal chain actions on the remote
+	// commitment that confirmed.
+	confHTLCs := activeHTLCs[RemoteHtlcSet]
+	if pendingConf {
+		confHTLCs = activeHTLCs[RemotePendingHtlcSet]
+	}
+	remoteCommitActions, err := c.checkCommitChainActions(
+		height, trigger, confHTLCs,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// With these actions computed, we'll now check the diff of the HTLCs
+	// on the commitments, and cancel back any that are on the pending but
+	// not the non-pending.
+	remoteDiffActions := c.checkRemoteDiffActions(
+		height, activeHTLCs, pendingConf,
+	)
+
+	// Finally, we'll merge all the chain actions into the final set of
+	// chain actions.
+	remoteCommitActions.Merge(remoteDiffActions)
+	return remoteCommitActions, nil
+}

// checkRemoteDiffActions checks the set difference of the HTLCs on the remote
|
||||
// confirmed commit and remote dangling commit for HTLCS that we need to cancel
|
||||
// back. If we find any HTLCs on the remote pending but not the remote, then
|
||||
// we'll mark them to be failed immediately.
|
||||
func (c *ChannelArbitrator) checkRemoteDiffActions(height uint32,
|
||||
activeHTLCs map[HtlcSetKey]htlcSet,
|
||||
pendingConf bool) ChainActionMap {
|
||||
|
||||
// First, we'll partition the HTLCs into those that are present on the
|
||||
// confirmed commitment, and those on the dangling commitment.
|
||||
confHTLCs := activeHTLCs[RemoteHtlcSet]
|
||||
danglingHTLCs := activeHTLCs[RemotePendingHtlcSet]
|
||||
if pendingConf {
|
||||
confHTLCs = activeHTLCs[RemotePendingHtlcSet]
|
||||
danglingHTLCs = activeHTLCs[RemoteHtlcSet]
|
||||
}
|
||||
|
||||
// Next, we'll create a set of all the HTLCs confirmed commitment.
|
||||
remoteHtlcs := make(map[uint64]struct{})
|
||||
for _, htlc := range confHTLCs.outgoingHTLCs {
|
||||
remoteHtlcs[htlc.HtlcIndex] = struct{}{}
|
||||
}
|
||||
|
||||
// With the remote HTLCs assembled, we'll mark any HTLCs only on the
|
||||
// remote dangling commitment to be failed asap.
|
||||
actionMap := make(ChainActionMap)
|
||||
for _, htlc := range danglingHTLCs.outgoingHTLCs {
|
||||
if _, ok := remoteHtlcs[htlc.HtlcIndex]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
actionMap[HtlcFailNowAction] = append(
|
||||
actionMap[HtlcFailNowAction], htlc,
|
||||
)
|
||||
|
||||
log.Tracef("ChannelArbitrator(%v): immediately failing "+
|
||||
"htlc=%x from remote commitment",
|
||||
c.cfg.ChanPoint, htlc.RHash[:])
|
||||
}
|
||||
|
||||
return actionMap
|
||||
}
|
||||
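At its core, checkRemoteDiffActions is a set difference keyed on HTLC index. A self-contained sketch of the same computation, using a pared-down stand-in for channeldb.HTLC:

package main

import "fmt"

// htlc is a simplified stand-in carrying only the index we key on.
type htlc struct {
	index uint64
}

// danglingOnly returns the HTLCs found on the dangling commitment but
// not the confirmed one; these are the ones safe to fail back at once.
func danglingOnly(confirmed, dangling []htlc) []htlc {
	seen := make(map[uint64]struct{}, len(confirmed))
	for _, h := range confirmed {
		seen[h.index] = struct{}{}
	}

	var diff []htlc
	for _, h := range dangling {
		if _, ok := seen[h.index]; ok {
			continue
		}
		diff = append(diff, h)
	}
	return diff
}

func main() {
	confirmed := []htlc{{index: 1}, {index: 2}}
	dangling := []htlc{{index: 2}, {index: 3}}

	// Only index 3 lives solely on the dangling commitment.
	fmt.Println(danglingOnly(confirmed, dangling))
}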

// constructChainActions returns the set of actions that should be taken for
// confirmed HTLCs at the specified height. Our actions will depend on the set
// of HTLCs that were active across all channels at the time of channel
// closure.
func (c *ChannelArbitrator) constructChainActions(confCommitSet *CommitSet,
	height uint32, trigger transitionTrigger) (ChainActionMap, error) {

	// If we've reached this point and have no confirmed commitment set,
	// then this is an older node that had a pending close channel before
	// the CommitSet was introduced. In this case, we'll just return the
	// existing ChainActionMap they had on disk.
	if confCommitSet == nil {
		return c.log.FetchChainActions()
	}

	// Otherwise we have the full commitment set written to disk, and can
	// proceed as normal.
	htlcSets := confCommitSet.toActiveHTLCSets()
	switch *confCommitSet.ConfCommitKey {

	// If the local commitment transaction confirmed, then we'll examine
	// that as well as their commitments to derive the set of chain
	// actions.
	case LocalHtlcSet:
		return c.checkLocalChainActions(
			height, trigger, htlcSets, true,
		)

	// If the remote commitment confirmed, then we'll grab all the chain
	// actions for the remote commit, and check the pending commit for any
	// HTLCs we need to handle immediately (dust).
	case RemoteHtlcSet:
		return c.checkRemoteChainActions(
			height, trigger, htlcSets, false,
		)

	// Otherwise, the remote pending commitment confirmed, so we'll examine
	// the HTLCs on that unrevoked dangling commitment.
	case RemotePendingHtlcSet:
		return c.checkRemoteChainActions(
			height, trigger, htlcSets, true,
		)
	}

	return nil, fmt.Errorf("unable to locate chain actions")
}
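The dispatch above keys entirely off which commitment actually confirmed. A minimal sketch of the same routing pattern, with hypothetical pared-down stand-ins for CommitSet and HtlcSetKey:

package main

import "fmt"

// setKey mirrors the role of HtlcSetKey: it names which commitment a
// given HTLC set belongs to.
type setKey int

const (
	localSet setKey = iota
	remoteSet
	remotePendingSet
)

// commitSet is a trimmed-down stand-in: the key of the commitment that
// confirmed, plus the HTLC sets known at close time.
type commitSet struct {
	confKey  *setKey
	htlcSets map[setKey][]uint64 // HTLC indexes per commitment
}

// describeActions routes to a different handler depending on which
// commitment confirmed, as constructChainActions does above.
func describeActions(cs *commitSet) string {
	if cs == nil {
		// Legacy path: no stored commit set, fall back to the
		// chain actions persisted on disk.
		return "fetch stored chain actions"
	}

	switch *cs.confKey {
	case localSet:
		return "check local chain actions"
	case remoteSet:
		return "check remote chain actions (pendingConf=false)"
	case remotePendingSet:
		return "check remote chain actions (pendingConf=true)"
	}
	return "unknown commitment"
}

func main() {
	key := remotePendingSet
	cs := &commitSet{confKey: &key, htlcSets: map[setKey][]uint64{}}
	fmt.Println(describeActions(cs))
	fmt.Println(describeActions(nil))
}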

// prepContractResolutions is called either in the case that we decide we need
// to go to chain, or the remote party goes to chain. Given a set of actions we
// need to take for each HTLC, this method will return a set of contract
// resolvers that will resolve the contracts on-chain if needed, and also a set
// of packets to send to the htlcswitch in order to ensure all incoming HTLC's
// are properly resolved.
func (c *ChannelArbitrator) prepContractResolutions(htlcActions ChainActionMap,
func (c *ChannelArbitrator) prepContractResolutions(
	contractResolutions *ContractResolutions, height uint32,
) ([]ContractResolver, []ResolutionMsg, error) {
	trigger transitionTrigger,
	confCommitSet *CommitSet) ([]ContractResolver, []ResolutionMsg, error) {

	// First, we'll reconstruct a fresh set of chain actions as the set of
	// actions we need to act on may differ based on if it was our
	// commitment, or their commitment that hit the chain.
	htlcActions, err := c.constructChainActions(
		confCommitSet, height, trigger,
	)
	if err != nil {
		return nil, nil, err
	}

	// There may be a class of HTLC's which we can fail back immediately,
	// for those we'll prepare a slice of packets to add to our outbox. Any
@ -1671,7 +1982,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
			// Now that a new block has arrived, we'll attempt to
			// advance our state forward.
			nextState, _, err := c.advanceState(
				uint32(bestHeight), chainTrigger,
				uint32(bestHeight), chainTrigger, nil,
			)
			if err != nil {
				log.Errorf("unable to advance state: %v", err)
@ -1703,16 +2014,19 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
		// A new set of HTLC's has been added or removed from the
		// commitment transaction. So we'll update our activeHTLCs map
		// accordingly.
		case newStateHTLCs := <-c.htlcUpdates:
			// We'll wipe out our old set of HTLC's and instead
			// monitor only the HTLC's that are still active on the
			// current commitment state.
			c.activeHTLCs = newHtlcSet(newStateHTLCs)
		case htlcUpdate := <-c.htlcUpdates:
			// We'll wipe out our old set of HTLC's for each
			// htlcSetKey type included in this update in order to
			// only monitor the HTLCs that are still active on this
			// target commitment.
			c.activeHTLCs[htlcUpdate.HtlcKey] = newHtlcSet(
				htlcUpdate.Htlcs,
			)

			log.Tracef("ChannelArbitrator(%v): fresh set of "+
				"htlcs=%v", c.cfg.ChanPoint,
			log.Tracef("ChannelArbitrator(%v): fresh set of htlcs=%v",
				c.cfg.ChanPoint,
				newLogClosure(func() string {
					return spew.Sdump(c.activeHTLCs)
					return spew.Sdump(htlcUpdate)
				}),
			)

@ -1734,7 +2048,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
			// We'll now advance our state machine until it reaches
			// a terminal state, and the channel is marked resolved.
			_, _, err = c.advanceState(
				closeInfo.CloseHeight, coopCloseTrigger,
				closeInfo.CloseHeight, coopCloseTrigger, nil,
			)
			if err != nil {
				log.Errorf("unable to advance state: %v", err)
@ -1763,13 +2077,22 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
			// When processing a unilateral close event, we'll
			// transition to the ContractClosed state. We'll log
			// out the set of resolutions such that they are
			// available to fetch in that state.
			// available to fetch in that state, we'll also write
			// the commit set so we can reconstruct our chain
			// actions on restart.
			err := c.log.LogContractResolutions(contractRes)
			if err != nil {
				log.Errorf("unable to write resolutions: %v",
					err)
				return
			}
			err = c.log.InsertConfirmedCommitSet(
				&closeInfo.CommitSet,
			)
			if err != nil {
				log.Errorf("unable to write commit set: %v", err)
				return
			}

			// After the set of resolutions are successfully
			// logged, we can safely close the channel. After this
@ -1794,7 +2117,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
			// a terminal state.
			_, _, err = c.advanceState(
				uint32(closeInfo.SpendingHeight),
				localCloseTrigger,
				localCloseTrigger, &closeInfo.CommitSet,
			)
			if err != nil {
				log.Errorf("unable to advance state: %v", err)
@ -1804,7 +2127,6 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
		// We'll examine our state to determine if we need to act at
		// all.
		case uniClosure := <-c.cfg.ChainEvents.RemoteUnilateralClosure:

			log.Infof("ChannelArbitrator(%v): remote party has "+
				"closed channel out on-chain", c.cfg.ChanPoint)

@ -1817,22 +2139,25 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
				HtlcResolutions: *uniClosure.HtlcResolutions,
			}

			// As we're now acting upon an event triggered by the
			// broadcast of the remote commitment transaction,
			// we'll swap out our active HTLC set with the set
			// present on their commitment.
			c.activeHTLCs = newHtlcSet(uniClosure.RemoteCommit.Htlcs)

			// When processing a unilateral close event, we'll
			// transition to the ContractClosed state. We'll log
			// out the set of resolutions such that they are
			// available to fetch in that state.
			// available to fetch in that state, we'll also write
			// the commit set so we can reconstruct our chain
			// actions on restart.
			err := c.log.LogContractResolutions(contractRes)
			if err != nil {
				log.Errorf("unable to write resolutions: %v",
					err)
				return
			}
			err = c.log.InsertConfirmedCommitSet(
				&uniClosure.CommitSet,
			)
			if err != nil {
				log.Errorf("unable to write commit set: %v", err)
				return
			}

			// After the set of resolutions are successfully
			// logged, we can safely close the channel. After this
@ -1856,7 +2181,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
			// a terminal state.
			_, _, err = c.advanceState(
				uint32(uniClosure.SpendingHeight),
				remoteCloseTrigger,
				remoteCloseTrigger, &uniClosure.CommitSet,
			)
			if err != nil {
				log.Errorf("unable to advance state: %v", err)
@ -1870,7 +2195,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
				"fully resolved!", c.cfg.ChanPoint)

			nextState, _, err := c.advanceState(
				uint32(bestHeight), chainTrigger,
				uint32(bestHeight), chainTrigger, nil,
			)
			if err != nil {
				log.Errorf("unable to advance state: %v", err)
@ -1904,7 +2229,7 @@ func (c *ChannelArbitrator) channelAttendant(bestHeight int32) {
			}

			nextState, closeTx, err := c.advanceState(
				uint32(bestHeight), userTrigger,
				uint32(bestHeight), userTrigger, nil,
			)
			if err != nil {
				log.Errorf("unable to advance state: %v", err)

@ -26,6 +26,8 @@ type mockArbitratorLog struct {
	chainActions ChainActionMap
	resolvers    map[ContractResolver]struct{}

	commitSet *CommitSet

	sync.Mutex
}

@ -108,14 +110,17 @@ func (b *mockArbitratorLog) FetchContractResolutions() (*ContractResolutions, er
	return b.resolutions, nil
}

func (b *mockArbitratorLog) LogChainActions(actions ChainActionMap) error {
	b.chainActions = actions
func (b *mockArbitratorLog) FetchChainActions() (ChainActionMap, error) {
	return nil, nil
}

func (b *mockArbitratorLog) InsertConfirmedCommitSet(c *CommitSet) error {
	b.commitSet = c
	return nil
}

func (b *mockArbitratorLog) FetchChainActions() (ChainActionMap, error) {
	actionsMap := b.chainActions
	return actionsMap, nil
func (b *mockArbitratorLog) FetchConfirmedCommitSet() (*CommitSet, error) {
	return b.commitSet, nil
}
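The mock keeps the commit set in memory, which makes the intended persist-then-fetch round trip easy to see. A hedged sketch of that usage with a trimmed-down, hypothetical mock type (the real mockArbitratorLog also carries state, resolutions, and a mutex):

package main

import "fmt"

type commitSet struct {
	htlcIndexes []uint64
}

// memLog is a hypothetical, trimmed-down arbitrator log that only
// remembers the confirmed commit set, as the mock above does.
type memLog struct {
	commitSet *commitSet
}

func (m *memLog) InsertConfirmedCommitSet(c *commitSet) error {
	m.commitSet = c
	return nil
}

func (m *memLog) FetchConfirmedCommitSet() (*commitSet, error) {
	return m.commitSet, nil
}

func main() {
	log := &memLog{}

	// Persist the set at close time...
	_ = log.InsertConfirmedCommitSet(&commitSet{htlcIndexes: []uint64{99}})

	// ...and read it back on "restart" to rebuild chain actions.
	cs, _ := log.FetchConfirmedCommitSet()
	fmt.Println(cs.htlcIndexes)
}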

func (b *mockArbitratorLog) WipeHistory() error {
@ -144,27 +149,33 @@ func (*mockChainIO) GetBlock(blockHash *chainhash.Hash) (*wire.MsgBlock, error)
}

func createTestChannelArbitrator(log ArbitratorLog) (*ChannelArbitrator,
	chan struct{}, error) {
	chan struct{}, chan []ResolutionMsg, chan *chainntnfs.BlockEpoch, error) {

	blockEpochs := make(chan *chainntnfs.BlockEpoch)
	blockEpoch := &chainntnfs.BlockEpochEvent{
		Epochs: blockEpochs,
		Cancel: func() {},
	}

	chanPoint := wire.OutPoint{}
	shortChanID := lnwire.ShortChannelID{}
	chanEvents := &ChainEventSubscription{
		RemoteUnilateralClosure: make(chan *lnwallet.UnilateralCloseSummary, 1),
		RemoteUnilateralClosure: make(chan *RemoteUnilateralCloseInfo, 1),
		LocalUnilateralClosure:  make(chan *LocalUnilateralCloseInfo, 1),
		CooperativeClosure:      make(chan *CooperativeCloseInfo, 1),
		ContractBreach:          make(chan *lnwallet.BreachRetribution, 1),
	}

	resolutionChan := make(chan []ResolutionMsg, 1)

	chainIO := &mockChainIO{}
	chainArbCfg := ChainArbitratorConfig{
		ChainIO: chainIO,
		PublishTx: func(*wire.MsgTx) error {
			return nil
		},
		DeliverResolutionMsg: func(...ResolutionMsg) error {
		DeliverResolutionMsg: func(msgs ...ResolutionMsg) error {
			resolutionChan <- msgs
			return nil
		},
		OutgoingBroadcastDelta: 5,
@ -213,7 +224,9 @@ func createTestChannelArbitrator(log ArbitratorLog) (*ChannelArbitrator,
		ChainEvents: chanEvents,
	}

	return NewChannelArbitrator(arbCfg, nil, log), resolvedChan, nil
	htlcSets := make(map[HtlcSetKey]htlcSet)
	return NewChannelArbitrator(arbCfg, htlcSets, log), resolvedChan,
		resolutionChan, blockEpochs, nil
}

// assertState checks that the ChannelArbitrator is in the state we expect it
@ -233,7 +246,7 @@ func TestChannelArbitratorCooperativeClose(t *testing.T) {
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -306,7 +319,7 @@ func TestChannelArbitratorRemoteForceClose(t *testing.T) {
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -328,7 +341,13 @@ func TestChannelArbitratorRemoteForceClose(t *testing.T) {
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
		CommitSet: CommitSet{
			ConfCommitKey: &RemoteHtlcSet,
			HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
		},
	}

	// It should transition StateDefault -> StateContractClosed ->
	// StateFullyResolved.
@ -354,7 +373,7 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) {
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -430,12 +449,12 @@ func TestChannelArbitratorLocalForceClose(t *testing.T) {

	// Now notify about the local force close getting confirmed.
	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		&chainntnfs.SpendDetail{},
		&lnwallet.LocalForceCloseSummary{
		SpendDetail: &chainntnfs.SpendDetail{},
		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
			CloseTx:         &wire.MsgTx{},
			HtlcResolutions: &lnwallet.HtlcResolutions{},
		},
		&channeldb.ChannelCloseSummary{},
		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
	}

	// It should transition StateContractClosed -> StateFullyResolved.
@ -461,7 +480,9 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
		resolvers: make(map[ContractResolver]struct{}),
	}

	chanArb, resolved, err := createTestChannelArbitrator(arbLog)
	chanArb, resolved, resolutions, _, err := createTestChannelArbitrator(
		arbLog,
	)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -483,7 +504,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
	defer chanArb.Stop()

	// Create htlcUpdates channel.
	htlcUpdates := make(chan []channeldb.HTLC)
	htlcUpdates := make(chan *ContractUpdate)

	signals := &ContractSignals{
		HtlcUpdates: htlcUpdates,
@ -492,14 +513,16 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
	chanArb.UpdateContractSignals(signals)

	// Add HTLC to channel arbitrator.
	htlcIndex := uint64(99)
	htlc := channeldb.HTLC{
		Incoming:  false,
		Amt:       10000,
		HtlcIndex: 0,
		HtlcIndex: htlcIndex,
	}

	htlcUpdates <- []channeldb.HTLC{
		htlc,
	htlcUpdates <- &ContractUpdate{
		HtlcKey: LocalHtlcSet,
		Htlcs:   []channeldb.HTLC{htlc},
	}

	errChan := make(chan error, 1)
@ -572,8 +595,8 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
	}

	chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
		&chainntnfs.SpendDetail{},
		&lnwallet.LocalForceCloseSummary{
		SpendDetail: &chainntnfs.SpendDetail{},
		LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
			CloseTx: closeTx,
			HtlcResolutions: &lnwallet.HtlcResolutions{
				OutgoingHTLCs: []lnwallet.OutgoingHtlcResolution{
@ -581,7 +604,13 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
				},
			},
		},
		&channeldb.ChannelCloseSummary{},
		ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
		CommitSet: CommitSet{
			ConfCommitKey: &LocalHtlcSet,
			HtlcSets: map[HtlcSetKey][]channeldb.HTLC{
				LocalHtlcSet: {htlc},
			},
		},
	}

	assertStateTransitions(
@ -613,6 +642,22 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {
	// spent.
	notifier.spendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx}

	// Finally, we should also receive a resolution message instructing the
	// switch to cancel back the HTLC.
	select {
	case msgs := <-resolutions:
		if len(msgs) != 1 {
			t.Fatalf("expected 1 message, instead got %v", len(msgs))
		}

		if msgs[0].HtlcIndex != htlcIndex {
			t.Fatalf("wrong htlc index: expected %v, got %v",
				htlcIndex, msgs[0].HtlcIndex)
		}
	case <-time.After(5 * time.Second):
		t.Fatalf("resolution msgs not sent")
	}

	// As this is our own commitment transaction, the HTLC will go through
	// to the second level. Channel arbitrator should still not be marked
	// as resolved.
@ -627,7 +672,6 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) {

	// At this point channel should be marked as resolved.
	assertStateTransitions(t, arbLog.newStates, StateFullyResolved)

	select {
	case <-resolved:
	case <-time.After(5 * time.Second):
@ -644,7 +688,7 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) {
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -726,7 +770,9 @@ func TestChannelArbitratorLocalForceCloseRemoteConfirmed(t *testing.T) {
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	assertStateTransitions(t, log.newStates, StateContractClosed,
@ -751,7 +797,7 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) {
		newStates: make(chan ArbitratorState, 5),
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -832,7 +878,9 @@ func TestChannelArbitratorLocalForceDoubleSpend(t *testing.T) {
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// It should transition StateContractClosed -> StateFullyResolved.
	assertStateTransitions(t, log.newStates, StateContractClosed,
@ -857,7 +905,7 @@ func TestChannelArbitratorPersistence(t *testing.T) {
		failLog: true,
	}

	chanArb, resolved, err := createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -878,7 +926,9 @@ func TestChannelArbitratorPersistence(t *testing.T) {
		SpendDetail:     commitSpend,
		HtlcResolutions: &lnwallet.HtlcResolutions{},
	}
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since writing the resolutions fails, the arbitrator should not
	// advance to the next state.
@ -889,7 +939,7 @@ func TestChannelArbitratorPersistence(t *testing.T) {
	chanArb.Stop()

	// Create a new arbitrator with the same log.
	chanArb, resolved, err = createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err = createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -909,7 +959,9 @@ func TestChannelArbitratorPersistence(t *testing.T) {
	}

	// Send a new remote force close event.
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since closing the channel failed, the arbitrator should stay in the
	// default state.
@ -920,7 +972,7 @@ func TestChannelArbitratorPersistence(t *testing.T) {
	chanArb.Stop()

	// Create yet another arbitrator with the same log.
	chanArb, resolved, err = createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err = createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -934,7 +986,9 @@ func TestChannelArbitratorPersistence(t *testing.T) {

	// Now make fetching the resolutions fail.
	log.failFetch = fmt.Errorf("intentional fetch failure")
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose
	chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
		UnilateralCloseSummary: uniClose,
	}

	// Since logging the resolutions and closing the channel now succeeds,
	// it should advance to StateContractClosed.
@ -952,7 +1006,7 @@ func TestChannelArbitratorPersistence(t *testing.T) {

	// Create a new arbitrator, and now make fetching resolutions succeed.
	log.failFetch = nil
	chanArb, resolved, err = createTestChannelArbitrator(log)
	chanArb, resolved, _, _, err = createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -1015,7 +1069,9 @@ func TestChannelArbitratorCommitFailure(t *testing.T) {
				SpendDetail:     commitSpend,
				HtlcResolutions: &lnwallet.HtlcResolutions{},
			}
			chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- uniClose
			chanArb.cfg.ChainEvents.RemoteUnilateralClosure <- &RemoteUnilateralCloseInfo{
				UnilateralCloseSummary: uniClose,
			}
		},
		expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
	},
@ -1023,12 +1079,12 @@ func TestChannelArbitratorCommitFailure(t *testing.T) {
		closeType: channeldb.LocalForceClose,
		sendEvent: func(chanArb *ChannelArbitrator) {
			chanArb.cfg.ChainEvents.LocalUnilateralClosure <- &LocalUnilateralCloseInfo{
				&chainntnfs.SpendDetail{},
				&lnwallet.LocalForceCloseSummary{
				SpendDetail: &chainntnfs.SpendDetail{},
				LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
					CloseTx:         &wire.MsgTx{},
					HtlcResolutions: &lnwallet.HtlcResolutions{},
				},
				&channeldb.ChannelCloseSummary{},
				ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
			}
		},
		expectedStates: []ArbitratorState{StateContractClosed, StateFullyResolved},
@ -1046,7 +1102,7 @@ func TestChannelArbitratorCommitFailure(t *testing.T) {
			failCommitState: test.expectedStates[0],
		}

		chanArb, resolved, err := createTestChannelArbitrator(log)
		chanArb, resolved, _, _, err := createTestChannelArbitrator(log)
		if err != nil {
			t.Fatalf("unable to create ChannelArbitrator: %v", err)
		}
@ -1086,7 +1142,7 @@ func TestChannelArbitratorCommitFailure(t *testing.T) {

		// Start the arbitrator again, with IsPendingClose reporting
		// the channel closed in the database.
		chanArb, resolved, err = createTestChannelArbitrator(log)
		chanArb, resolved, _, _, err = createTestChannelArbitrator(log)
		if err != nil {
			t.Fatalf("unable to create ChannelArbitrator: %v", err)
		}
@ -1132,7 +1188,7 @@ func TestChannelArbitratorEmptyResolutions(t *testing.T) {
		failFetch: errNoResolutions,
	}

	chanArb, _, err := createTestChannelArbitrator(log)
	chanArb, _, _, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -1170,7 +1226,7 @@ func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
	log := &mockArbitratorLog{
		state: StateCommitmentBroadcasted,
	}
	chanArb, _, err := createTestChannelArbitrator(log)
	chanArb, _, _, _, err := createTestChannelArbitrator(log)
	if err != nil {
		t.Fatalf("unable to create ChannelArbitrator: %v", err)
	}
@ -1192,8 +1248,8 @@ func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
	case <-chanArb.quit:
	}

	// Finally, we should ensure that we are not able to do so by seeing the
	// expected errAlreadyForceClosed error.
	// Finally, we should ensure that we are not able to do so by seeing
	// the expected errAlreadyForceClosed error.
	select {
	case err = <-errChan:
		if err != errAlreadyForceClosed {
@ -1203,3 +1259,229 @@ func TestChannelArbitratorAlreadyForceClosed(t *testing.T) {
		t.Fatal("expected to receive error response")
	}
}

// TestChannelArbitratorDanglingCommitForceClose tests that if there're HTLCs
// on the remote party's commitment, but not ours, and they're about to time
// out, then we'll go on chain so we can cancel back the HTLCs on the incoming
// commitment.
func TestChannelArbitratorDanglingCommitForceClose(t *testing.T) {
	t.Parallel()

	type testCase struct {
		htlcExpired       bool
		remotePendingHTLC bool
		confCommit        HtlcSetKey
	}
	var testCases []testCase

	testOptions := []bool{true, false}
	confOptions := []HtlcSetKey{
		LocalHtlcSet, RemoteHtlcSet, RemotePendingHtlcSet,
	}
	for _, htlcExpired := range testOptions {
		for _, remotePendingHTLC := range testOptions {
			for _, commitConf := range confOptions {
				switch {
				// If the HTLC is on the remote commitment, and
				// that one confirms, then there's no special
				// behavior, we should play all the HTLCs on
				// that remote commitment as normal.
				case !remotePendingHTLC && commitConf == RemoteHtlcSet:
					fallthrough

				// If the HTLC is on the remote pending, and
				// that confirms, then we don't have any
				// special actions.
				case remotePendingHTLC && commitConf == RemotePendingHtlcSet:
					continue
				}

				testCases = append(testCases, testCase{
					htlcExpired:       htlcExpired,
					remotePendingHTLC: remotePendingHTLC,
					confCommit:        commitConf,
				})
			}
		}
	}
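A small Go subtlety worth noting in the generation loops above: the expressionless switch uses fallthrough so that both "nothing special" combinations share a single continue, and that continue applies to the innermost for loop, not the switch. A compact sketch of the same control flow with plain ints (hypothetical conditions):

package main

import "fmt"

func main() {
	for a := 0; a < 2; a++ {
		for b := 0; b < 2; b++ {
			switch {
			case a == 0 && b == 0:
				fallthrough
			case a == 1 && b == 1:
				// continue skips to the next iteration of the
				// innermost for loop.
				continue
			}
			fmt.Println("kept combination:", a, b)
		}
	}
}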

	for _, testCase := range testCases {
		testCase := testCase
		testName := fmt.Sprintf("testCase: htlcExpired=%v,"+
			"remotePendingHTLC=%v,remotePendingCommitConf=%v",
			testCase.htlcExpired, testCase.remotePendingHTLC,
			testCase.confCommit)

		t.Run(testName, func(t *testing.T) {
			t.Parallel()

			arbLog := &mockArbitratorLog{
				state:     StateDefault,
				newStates: make(chan ArbitratorState, 5),
				resolvers: make(map[ContractResolver]struct{}),
			}

			chanArb, _, resolutions, blockEpochs, err := createTestChannelArbitrator(
				arbLog,
			)
			if err != nil {
				t.Fatalf("unable to create ChannelArbitrator: %v", err)
			}
			if err := chanArb.Start(); err != nil {
				t.Fatalf("unable to start ChannelArbitrator: %v", err)
			}
			defer chanArb.Stop()

			// Now that our channel arb has started, we'll set up
			// its contract signals channel so we can send it
			// various HTLC updates for this test.
			htlcUpdates := make(chan *ContractUpdate)
			signals := &ContractSignals{
				HtlcUpdates: htlcUpdates,
				ShortChanID: lnwire.ShortChannelID{},
			}
			chanArb.UpdateContractSignals(signals)

			htlcKey := RemoteHtlcSet
			if testCase.remotePendingHTLC {
				htlcKey = RemotePendingHtlcSet
			}

			// Next, we'll send it a new HTLC that is set to expire
			// in 10 blocks, this HTLC will only appear on the
			// commitment transaction of the _remote_ party.
			htlcIndex := uint64(99)
			htlcExpiry := uint32(10)
			danglingHTLC := channeldb.HTLC{
				Incoming:      false,
				Amt:           10000,
				HtlcIndex:     htlcIndex,
				RefundTimeout: htlcExpiry,
			}
			htlcUpdates <- &ContractUpdate{
				HtlcKey: htlcKey,
				Htlcs:   []channeldb.HTLC{danglingHTLC},
			}

			// At this point, we now have a split commitment state
			// from the PoV of the channel arb. There's now an HTLC
			// that only exists on the commitment transaction of
			// the remote party.
			errChan := make(chan error, 1)
			respChan := make(chan *wire.MsgTx, 1)
			switch {
			// If we want an HTLC expiration trigger, then we'll
			// now mine a block (height 5), which is 5 blocks away
			// (our grace delta) from the expiry of that HTLC.
			case testCase.htlcExpired:
				blockEpochs <- &chainntnfs.BlockEpoch{Height: 5}

			// Otherwise, we'll just trigger a regular force close
			// request.
			case !testCase.htlcExpired:
				chanArb.forceCloseReqs <- &forceCloseReq{
					errResp: errChan,
					closeTx: respChan,
				}
			}

			// At this point, the resolver should now have
			// determined that it needs to go to chain in order to
			// block off the redemption path so it can cancel the
			// incoming HTLC.
			assertStateTransitions(
				t, arbLog.newStates, StateBroadcastCommit,
				StateCommitmentBroadcasted,
			)

			// Next we'll craft a fake commitment transaction to
			// send to signal that the channel has closed out on
			// chain.
			closeTx := &wire.MsgTx{
				TxIn: []*wire.TxIn{
					{
						PreviousOutPoint: wire.OutPoint{},
						Witness: [][]byte{
							{0x9},
						},
					},
				},
			}

			// We'll now signal to the channel arb that the HTLC
			// has fully closed on chain. Our local commit set
			// shows no HTLC on our commitment, but one on the
			// remote commitment. This should result in the HTLC
			// being cancelled back. Also note that there're no HTLC
			// resolutions sent since we have none on our
			// commitment transaction.
			uniCloseInfo := &LocalUnilateralCloseInfo{
				SpendDetail: &chainntnfs.SpendDetail{},
				LocalForceCloseSummary: &lnwallet.LocalForceCloseSummary{
					CloseTx:         closeTx,
					HtlcResolutions: &lnwallet.HtlcResolutions{},
				},
				ChannelCloseSummary: &channeldb.ChannelCloseSummary{},
				CommitSet: CommitSet{
					ConfCommitKey: &testCase.confCommit,
					HtlcSets:      make(map[HtlcSetKey][]channeldb.HTLC),
				},
			}

			// If the HTLC was meant to expire, then we'll mark the
			// closing transaction at the proper expiry height
			// since our "need to timeout" comparison is based on
			// the confirmation height.
			if testCase.htlcExpired {
				uniCloseInfo.SpendDetail.SpendingHeight = 5
			}

			// Depending on if we're testing the remote pending
			// commitment or not, we'll populate either a fake
			// dangling remote commitment, or a regular locked in
			// one.
			htlcs := []channeldb.HTLC{danglingHTLC}
			if testCase.remotePendingHTLC {
				uniCloseInfo.CommitSet.HtlcSets[RemotePendingHtlcSet] = htlcs
			} else {
				uniCloseInfo.CommitSet.HtlcSets[RemoteHtlcSet] = htlcs
			}

			chanArb.cfg.ChainEvents.LocalUnilateralClosure <- uniCloseInfo

			// The channel arb should now transition to waiting
			// until the HTLCs have been fully resolved.
			assertStateTransitions(
				t, arbLog.newStates, StateContractClosed,
				StateWaitingFullResolution,
			)

			// Now that we've sent this signal, we should have that
			// HTLC be cancelled back immediately.
			select {
			case msgs := <-resolutions:
				if len(msgs) != 1 {
					t.Fatalf("expected 1 message, "+
						"instead got %v", len(msgs))
				}

				if msgs[0].HtlcIndex != htlcIndex {
					t.Fatalf("wrong htlc index: expected %v, got %v",
						htlcIndex, msgs[0].HtlcIndex)
				}
			case <-time.After(5 * time.Second):
				t.Fatalf("resolution msgs not sent")
			}

			// There's no contract to send a fully resolve message,
			// so instead, we'll mine another block which'll cause
			// it to re-examine its state and realize there're no
			// more HTLCs.
			blockEpochs <- &chainntnfs.BlockEpoch{Height: 6}
			assertStateTransitions(
				t, arbLog.newStates, StateFullyResolved,
			)
		})
	}
}
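One detail in the test above that does real work is the `testCase := testCase` rebinding: each subtest calls t.Parallel, so without shadowing the loop variable every closure would observe the final loop value on Go versions before 1.22. A minimal sketch of the idiom, with a hypothetical case slice:

package main

import (
	"fmt"
	"testing"
)

// TestCapture sketches the rebinding idiom: parallel subtests only see
// their own case because the loop variable is shadowed per iteration.
func TestCapture(t *testing.T) {
	cases := []string{"a", "b", "c"}
	for _, tc := range cases {
		tc := tc // capture the loop variable for the parallel closure
		t.Run(tc, func(t *testing.T) {
			t.Parallel()
			fmt.Println("running case", tc)
		})
	}
}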

@ -320,7 +320,7 @@ type channelLink struct {

	// htlcUpdates is a channel that we'll use to update outside
	// sub-systems with the latest set of active HTLC's on our channel.
	htlcUpdates chan []channeldb.HTLC
	htlcUpdates chan *contractcourt.ContractUpdate

	// logCommitTimer is a timer which is sent upon if we go an interval
	// without receiving/sending a commitment update. Its role is to
@ -372,7 +372,7 @@ func NewChannelLink(cfg ChannelLinkConfig,
	// TODO(roasbeef): just do reserve here?
	logCommitTimer: time.NewTimer(300 * time.Millisecond),
	overflowQueue:  newPacketQueue(input.MaxHTLCNumber / 2),
	htlcUpdates:    make(chan []channeldb.HTLC),
	htlcUpdates:    make(chan *contractcourt.ContractUpdate),
	hodlMap:        make(map[lntypes.Hash][]hodlHtlc),
	hodlQueue:      queue.NewConcurrentQueue(10),
	quit:           make(chan struct{}),
@ -1721,7 +1721,10 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
		// of HTLC's on our commitment, so we'll send them over our
		// HTLC update channel so any callers can be notified.
		select {
		case l.htlcUpdates <- currentHtlcs:
		case l.htlcUpdates <- &contractcourt.ContractUpdate{
			HtlcKey: contractcourt.LocalHtlcSet,
			Htlcs:   currentHtlcs,
		}:
		case <-l.quit:
			return
		}
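Each of these notifications is wrapped in a select against l.quit, so a send on the unbuffered htlcUpdates channel can't wedge the link at shutdown if the reader is already gone. The shape of that pattern in isolation, with hypothetical names:

package main

import "fmt"

// notify sends update on updates unless quit closes first; this mirrors
// how the link guards its htlcUpdates sends so shutdown can't deadlock.
func notify(updates chan<- string, quit <-chan struct{}, update string) bool {
	select {
	case updates <- update:
		return true
	case <-quit:
		return false
	}
}

func main() {
	updates := make(chan string, 1)
	quit := make(chan struct{})

	if notify(updates, quit, "local htlc set") {
		fmt.Println("delivered:", <-updates)
	}

	// Once quit is closed, pending sends abort instead of blocking.
	close(quit)
	fmt.Println("delivered after quit:", notify(make(chan string), quit, "x"))
}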
@ -1761,7 +1764,9 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
		// We've received a revocation from the remote chain, if valid,
		// this moves the remote chain forward, and expands our
		// revocation window.
		fwdPkg, adds, settleFails, err := l.channel.ReceiveRevocation(msg)
		fwdPkg, adds, settleFails, remoteHTLCs, err := l.channel.ReceiveRevocation(
			msg,
		)
		if err != nil {
			// TODO(halseth): force close?
			l.fail(LinkFailureError{code: ErrInvalidRevocation},
@ -1769,6 +1774,18 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
			return
		}

		// The remote party now has a new primary commitment, so we'll
		// update the contract court to be aware of this new set (the
		// prior old remote pending).
		select {
		case l.htlcUpdates <- &contractcourt.ContractUpdate{
			HtlcKey: contractcourt.RemoteHtlcSet,
			Htlcs:   remoteHTLCs,
		}:
		case <-l.quit:
			return
		}

		l.processRemoteSettleFails(fwdPkg, settleFails)
		needUpdate := l.processRemoteAdds(fwdPkg, adds)

@ -1894,7 +1911,7 @@ func (l *channelLink) updateCommitTx() error {
		return nil
	}

	theirCommitSig, htlcSigs, err := l.channel.SignNextCommitment()
	theirCommitSig, htlcSigs, pendingHTLCs, err := l.channel.SignNextCommitment()
	if err == lnwallet.ErrNoWindow {
		l.tracef("revocation window exhausted, unable to send: %v, "+
			"dangling_opens=%v, dangling_closes=%v",
@ -1910,6 +1927,18 @@ func (l *channelLink) updateCommitTx() error {
		return err
	}

	// The remote party now has a new pending commitment, so we'll update
	// the contract court to be aware of this new set (the prior old remote
	// pending).
	select {
	case l.htlcUpdates <- &contractcourt.ContractUpdate{
		HtlcKey: contractcourt.RemotePendingHtlcSet,
		Htlcs:   pendingHTLCs,
	}:
	case <-l.quit:
		return nil
	}

	if err := l.ackDownStreamPackets(); err != nil {
		return err
	}

@ -1761,7 +1761,7 @@ func handleStateUpdate(link *channelLink,
	}
	link.HandleChannelUpdate(remoteRev)

	remoteSig, remoteHtlcSigs, err := remoteChannel.SignNextCommitment()
	remoteSig, remoteHtlcSigs, _, err := remoteChannel.SignNextCommitment()
	if err != nil {
		return err
	}
@ -1782,7 +1782,7 @@ func handleStateUpdate(link *channelLink,
	if !ok {
		return fmt.Errorf("expected RevokeAndAck got %T", msg)
	}
	_, _, _, err = remoteChannel.ReceiveRevocation(revoke)
	_, _, _, _, err = remoteChannel.ReceiveRevocation(revoke)
	if err != nil {
		return fmt.Errorf("unable to receive "+
			"revocation: %v", err)
@ -1812,7 +1812,7 @@ func updateState(batchTick chan time.Time, link *channelLink,

	// The remote is triggering the state update, emulate this by
	// signing and sending CommitSig to the link.
	remoteSig, remoteHtlcSigs, err := remoteChannel.SignNextCommitment()
	remoteSig, remoteHtlcSigs, _, err := remoteChannel.SignNextCommitment()
	if err != nil {
		return err
	}
@ -1836,7 +1836,7 @@ func updateState(batchTick chan time.Time, link *channelLink,
		return fmt.Errorf("expected RevokeAndAck got %T",
			msg)
	}
	_, _, _, err = remoteChannel.ReceiveRevocation(revoke)
	_, _, _, _, err = remoteChannel.ReceiveRevocation(revoke)
	if err != nil {
		return fmt.Errorf("unable to receive "+
			"revocation: %v", err)
@ -4397,7 +4397,7 @@ func sendCommitSigBobToAlice(t *testing.T, aliceLink ChannelLink,

	t.Helper()

	sig, htlcSigs, err := bobChannel.SignNextCommitment()
	sig, htlcSigs, _, err := bobChannel.SignNextCommitment()
	if err != nil {
		t.Fatalf("error signing commitment: %v", err)
	}
@ -4435,7 +4435,7 @@ func receiveRevAndAckAliceToBob(t *testing.T, aliceMsgs chan lnwire.Message,
		t.Fatalf("expected RevokeAndAck, got %T", msg)
	}

	_, _, _, err := bobChannel.ReceiveRevocation(rev)
	_, _, _, _, err := bobChannel.ReceiveRevocation(rev)
	if err != nil {
		t.Fatalf("bob failed receiving revocation: %v", err)
	}
@ -5326,7 +5326,7 @@ func TestChannelLinkFail(t *testing.T) {

				// Sign a commitment that will include
				// signature for the HTLC just sent.
				sig, htlcSigs, err :=
				sig, htlcSigs, _, err :=
					remoteChannel.SignNextCommitment()
				if err != nil {
					t.Fatalf("error signing commitment: %v",
@ -5358,7 +5358,7 @@ func TestChannelLinkFail(t *testing.T) {

				// Sign a commitment that will include
				// signature for the HTLC just sent.
				sig, htlcSigs, err :=
				sig, htlcSigs, _, err :=
					remoteChannel.SignNextCommitment()
				if err != nil {
					t.Fatalf("error signing commitment: %v",

@ -3035,8 +3035,10 @@ func (lc *LightningChannel) createCommitDiff(
// The first return parameter is the signature for the commitment transaction
// itself, while the second parameter is a slice of all HTLC signatures (if
// any). The HTLC signatures are sorted according to the BIP 69 order of the
// HTLC's on the commitment transaction.
func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, error) {
// HTLC's on the commitment transaction. Finally, the new set of pending HTLCs
// for the remote party's commitment is also returned.
func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []channeldb.HTLC, error) {
	lc.Lock()
	defer lc.Unlock()

@ -3052,7 +3054,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
	commitPoint := lc.channelState.RemoteNextRevocation
	if lc.remoteCommitChain.hasUnackedCommitment() || commitPoint == nil {

		return sig, htlcSigs, ErrNoWindow
		return sig, htlcSigs, nil, ErrNoWindow
	}

	// Determine the last update on the remote log that has been locked in.
@ -3067,7 +3069,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
		remoteACKedIndex, lc.localUpdateLog.logIndex, true, nil,
	)
	if err != nil {
		return sig, htlcSigs, err
		return sig, htlcSigs, nil, err
	}

	// Grab the next commitment point for the remote party. This will be
@ -3089,7 +3091,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
		remoteACKedIndex, remoteHtlcIndex, keyRing,
	)
	if err != nil {
		return sig, htlcSigs, err
		return sig, htlcSigs, nil, err
	}

	walletLog.Tracef("ChannelPoint(%v): extending remote chain to height %v, "+
@ -3114,7 +3116,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
		lc.localChanCfg, lc.remoteChanCfg, newCommitView,
	)
	if err != nil {
		return sig, htlcSigs, err
		return sig, htlcSigs, nil, err
	}
	lc.sigPool.SubmitSignBatch(sigBatch)

@ -3125,12 +3127,12 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
	rawSig, err := lc.Signer.SignOutputRaw(newCommitView.txn, lc.signDesc)
	if err != nil {
		close(cancelChan)
		return sig, htlcSigs, err
		return sig, htlcSigs, nil, err
	}
	sig, err = lnwire.NewSigFromRawSignature(rawSig)
	if err != nil {
		close(cancelChan)
		return sig, htlcSigs, err
		return sig, htlcSigs, nil, err
	}

	// We'll need to send over the signatures to the remote party in the
@ -3150,7 +3152,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
		// jobs.
		if jobResp.Err != nil {
			close(cancelChan)
			return sig, htlcSigs, err
			return sig, htlcSigs, nil, err
		}

		htlcSigs = append(htlcSigs, jobResp.Sig)
@ -3161,11 +3163,11 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
	// can retransmit it if necessary.
	commitDiff, err := lc.createCommitDiff(newCommitView, sig, htlcSigs)
	if err != nil {
		return sig, htlcSigs, err
		return sig, htlcSigs, nil, err
	}
	err = lc.channelState.AppendRemoteCommitChain(commitDiff)
	if err != nil {
		return sig, htlcSigs, err
		return sig, htlcSigs, nil, err
	}

	// TODO(roasbeef): check that one eclair bug
@ -3176,7 +3178,7 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
	// latest commitment update.
	lc.remoteCommitChain.addCommitment(newCommitView)

	return sig, htlcSigs, nil
	return sig, htlcSigs, commitDiff.Commitment.Htlcs, nil
}

// ProcessChanSyncMsg processes a ChannelReestablish message sent by the remote
@ -3352,7 +3354,7 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
		// revocation, but also initiate a state transition to re-sync
		// them.
		if !lc.FullySynced() {
			commitSig, htlcSigs, err := lc.SignNextCommitment()
			commitSig, htlcSigs, _, err := lc.SignNextCommitment()
			switch {

			// If we signed this state, then we'll accumulate
@ -4277,8 +4279,11 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []c
// revocation.
// 3. The PaymentDescriptor of any Settle/Fail HTLCs that were locked in by
// this revocation.
// 4. The set of HTLCs present on the current valid commitment transaction
// for the remote party.
func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
	*channeldb.FwdPkg, []*PaymentDescriptor, []*PaymentDescriptor, error) {
	*channeldb.FwdPkg, []*PaymentDescriptor, []*PaymentDescriptor,
	[]channeldb.HTLC, error) {

	lc.Lock()
	defer lc.Unlock()
@ -4287,10 +4292,10 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
	store := lc.channelState.RevocationStore
	revocation, err := chainhash.NewHash(revMsg.Revocation[:])
	if err != nil {
		return nil, nil, nil, err
		return nil, nil, nil, nil, err
	}
	if err := store.AddNextEntry(revocation); err != nil {
		return nil, nil, nil, err
		return nil, nil, nil, nil, err
	}

	// Verify that if we use the commitment point computed based off of the
@ -4299,7 +4304,7 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
	currentCommitPoint := lc.channelState.RemoteCurrentRevocation
	derivedCommitPoint := input.ComputeCommitmentPoint(revMsg.Revocation[:])
	if !derivedCommitPoint.IsEqual(currentCommitPoint) {
		return nil, nil, nil, fmt.Errorf("revocation key mismatch")
		return nil, nil, nil, nil, fmt.Errorf("revocation key mismatch")
	}

	// Now that we've verified that the prior commitment has been properly
@ -4470,7 +4475,7 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
	// commitment chain.
	err = lc.channelState.AdvanceCommitChainTail(fwdPkg)
	if err != nil {
		return nil, nil, nil, err
		return nil, nil, nil, nil, err
	}

	// Since they revoked the current lowest height in their commitment
@ -4485,7 +4490,9 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
		remoteChainTail,
	)

	return fwdPkg, addsToForward, settleFailsToForward, nil
	remoteHTLCs := lc.channelState.RemoteCommitment.Htlcs

	return fwdPkg, addsToForward, settleFailsToForward, remoteHTLCs, nil
}
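The receive side mirrors the signer: ReceiveRevocation now returns the HTLCs on the remote party's new primary commitment as a fifth value, which the link publishes as the RemoteHtlcSet. A sketch of the caller shape under assumed, simplified types:

package main

import "fmt"

type htlc struct{ index uint64 }

type fwdPkg struct{}
type paymentDesc struct{}

type channel struct{ remoteHTLCs []htlc }

// ReceiveRevocation mimics the widened five-value return: the forwarding
// package, adds, settle/fails, and now the remote commitment's HTLC set.
func (c *channel) ReceiveRevocation() (*fwdPkg, []*paymentDesc, []*paymentDesc, []htlc, error) {
	return &fwdPkg{}, nil, nil, c.remoteHTLCs, nil
}

func main() {
	ch := &channel{remoteHTLCs: []htlc{{index: 42}}}

	// Callers that don't need the set blank it with an extra _.
	_, _, _, remoteHTLCs, err := ch.ReceiveRevocation()
	if err != nil {
		panic(err)
	}
	fmt.Println("remote commitment HTLCs:", remoteHTLCs)
}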
|
||||
// LoadFwdPkgs loads any pending log updates from disk and returns the payment
|
||||
|
@ -99,7 +99,7 @@ func TestSimpleAddSettleWorkflow(t *testing.T) {
|
||||
// we expect the messages to be ordered, Bob will receive the HTLC we
|
||||
// just sent before he receives this signature, so the signature will
|
||||
// cover the HTLC.
|
||||
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
|
||||
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("alice unable to sign commitment: %v", err)
|
||||
}
|
||||
@ -123,7 +123,7 @@ func TestSimpleAddSettleWorkflow(t *testing.T) {
|
||||
// This signature will cover the HTLC, since Bob will first send the
|
||||
// revocation just created. The revocation also acks every received
|
||||
// HTLC up to the point where Alice sent here signature.
|
||||
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
|
||||
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("bob unable to sign alice's commitment: %v", err)
|
||||
}
|
||||
@ -131,7 +131,7 @@ func TestSimpleAddSettleWorkflow(t *testing.T) {
|
||||
// Alice then processes this revocation, sending her own revocation for
|
||||
// her prior commitment transaction. Alice shouldn't have any HTLCs to
|
||||
// forward since she's sending an outgoing HTLC.
|
||||
fwdPkg, _, _, err := aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
fwdPkg, _, _, _, err := aliceChannel.ReceiveRevocation(bobRevocation)
|
||||
if err != nil {
|
||||
t.Fatalf("alice unable to process bob's revocation: %v", err)
|
||||
}
|
||||
@ -162,7 +162,7 @@ func TestSimpleAddSettleWorkflow(t *testing.T) {
|
||||
// is fully locked in within both commitment transactions. Bob should
|
||||
// also be able to forward an HTLC now that the HTLC has been locked
|
||||
// into both commitment transactions.
|
||||
fwdPkg, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
fwdPkg, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
|
||||
if err != nil {
|
||||
t.Fatalf("bob unable to process alice's revocation: %v", err)
|
||||
}
|
||||
@ -239,7 +239,7 @@ func TestSimpleAddSettleWorkflow(t *testing.T) {
|
||||
t.Fatalf("alice unable to accept settle of outbound htlc: %v", err)
|
||||
}
|
||||
|
||||
bobSig2, bobHtlcSigs2, err := bobChannel.SignNextCommitment()
|
||||
bobSig2, bobHtlcSigs2, _, err := bobChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("bob unable to sign settle commitment: %v", err)
|
||||
}
|
||||
@ -252,12 +252,12 @@ func TestSimpleAddSettleWorkflow(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("alice unable to generate revocation: %v", err)
|
||||
}
|
||||
aliceSig2, aliceHtlcSigs2, err := aliceChannel.SignNextCommitment()
|
||||
aliceSig2, aliceHtlcSigs2, _, err := aliceChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("alice unable to sign new commitment: %v", err)
|
||||
}
|
||||
|
||||
fwdPkg, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation2)
|
||||
fwdPkg, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation2)
|
||||
if err != nil {
|
||||
t.Fatalf("bob unable to process alice's revocation: %v", err)
|
||||
}
|
||||
@ -279,7 +279,7 @@ func TestSimpleAddSettleWorkflow(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("bob unable to revoke commitment: %v", err)
|
||||
}
|
||||
fwdPkg, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation2)
|
||||
fwdPkg, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation2)
|
||||
if err != nil {
|
||||
t.Fatalf("alice unable to process bob's revocation: %v", err)
|
||||
}
|
||||
@ -1106,7 +1106,7 @@ func TestHTLCSigNumber(t *testing.T) {
|
||||
aboveDust)
|
||||
defer cleanUp()
|
||||
|
||||
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
|
||||
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing next commitment: %v", err)
|
||||
}
|
||||
@ -1130,7 +1130,7 @@ func TestHTLCSigNumber(t *testing.T) {
|
||||
aliceChannel, bobChannel, cleanUp = createChanWithHTLC(aboveDust)
|
||||
defer cleanUp()
|
||||
|
||||
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
|
||||
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing next commitment: %v", err)
|
||||
}
|
||||
@ -1153,7 +1153,7 @@ func TestHTLCSigNumber(t *testing.T) {
|
||||
aliceChannel, bobChannel, cleanUp = createChanWithHTLC(belowDust)
|
||||
defer cleanUp()
|
||||
|
||||
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
|
||||
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing next commitment: %v", err)
|
||||
}
|
||||
@ -1176,7 +1176,7 @@ func TestHTLCSigNumber(t *testing.T) {
|
||||
aliceChannel, bobChannel, cleanUp = createChanWithHTLC(aboveDust)
|
||||
defer cleanUp()
|
||||
|
||||
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
|
||||
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing next commitment: %v", err)
|
||||
}
|
||||
@ -1203,7 +1203,7 @@ func TestHTLCSigNumber(t *testing.T) {
|
||||
|
||||
// Alice should produce only one signature, since one HTLC is below
|
||||
// dust.
|
||||
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
|
||||
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("Error signing next commitment: %v", err)
|
||||
}
|
||||
@ -1989,7 +1989,7 @@ func TestUpdateFeeFail(t *testing.T) {
|
||||
|
||||
// Alice sends signature for commitment that does not cover any fee
|
||||
// update.
|
||||
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
|
||||
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
|
||||
if err != nil {
|
||||
t.Fatalf("alice unable to sign commitment: %v", err)
|
||||
}
|
||||
@ -2040,13 +2040,13 @@ func TestUpdateFeeConcurrentSig(t *testing.T) {
}

// Alice signs a commitment, and sends this to bob.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("alice unable to sign commitment: %v", err)
}

// At the same time, Bob signs a commitment.
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign alice's commitment: %v", err)
}
@ -2127,7 +2127,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) {
// Alice signs a commitment, which will cover everything sent to Bob
// (the HTLC and the fee update), and everything acked by Bob (nothing
// so far).
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("alice unable to sign commitment: %v", err)
}
@ -2157,7 +2157,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) {

// Bob commits to all updates he has received from Alice. This includes
// the HTLC he received, and the fee update.
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign alice's commitment: %v", err)
}
@ -2165,7 +2165,8 @@ func TestUpdateFeeSenderCommits(t *testing.T) {
// Alice receives the revocation of the old one, and can now assume
// that Bob's received everything up to the signature she sent,
// including the HTLC and fee update.
if _, _, _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("alice unable to process bob's revocation: %v", err)
}

@ -2192,7 +2193,8 @@ func TestUpdateFeeSenderCommits(t *testing.T) {
}

// Bob receives revocation from Alice.
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to process alice's revocation: %v", err)
}
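ReceiveRevocation grows in the same way, from four return values to five. The hunks above also change shape for a second reason: with a fifth value, the inline if _, _, _, _, err := ...; err != nil form no longer fits on one line, so the diff splits the call and the error check into two statements. The receive side of the sketch, under the same assumptions as before:

// recvRevocation processes a counterparty revocation and fails the
// test on error. The four blank identifiers discard the forwarding
// package, the adds, the settles/fails, and the newly added HTLC set.
func recvRevocation(t *testing.T, c *lnwallet.LightningChannel,
    rev *lnwire.RevokeAndAck) {

    t.Helper()

    _, _, _, _, err := c.ReceiveRevocation(rev)
    if err != nil {
        t.Fatalf("unable to process revocation: %v", err)
    }
}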
@ -2239,7 +2241,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
// Bob commits to every change he has sent since last time (none). He
// does not commit to the received HTLC and fee update, since Alice
// cannot know if he has received them.
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("alice unable to sign commitment: %v", err)
}
@ -2260,14 +2262,15 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
}

// Bob receives the revocation of the old commitment
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("alice unable to process bob's revocation: %v", err)
}

// Alice will sign next commitment. Since she sent the revocation, she
// also ack'ed everything received, but in this case this is nothing.
// Since she sent the two updates, this signature will cover those two.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign alice's commitment: %v", err)
}
@ -2297,14 +2300,15 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {

// Bob will send a new signature, which will cover what he just acked:
// the HTLC and fee update.
bobSig, bobHtlcSigs, err = bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err = bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("alice unable to sign commitment: %v", err)
}

// Alice receives revocation from Bob, and can now be sure that Bob
// received the two updates, and they are considered locked in.
if _, _, _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("bob unable to process alice's revocation: %v", err)
}

@ -2331,7 +2335,8 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
}

// Bob receives revocation from Alice.
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to process alice's revocation: %v", err)
}
}
@ -2391,7 +2396,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) {
// Alice signs a commitment, which will cover everything sent to Bob
// (the HTLC and the fee update), and everything acked by Bob (nothing
// so far).
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("alice unable to sign commitment: %v", err)
}
@ -2437,7 +2442,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) {

// Bob commits to all updates he has received from Alice. This includes
// the HTLC he received, and the fee update.
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign alice's commitment: %v", err)
}
@ -2445,7 +2450,8 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) {
// Alice receives the revocation of the old one, and can now assume that
// Bob's received everything up to the signature she sent, including the
// HTLC and fee update.
if _, _, _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("alice unable to process bob's revocation: %v", err)
}

@ -2471,7 +2477,8 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) {
}

// Bob receives revocation from Alice.
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to process alice's revocation: %v", err)
}
}
@ -2764,7 +2771,7 @@ func TestChanSyncOweCommitment(t *testing.T) {
// Now we'll begin the core of the test itself. Alice will extend a new
// commitment to Bob, but the connection drops before Bob can process
// it.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -2892,11 +2899,11 @@ func TestChanSyncOweCommitment(t *testing.T) {
if err != nil {
t.Fatalf("unable to revoke bob commitment: %v", err)
}
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign commitment: %v", err)
}
_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("alice unable to recv revocation: %v", err)
}
@ -2908,7 +2915,8 @@ func TestChanSyncOweCommitment(t *testing.T) {
if err != nil {
t.Fatalf("alice unable to revoke commitment: %v", err)
}
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to recv revocation: %v", err)
}

@ -3048,7 +3056,7 @@ func TestChanSyncOweRevocation(t *testing.T) {
//
// Alice signs the next state, then Bob receives and sends his
// revocation message.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -3061,12 +3069,12 @@ func TestChanSyncOweRevocation(t *testing.T) {
if err != nil {
t.Fatalf("unable to revoke bob commitment: %v", err)
}
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign commitment: %v", err)
}

_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("alice unable to recv revocation: %v", err)
}
@ -3148,7 +3156,8 @@ func TestChanSyncOweRevocation(t *testing.T) {
// TODO(roasbeef): restart bob too???

// We'll continue by then allowing bob to process Alice's revocation message.
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to recv revocation: %v", err)
}

@ -3230,7 +3239,7 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) {

// Progressing the exchange: Alice will send her signature, Bob will
// receive, send a revocation and also a signature for Alice's state.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -3245,7 +3254,7 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) {
if err != nil {
t.Fatalf("unable to revoke bob commitment: %v", err)
}
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign commitment: %v", err)
}
@ -3326,7 +3335,7 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) {

// We'll now finish the state transition by having Alice process both
// messages, and send her final revocation.
_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("alice unable to recv revocation: %v", err)
}
@ -3338,7 +3347,8 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) {
if err != nil {
t.Fatalf("alice unable to revoke commitment: %v", err)
}
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to recv revocation: %v", err)
}
}
@ -3407,7 +3417,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
}

// Bob signs the new state update, and sends the signature to Alice.
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign commitment: %v", err)
}
@ -3425,7 +3435,8 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
if err != nil {
t.Fatalf("alice unable to revoke commitment: %v", err)
}
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to recv revocation: %v", err)
}

@ -3442,7 +3453,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {

// Progressing the exchange: Alice will send her signature, with Bob
// processing the new state locally.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -3554,7 +3565,7 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
// Now, we'll continue the exchange, sending Bob's revocation and
// signature message to Alice, ending with Alice sending her revocation
// message to Bob.
_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("alice unable to recv revocation: %v", err)
}
@ -3568,7 +3579,8 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
if err != nil {
t.Fatalf("alice unable to revoke commitment: %v", err)
}
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to recv revocation: %v", err)
}
}
@ -3651,7 +3663,7 @@ func TestChanSyncFailure(t *testing.T) {
t.Fatalf("unable to recv bob's htlc: %v", err)
}

aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign next commit: %v", err)
}
@ -3883,7 +3895,7 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) {

// Now, Alice will send a new commitment to Bob, but we'll simulate a
// connection failure, so Bob doesn't get her signature.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -3986,11 +3998,11 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) {
if err != nil {
t.Fatalf("unable to revoke bob commitment: %v", err)
}
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign commitment: %v", err)
}
_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("alice unable to recv revocation: %v", err)
}
@ -4002,7 +4014,8 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) {
if err != nil {
t.Fatalf("alice unable to revoke commitment: %v", err)
}
if _, _, _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to recv revocation: %v", err)
}

@ -4131,7 +4144,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) {

// Now, Alice will send a new commitment to Bob, but we'll simulate a
// connection failure, so Bob doesn't get the signature.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -4198,11 +4211,11 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) {
if err != nil {
t.Fatalf("unable to revoke bob commitment: %v", err)
}
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("bob unable to sign commitment: %v", err)
}
_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("alice unable to recv revocation: %v", err)
}
@ -4214,7 +4227,7 @@ func TestFeeUpdateOldDiskFormat(t *testing.T) {
if err != nil {
t.Fatalf("alice unable to revoke commitment: %v", err)
}
_, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to recv revocation: %v", err)
}
@ -4524,7 +4537,7 @@ func TestSignCommitmentFailNotLockedIn(t *testing.T) {

// If we now try to initiate a state update, then it should fail as
// Alice is unable to actually create a new state.
_, _, err = aliceChannel.SignNextCommitment()
_, _, _, err = aliceChannel.SignNextCommitment()
if err != ErrNoWindow {
t.Fatalf("expected ErrNoWindow, instead have: %v", err)
}
@ -4563,7 +4576,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// We'll now manually initiate a state transition between Alice and
// Bob.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)
}
@ -4577,7 +4590,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
}

// Alice should detect that she doesn't need to forward any HTLC's.
fwdPkg, _, _, err := aliceChannel.ReceiveRevocation(bobRevocation)
fwdPkg, _, _, _, err := aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatal(err)
}
@ -4592,7 +4605,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// Now, have Bob initiate a transition to lock in the Adds sent by
// Alice.
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)
}
@ -4608,7 +4621,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// Bob should now detect that he has 2 incoming HTLC's that he can
// forward along.
fwdPkg, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
fwdPkg, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatal(err)
}
@ -4645,7 +4658,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// We'll now initiate another state transition, but this time Bob will
// lead.
bobSig, bobHtlcSigs, err = bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err = bobChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)
}
@ -4661,7 +4674,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
// At this point, Bob receives the revocation from Alice, which is now
// his signal to examine all the HTLC's that have been locked in to
// process.
fwdPkg, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
fwdPkg, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatal(err)
}
@ -4680,7 +4693,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// Now, begin another state transition led by Alice, and fail the second
// HTLC part-way through the dance.
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)
}
@ -4707,7 +4720,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// Alice should detect that she doesn't need to forward any Adds, but
// that the Fail has been locked in and can be forwarded.
_, adds, settleFails, err := aliceChannel.ReceiveRevocation(bobRevocation)
_, adds, settleFails, _, err := aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatal(err)
}
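In the restart test above, only the error position moves: the forwarding package, the locked-in adds, and the settles/fails keep their slots, and the new fourth value is discarded. A sketch of such an assertion with the widened API; the wantAdds and wantSettleFails parameters are illustrative, not from the PR.

// assertForwardedSets processes a revocation and checks how many Adds
// and Settle/Fail updates became forwardable as a result.
func assertForwardedSets(t *testing.T, c *lnwallet.LightningChannel,
    rev *lnwire.RevokeAndAck, wantAdds, wantSettleFails int) {

    t.Helper()

    fwdPkg, adds, settleFails, _, err := c.ReceiveRevocation(rev)
    if err != nil {
        t.Fatal(err)
    }
    if fwdPkg == nil {
        t.Fatalf("expected a forwarding package")
    }
    if len(adds) != wantAdds {
        t.Fatalf("expected %d adds, got %d", wantAdds, len(adds))
    }
    if len(settleFails) != wantSettleFails {
        t.Fatalf("expected %d settles/fails, got %d",
            wantSettleFails, len(settleFails))
    }
}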
@ -4749,7 +4762,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// Have Alice initiate a state transition, which does not include the
// HTLCs just readded to the channel state.
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)
}
@ -4764,7 +4777,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// Alice should detect that she doesn't need to forward any HTLC's, as
// the updates haven't been committed by Bob yet.
fwdPkg, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
fwdPkg, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatal(err)
}
@ -4778,7 +4791,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
}

// Now initiate a final update from Bob to lock in the final Fail.
bobSig, bobHtlcSigs, err = bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err = bobChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)
}
@ -4795,7 +4808,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// Bob should detect that he has nothing to forward, as he hasn't
// received any HTLCs.
fwdPkg, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
fwdPkg, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatal(err)
}
@ -4810,7 +4823,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// Finally, have Bob initiate a state transition that locks in the Fail
// added after the restart.
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)
}
@ -4825,7 +4838,7 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {

// When Alice receives the revocation, she should detect that she
// can now forward the freshly locked-in Fail.
_, adds, settleFails, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, adds, settleFails, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatal(err)
}
@ -4869,7 +4882,7 @@ func TestInvalidCommitSigError(t *testing.T) {
}

// Alice will now attempt to initiate a state transition.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign new commit: %v", err)
}
@ -5075,7 +5088,7 @@ func TestChannelUnilateralClosePendingCommit(t *testing.T) {

// With the HTLC added, we'll now manually initiate a state transition
// from Alice to Bob.
_, _, err = aliceChannel.SignNextCommitment()
_, _, _, err = aliceChannel.SignNextCommitment()
if err != nil {
t.Fatal(err)
}
@ -5861,7 +5874,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
}

// Let Alice sign a new state, which will include the HTLC just sent.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -5881,7 +5894,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
// sent. However her local commitment chain still won't include the
// state with the HTLC, since she hasn't received a new commitment
// signature from Bob yet.
_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("unable to receive revocation: %v", err)
}
@ -5899,7 +5912,7 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
// and remote commit chains are updated in an async fashion. Since the
// remote chain was updated with the latest state (since Bob sent the
// revocation earlier) we can keep advancing the remote commit chain.
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -6064,7 +6077,7 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) {
restoreAndAssert(t, aliceChannel, 1, 0, 0, 0)

// Bob sends a signature.
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -6083,7 +6096,7 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) {
if err != nil {
t.Fatalf("unable to revoke commitment: %v", err)
}
_, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("bob unable to process alice's revocation: %v", err)
}
@ -6097,7 +6110,7 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) {

// Now send a signature from Alice. This will give Bob a new commitment
// where the HTLC is removed.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -6119,7 +6132,7 @@ func TestChannelRestoreUpdateLogsFailedHTLC(t *testing.T) {
if err != nil {
t.Fatalf("unable to revoke commitment: %v", err)
}
_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("unable to receive revocation: %v", err)
}
@ -6177,7 +6190,7 @@ func TestDuplicateFailRejection(t *testing.T) {

// We'll now have Bob sign a new commitment to lock in the HTLC fail
// for Alice.
_, _, err = bobChannel.SignNextCommitment()
_, _, _, err = bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commit: %v", err)
}
@ -6257,7 +6270,7 @@ func TestDuplicateSettleRejection(t *testing.T) {

// We'll now have Bob sign a new commitment to lock in the HTLC fail
// for Alice.
_, _, err = bobChannel.SignNextCommitment()
_, _, _, err = bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commit: %v", err)
}
@ -6350,7 +6363,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
}

// Let Alice sign a new state, which will include the HTLC just sent.
aliceSig, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -6376,7 +6389,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 1, 0)

// Alice receives the revocation, ACKing her pending commitment.
_, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
_, _, _, _, err = aliceChannel.ReceiveRevocation(bobRevocation)
if err != nil {
t.Fatalf("unable to receive revocation: %v", err)
}
@ -6388,7 +6401,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {

// Now let Bob send the commitment signature making the HTLC lock in on
// Alice's commitment.
bobSig, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -6411,7 +6424,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
aliceChannel = restoreAndAssertCommitHeights(t, aliceChannel, false,
0, 1, 1)

_, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
_, _, _, _, err = bobChannel.ReceiveRevocation(aliceRevocation)
if err != nil {
t.Fatalf("unable to receive revocation: %v", err)
}
@ -6433,7 +6446,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {

// Send a new signature from Alice to Bob, making Alice have a pending
// remote commitment.
aliceSig, aliceHtlcSigs, err = aliceChannel.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err = aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -6463,7 +6476,7 @@ func TestChannelRestoreCommitHeight(t *testing.T) {

// Sign a new state for Alice, making Bob have a pending remote
// commitment.
bobSig, bobHtlcSigs, err = bobChannel.SignNextCommitment()
bobSig, bobHtlcSigs, _, err = bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commitment: %v", err)
}
@ -6519,7 +6532,7 @@ func TestForceCloseBorkedState(t *testing.T) {
// Do the commitment dance until Bob sends a revocation so Alice is
// able to receive the revocation, and then also make a new state
// herself.
aliceSigs, aliceHtlcSigs, err := aliceChannel.SignNextCommitment()
aliceSigs, aliceHtlcSigs, _, err := aliceChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commit: %v", err)
}
@ -6531,7 +6544,7 @@ func TestForceCloseBorkedState(t *testing.T) {
if err != nil {
t.Fatalf("unable to revoke bob commitment: %v", err)
}
bobSigs, bobHtlcSigs, err := bobChannel.SignNextCommitment()
bobSigs, bobHtlcSigs, _, err := bobChannel.SignNextCommitment()
if err != nil {
t.Fatalf("unable to sign commit: %v", err)
}
@ -6563,7 +6576,7 @@ func TestForceCloseBorkedState(t *testing.T) {

// At this point, all channel mutating methods should now fail as they
// shouldn't be able to proceed if the channel is borked.
_, _, _, err = aliceChannel.ReceiveRevocation(revokeMsg)
_, _, _, _, err = aliceChannel.ReceiveRevocation(revokeMsg)
if err != channeldb.ErrChanBorked {
t.Fatalf("advance commitment tail should have failed")
}
@ -6571,7 +6584,7 @@ func TestForceCloseBorkedState(t *testing.T) {
// We manually advance the commitment tail here since the above
// ReceiveRevocation call will fail before it's actually advanced.
aliceChannel.remoteCommitChain.advanceTail()
_, _, err = aliceChannel.SignNextCommitment()
_, _, _, err = aliceChannel.SignNextCommitment()
if err != channeldb.ErrChanBorked {
t.Fatalf("sign commitment should have failed: %v", err)
}
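The borked-state hunks show the same arity change on pure error paths: even when only the error matters, the extra blank identifier is required, and the sentinel comparison against channeldb.ErrChanBorked is unchanged. A small sketch of guarding a mutating call with that sentinel (tryAdvance is our name, and the signatures are simply discarded here):

// tryAdvance attempts to sign the next state and reports whether the
// channel was borked; any other error is returned unchanged.
func tryAdvance(c *lnwallet.LightningChannel) (bool, error) {
    _, _, _, err := c.SignNextCommitment()
    switch err {
    case nil:
        return false, nil
    case channeldb.ErrChanBorked:
        // Mutating calls are expected to keep failing until the
        // channel is resolved on chain.
        return true, nil
    default:
        return false, err
    }
}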
@ -496,7 +496,7 @@ func calcStaticFee(numHTLCs int) btcutil.Amount {
// pending updates. This method is useful when testing interactions between two
// live state machines.
func ForceStateTransition(chanA, chanB *LightningChannel) error {
aliceSig, aliceHtlcSigs, err := chanA.SignNextCommitment()
aliceSig, aliceHtlcSigs, _, err := chanA.SignNextCommitment()
if err != nil {
return err
}
@ -508,12 +508,12 @@ func ForceStateTransition(chanA, chanB *LightningChannel) error {
if err != nil {
return err
}
bobSig, bobHtlcSigs, err := chanB.SignNextCommitment()
bobSig, bobHtlcSigs, _, err := chanB.SignNextCommitment()
if err != nil {
return err
}

if _, _, _, err := chanA.ReceiveRevocation(bobRevocation); err != nil {
if _, _, _, _, err := chanA.ReceiveRevocation(bobRevocation); err != nil {
return err
}
if err := chanA.ReceiveNewCommitment(bobSig, bobHtlcSigs); err != nil {
@ -524,7 +524,7 @@ func ForceStateTransition(chanA, chanB *LightningChannel) error {
if err != nil {
return err
}
if _, _, _, err := chanB.ReceiveRevocation(aliceRevocation); err != nil {
if _, _, _, _, err := chanB.ReceiveRevocation(aliceRevocation); err != nil {
return err
}
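Finally, the exported ForceStateTransition helper in lnwallet's test utilities absorbs both arity changes, so downstream tests keep their one-line call sites. An illustrative usage sketch; TestSomething and the createTestChannels setup helper are hypothetical, not part of this PR.

// TestSomething drives a full commitment dance between two test
// channels with the updated helper.
func TestSomething(t *testing.T) {
    aliceChannel, bobChannel, cleanUp := createTestChannels(t)
    defer cleanUp()

    if err := lnwallet.ForceStateTransition(aliceChannel, bobChannel); err != nil {
        t.Fatalf("unable to complete state transition: %v", err)
    }
}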