lnwallet: update state machine to the version within the spec

This commit updates the internal channel state machine to the one
described within the spec and currently implemented by the other
Lightning implementations.

At a high level the following modifications have been made:
    * When signing, we no longer include the index of the remote
      party’s log that our signature covers (see the sketch after this
      list). Instead we include ALL of our current updates, but only
      the updates of the remote party that we’ve ACK’d.
    * A pending change is considered ACK’d once a revocation message
      has been received, locking in the changes in the remote party’s
      commitment transaction.
    * When sending a new commitment, we remember the index of our
      log at that point so we can mark that portion of the log as
      ACK’d once we receive a revocation message from the remote
      party.
    * When receiving a new commitment signature, we include ALL of
      the remote party’s changes that we’ve received but only our set
      of changes that’ve been ACK’d by the remote party.
    * A revocation message now also implicitly serves to ACK all the
      changes that were included in the CommitSig message received
      before it.
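
To make the first point above concrete, this is roughly how the new
signing path selects log indexes (a sketch distilled from the
SignNextCommitment hunk in the diff below; all names appear in the
diff):

    // Signing a new remote commitment: ALL of our updates, but only
    // the remote party's updates that we've ACK'd via a revocation.
    newCommitView, err := lc.fetchCommitmentView(true,
            lc.localUpdateLog.logIndex,    // our side: everything
            lc.remoteUpdateLog.ackedIndex, // their side: ACK'd only
            remoteRevocationKey, remoteRevocationHash)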

The resulting change is a rather minor diff. However, with this state
machine it’s important to note that the order of sig/revoke messages
has been swapped. A proper exchange now looks like the following (a
call-level sketch follows the list):
    * Alice -> Add, Add, Add
    * Alice -> Sig
    * Revoke <- Bob
    * Sig <- Bob
    * Alice -> Revoke
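
In terms of the updated API below, that exchange corresponds to
roughly the following call sequence (a sketch only: alice and bob are
assumed to be two LightningChannel instances wired back-to-back,
htlcs is an assumed slice of *lnwire.UpdateAddHTLC, and error
handling is elided):

    // Alice -> Add, Add, Add
    for _, htlc := range htlcs {
            alice.AddHTLC(htlc)
            bob.ReceiveHTLC(htlc)
    }

    // Alice -> Sig: implicitly covers all of Alice's adds.
    aliceSig, _ := alice.SignNextCommitment()
    bob.ReceiveNewCommitment(aliceSig)

    // Revoke <- Bob: implicitly ACKs Alice's adds.
    bobRev, _ := bob.RevokeCurrentCommitment()
    alice.ReceiveRevocation(bobRev)

    // Sig <- Bob: now includes Alice's ACK'd adds.
    bobSig, _ := bob.SignNextCommitment()
    alice.ReceiveNewCommitment(bobSig)

    // Alice -> Revoke: locks the new state in for both parties.
    aliceRev, _ := alice.RevokeCurrentCommitment()
    bob.ReceiveRevocation(aliceRev)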

One other thing that’s worth noting is that with this state machine,
since what’s included in an update is implicit, both sides may at
times need to send a new commitment update in the case of a
concurrent state transition initiated by both sides.
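
Since a signature doesn’t name an explicit index, a node can detect
that such a follow-up signature is needed after processing a
revocation. A minimal sketch using PendingUpdates from the diff below
(the retransmission policy and the send helper are assumptions):

    if _, err := lc.ReceiveRevocation(revMsg); err != nil {
            return err
    }
    // If our chain tips still differ (e.g. both sides initiated a
    // state transition at once), initiate another commitment update
    // of our own.
    if lc.PendingUpdates() {
            sig, err := lc.SignNextCommitment()
            if err != nil {
                    return err
            }
            send(sig) // assumed wire-send helper
    }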

Finally, all counters/indexes have been made 64-bit integers in order
to properly match the spec.
Olaoluwa Osuntokun 2017-02-20 17:55:33 -08:00
parent 6cf905088c
commit 0e0e207802
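
The diff below references a new updateLog type whose definition isn’t
part of these hunks. For orientation, here is a minimal sketch of
what it must provide, inferred from the call sites below; the
embedded list.List and index map mirror the old
ourUpdateLog/ourLogIndex pair, and the exact field layout is an
assumption:

    package lnwallet

    import "container/list"

    // updateLog is an append-mostly log of *PaymentDescriptor
    // updates (PaymentDescriptor as defined in this package).
    type updateLog struct {
            *list.List // HTLC updates, walked front-to-back

            // logIndex is the index the next appended update receives.
            logIndex uint64

            // ackedIndex marks the prefix of this log that the remote
            // party has irrevocably committed to.
            ackedIndex uint64

            // pendingAckIndex records logIndex at signing time, so the
            // matching revocation can ACK exactly that batch.
            pendingAckIndex uint64

            index map[uint64]*list.Element // O(1) lookup by Index
    }

    func newUpdateLog() *updateLog {
            return &updateLog{
                    List:  list.New(),
                    index: make(map[uint64]*list.Element),
            }
    }

    func (u *updateLog) appendUpdate(pd *PaymentDescriptor) {
            u.index[u.logIndex] = u.PushBack(pd)
            u.logIndex++
    }

    func (u *updateLog) lookup(i uint64) *PaymentDescriptor {
            if e, ok := u.index[i]; ok {
                    return e.Value.(*PaymentDescriptor)
            }
            return nil
    }

    // initiateTransition is called once a new commitment covering
    // this log has been signed.
    func (u *updateLog) initiateTransition() {
            u.pendingAckIndex = u.logIndex
    }

    // ackTransition is called once the matching revocation arrives,
    // implicitly ACK'ing the remembered batch of updates.
    func (u *updateLog) ackTransition() {
            u.ackedIndex = u.pendingAckIndex
            u.pendingAckIndex = 0
    }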

@@ -39,7 +39,7 @@ const (
// a greater degree of de-synchronization by allowing either party to
// extend the other's commitment chain non-interactively, and also
// serves as a flow control mechanism to a degree.
InitialRevocationWindow = 4
InitialRevocationWindow = 1
)
// channelState is an enum like type which represents the current state of a
@@ -129,18 +129,19 @@ type PaymentDescriptor struct {
// Index is the log entry number that this HTLC update has within the
// log. Depending on if IsIncoming is true, this is either an entry the
// remote party added, or one that we added locally.
Index uint32
Index uint64
// ParentIndex is the index of the log entry that this HTLC update
// settles or times out.
ParentIndex uint32
ParentIndex uint64
// Payload is an opaque blob which is used to complete multi-hop routing.
// Payload is an opaque blob which is used to complete multi-hop
// routing.
Payload []byte
// Type denotes the exact type of the PaymentDescriptor. In the case of
// a Timeout, or Settle type, then the Parent field will point into the
// log to the HTLC being modified.
// EntryType denotes the exact type of the PaymentDescriptor. In the
// case of a Timeout, or Settle type, then the Parent field will point
// into the log to the HTLC being modified.
EntryType updateType
// addCommitHeight[Remote|Local] encodes the height of the commitment
@@ -151,9 +152,10 @@ type PaymentDescriptor struct {
addCommitHeightLocal uint64
// removeCommitHeight[Remote|Local] encodes the height of the
//commitment which removed the parent pointer of this PaymentDescriptor
//either due to a timeout or a settle. Once both these heights are
//above the tail of both chains, the log entries can safely be removed.
// commitment which removed the parent pointer of this
// PaymentDescriptor either due to a timeout or a settle. Once both
// these heights are above the tail of both chains, the log entries can
// safely be removed.
removeCommitHeightRemote uint64
removeCommitHeightLocal uint64
@@ -184,9 +186,8 @@ type commitment struct {
// new commitment sent to the remote party includes an index in the
// shared log which details which of their updates we're including in
// this new commitment.
// TODO(roasbeef): also make uint64?
ourMessageIndex uint32
theirMessageIndex uint32
ourMessageIndex uint64
theirMessageIndex uint64
// txn is the commitment transaction generated by including any HTLC
// updates whose index are below the two indexes listed above. If this
@@ -219,7 +220,7 @@ func (c *commitment) toChannelDelta() (*channeldb.ChannelDelta, error) {
delta := &channeldb.ChannelDelta{
LocalBalance: c.ourBalance,
RemoteBalance: c.theirBalance,
UpdateNum: uint32(c.height),
UpdateNum: c.height,
Htlcs: make([]*channeldb.HTLC, 0, numHtlcs),
}
@@ -482,10 +483,11 @@ type LightningChannel struct {
sync.RWMutex
ourLogCounter uint32
theirLogCounter uint32
pendingACK bool
status channelState
status channelState
// Capacity is the total capacity of this channel.
Capacity btcutil.Amount
// currentHeight is the current height of our local commitment chain.
@@ -505,7 +507,7 @@ type LightningChannel struct {
// new commitment. The front of the slice is popped off once we receive
// a revocation for a prior state. This head element then becomes the
// next set of keys/hashes we expect to be revoked.
usedRevocations []*lnwire.CommitRevocation
usedRevocations []*lnwire.RevokeAndAck
// revocationWindow is a window of revocations sent to us by the
// remote party, allowing us to create new commitment transactions
@@ -514,7 +516,7 @@ type LightningChannel struct {
// transaction for the remote node that they are able to revoke. If
// this slice is empty, then we cannot make any new updates to their
// commitment chain.
revocationWindow []*lnwire.CommitRevocation
revocationWindow []*lnwire.RevokeAndAck
// remoteCommitChain is the remote node's commitment chain. Any new
// commitments we initiate are added to the tip of this chain.
@@ -530,17 +532,12 @@ type LightningChannel struct {
stateMtx sync.RWMutex
channelState *channeldb.OpenChannel
// stateUpdateLog is a (mostly) append-only log storing all the HTLC
// [local|remote]Log is a (mostly) append-only log storing all the HTLC
// updates to this channel. The log is walked backwards as HTLC updates
// are applied in order to re-construct a commitment transaction from a
// commitment. The log is compacted once a revocation is received.
ourUpdateLog *list.List
theirUpdateLog *list.List
// logIndex is an index into the above log. This index is used to
// remove Add state updates, once a timeout/settle is received.
ourLogIndex map[uint32]*list.Element
theirLogIndex map[uint32]*list.Element
localUpdateLog *updateLog
remoteUpdateLog *updateLog
// rHashMap is a map with PaymentHashes pointing to their respective
// PaymentDescriptors. We insert *PaymentDescriptors whenever we
@@ -552,9 +549,12 @@ type LightningChannel struct {
LocalDeliveryScript []byte
RemoteDeliveryScript []byte
// FundingWitnessScript is the witness script for the 2-of-2 multi-sig
// that opened the channel.
FundingWitnessScript []byte
fundingTxIn *wire.TxIn
fundingP2WSH []byte
fundingTxIn *wire.TxIn
fundingP2WSH []byte
// ForceCloseSignal is a channel that is closed to indicate that a
// local system has initiated a force close by broadcasting the current
@@ -601,10 +601,8 @@ func NewLightningChannel(signer Signer, events chainntnfs.ChainNotifier,
localCommitChain: newCommitmentChain(state.NumUpdates),
channelState: state,
revocationWindowEdge: state.NumUpdates,
ourUpdateLog: list.New(),
theirUpdateLog: list.New(),
ourLogIndex: make(map[uint32]*list.Element),
theirLogIndex: make(map[uint32]*list.Element),
localUpdateLog: newUpdateLog(),
remoteUpdateLog: newUpdateLog(),
rHashMap: make(map[PaymentHash][]*PaymentDescriptor),
Capacity: state.Capacity,
LocalDeliveryScript: state.OurDeliveryScript,
@@ -953,7 +951,7 @@ func (lc *LightningChannel) restoreStateLogs() error {
pastHeight = lc.currentHeight - 1
}
var ourCounter, theirCounter uint32
var ourCounter, theirCounter uint64
for _, htlc := range lc.channelState.Htlcs {
// TODO(roasbeef): set isForwarded to false for all? need to
// persist state w.r.t to if forwarded or not, or can
@@ -969,21 +967,18 @@ func (lc *LightningChannel) restoreStateLogs() error {
if !htlc.Incoming {
pd.Index = ourCounter
lc.ourLogIndex[pd.Index] = lc.ourUpdateLog.PushBack(pd)
lc.localUpdateLog.appendUpdate(pd)
ourCounter++
} else {
pd.Index = theirCounter
lc.theirLogIndex[pd.Index] = lc.theirUpdateLog.PushBack(pd)
lc.remoteUpdateLog.appendUpdate(pd)
lc.rHashMap[pd.RHash] = append(lc.rHashMap[pd.RHash], pd)
theirCounter++
}
}
lc.ourLogCounter = ourCounter
lc.theirLogCounter = theirCounter
lc.localCommitChain.tail().ourMessageIndex = ourCounter
lc.localCommitChain.tail().theirMessageIndex = theirCounter
lc.remoteCommitChain.tail().ourMessageIndex = ourCounter
@@ -1002,9 +997,9 @@ type htlcView struct {
// fetchHTLCView returns all the candidate HTLC updates which should be
// considered for inclusion within a commitment based on the passed HTLC log
// indexes.
func (lc *LightningChannel) fetchHTLCView(theirLogIndex, ourLogIndex uint32) *htlcView {
func (lc *LightningChannel) fetchHTLCView(theirLogIndex, ourLogIndex uint64) *htlcView {
var ourHTLCs []*PaymentDescriptor
for e := lc.ourUpdateLog.Front(); e != nil; e = e.Next() {
for e := lc.localUpdateLog.Front(); e != nil; e = e.Next() {
htlc := e.Value.(*PaymentDescriptor)
// This HTLC is active from this point-of-view iff the log
@@ -1016,7 +1011,7 @@ func (lc *LightningChannel) fetchHTLCView(theirLogIndex, ourLogIndex uint32) *ht
}
var theirHTLCs []*PaymentDescriptor
for e := lc.theirUpdateLog.Front(); e != nil; e = e.Next() {
for e := lc.remoteUpdateLog.Front(); e != nil; e = e.Next() {
htlc := e.Value.(*PaymentDescriptor)
// If this is an incoming HTLC, then it is only active from
@@ -1040,7 +1035,7 @@ func (lc *LightningChannel) fetchHTLCView(theirLogIndex, ourLogIndex uint32) *ht
// commitment updates. A fully populated commitment is returned which reflects
// the proper balances for both sides at this point in the commitment chain.
func (lc *LightningChannel) fetchCommitmentView(remoteChain bool,
ourLogIndex, theirLogIndex uint32, revocationKey *btcec.PublicKey,
ourLogIndex, theirLogIndex uint64, revocationKey *btcec.PublicKey,
revocationHash [32]byte) (*commitment, error) {
var commitChain *commitmentChain
@@ -1161,8 +1156,8 @@ func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance,
// keep track of which entries we need to skip when creating the final
// htlc view. We skip an entry whenever we find a settle or a timeout
// modifying an entry.
skipUs := make(map[uint32]struct{})
skipThem := make(map[uint32]struct{})
skipUs := make(map[uint64]struct{})
skipThem := make(map[uint64]struct{})
// First we run through non-add entries in both logs, populating the
// skip sets and mutating the current chain state (crediting balances, etc) to
@@ -1180,7 +1175,7 @@ func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance,
lc.channelState.TotalSatoshisReceived += uint64(entry.Amount)
}
addEntry := lc.theirLogIndex[entry.ParentIndex].Value.(*PaymentDescriptor)
addEntry := lc.remoteUpdateLog.lookup(entry.ParentIndex)
skipThem[addEntry.Index] = struct{}{}
processRemoveEntry(entry, ourBalance, theirBalance,
@@ -1200,7 +1195,7 @@ func (lc *LightningChannel) evaluateHTLCView(view *htlcView, ourBalance,
lc.channelState.TotalSatoshisSent += uint64(entry.Amount)
}
addEntry := lc.ourLogIndex[entry.ParentIndex].Value.(*PaymentDescriptor)
addEntry := lc.localUpdateLog.lookup(entry.ParentIndex)
skipUs[addEntry.Index] = struct{}{}
processRemoveEntry(entry, ourBalance, theirBalance,
@@ -1296,19 +1291,22 @@ func processRemoveEntry(htlc *PaymentDescriptor, ourBalance,
// the HTLC amount.
case isIncoming && htlc.EntryType == Settle:
*ourBalance += htlc.Amount
// Otherwise, this HTLC is being cancelled, therefore the value of the
// Otherwise, this HTLC is being failed out, therefore the value of the
// HTLC should return to the remote party.
case isIncoming && htlc.EntryType == Cancel:
case isIncoming && htlc.EntryType == Fail:
*theirBalance += htlc.Amount
// If an outgoing HTLC is being settled, then this means that the
// downstream party presented the preimage or learned of it via a
// downstream peer. In either case, we credit their settled value with
// the value of the HTLC.
case !isIncoming && htlc.EntryType == Settle:
*theirBalance += htlc.Amount
// Otherwise, one of our outgoing HTLCs has been cancelled, so the
// value of the HTLC should be returned to our settled balance.
case !isIncoming && htlc.EntryType == Cancel:
// Otherwise, one of our outgoing HTLC's has timed out, so the value of
// the HTLC should be returned to our settled balance.
case !isIncoming && htlc.EntryType == Fail:
*ourBalance += htlc.Amount
}
@@ -1321,21 +1319,34 @@ func processRemoveEntry(htlc *PaymentDescriptor, ourBalance,
// decrements the available revocation window by 1. After a successful method
// call, the remote party's commitment chain is extended by a new commitment
// which includes all updates to the HTLC log prior to this method invocation.
func (lc *LightningChannel) SignNextCommitment() ([]byte, uint32, error) {
func (lc *LightningChannel) SignNextCommitment() ([]byte, error) {
lc.Lock()
defer lc.Unlock()
err := lc.validateCommitmentSanity(lc.theirLogCounter, lc.ourLogCounter, false)
if err != nil {
return nil, 0, err
// If we're awaiting an ACK to a commitment signature, then we're
// unable to create new states as we don't have any revocations we can
// use.
if lc.pendingACK {
return nil, ErrNoWindow
}
// Ensure that we have enough unused revocation hashes given to us by the
// remote party. If the set is empty, then we're unable to create a new
// state unless they first revoke a prior commitment transaction.
// TODO(roasbeef): remove now due to above?
if len(lc.revocationWindow) == 0 ||
len(lc.usedRevocations) == InitialRevocationWindow {
return nil, 0, ErrNoWindow
return nil, ErrNoWindow
}
// Before we extend this new commitment to the remote commitment chain,
// ensure that we aren't violating any of the constraints the remote
// party set up when we initially set up the channel. If we are, then
// we'll abort this state transition.
err := lc.validateCommitmentSanity(lc.remoteUpdateLog.ackedIndex,
lc.localUpdateLog.logIndex, false)
if err != nil {
return nil, err
}
// Grab the next revocation hash and key to use for this new commitment
@@ -1349,11 +1360,13 @@ func (lc *LightningChannel) SignNextCommitment() ([]byte, uint32, error) {
// state of the remote node's new commitment including our latest added
// HTLCs. The view includes the latest balances for both sides on the
// remote node's chain, and also update the addition height of any new
// HTLC log entries.
newCommitView, err := lc.fetchCommitmentView(true, lc.ourLogCounter,
lc.theirLogCounter, remoteRevocationKey, remoteRevocationHash)
// HTLC log entries. When creating a new remote view, we include
// _all_ of our changes (pending or committed) but only the remote
// node's changes up to the last change we've ACK'd.
newCommitView, err := lc.fetchCommitmentView(true, lc.localUpdateLog.logIndex,
lc.remoteUpdateLog.ackedIndex, remoteRevocationKey, remoteRevocationHash)
if err != nil {
return nil, 0, err
return nil, err
}
walletLog.Tracef("ChannelPoint(%v): extending remote chain to height %v",
@@ -1369,7 +1382,7 @@ func (lc *LightningChannel) SignNextCommitment() ([]byte, uint32, error) {
lc.signDesc.SigHashes = txscript.NewTxSigHashes(newCommitView.txn)
sig, err := lc.signer.SignOutputRaw(newCommitView.txn, lc.signDesc)
if err != nil {
return nil, 0, err
return nil, err
}
// Extend the remote commitment chain by one with the addition of our
@@ -1383,9 +1396,16 @@ func (lc *LightningChannel) SignNextCommitment() ([]byte, uint32, error) {
lc.revocationWindow[0] = nil // Avoid a GC leak.
lc.revocationWindow = lc.revocationWindow[1:]
// Strip off the sighash flag on the signature in order to send it over
// the wire.
return sig, lc.theirLogCounter, nil
// As we've just created a new update for the remote commitment chain,
// we set the bool indicating that we're waiting for an ACK to our new
// changes.
lc.pendingACK = true
// Additionally, we'll remember our log index at this point, so we can
// properly track which changes have been ACK'd.
lc.localUpdateLog.initiateTransition()
return sig, nil
}
// validateCommitmentSanity is used to validate that on current state the commitment
@@ -1393,7 +1413,7 @@ func (lc *LightningChannel) SignNextCommitment() ([]byte, uint32, error) {
// also that all outputs meet Bitcoin spec requirements and they are
// spendable.
func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
ourLogCounter uint32, prediction bool) error {
ourLogCounter uint64, prediction bool) error {
htlcCount := 0
@@ -1436,13 +1456,15 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
// to our local commitment chain. Once we send a revocation for our prior
// state, then this newly added commitment becomes our current accepted channel
// state.
func (lc *LightningChannel) ReceiveNewCommitment(rawSig []byte,
ourLogIndex uint32) error {
func (lc *LightningChannel) ReceiveNewCommitment(rawSig []byte) error {
lc.Lock()
defer lc.Unlock()
err := lc.validateCommitmentSanity(lc.theirLogCounter, ourLogIndex, false)
// Ensure that this new local update from the remote node respects all
// the constraints we specified during initial channel setup. If not,
// then we'll abort the channel as they've violated our constraints.
err := lc.validateCommitmentSanity(lc.remoteUpdateLog.logIndex,
lc.localUpdateLog.ackedIndex, false)
if err != nil {
return err
}
@@ -1465,20 +1487,23 @@ func (lc *LightningChannel) ReceiveNewCommitment(rawSig []byte,
// With the revocation information calculated, construct the new
// commitment view which includes all the entries we know of in their
// HTLC log, and up to ourLogIndex in our HTLC log.
localCommitmentView, err := lc.fetchCommitmentView(false, ourLogIndex,
lc.theirLogCounter, revocationKey, revocationHash)
localCommitmentView, err := lc.fetchCommitmentView(false,
lc.localUpdateLog.ackedIndex, lc.remoteUpdateLog.logIndex,
revocationKey, revocationHash)
if err != nil {
return err
}
walletLog.Tracef("ChannelPoint(%v): extending local chain to height %v",
lc.channelState.ChanID, localCommitmentView.height)
walletLog.Tracef("ChannelPoint(%v): local chain: our_balance=%v, "+
"their_balance=%v, commit_tx: %v", lc.channelState.ChanID,
localCommitmentView.ourBalance, localCommitmentView.theirBalance,
newLogClosure(func() string {
return spew.Sdump(localCommitmentView.txn)
}))
}),
)
// Construct the sighash of the commitment transaction corresponding to
// this newly proposed state update.
@@ -1508,6 +1533,11 @@ func (lc *LightningChannel) ReceiveNewCommitment(rawSig []byte,
localCommitmentView.sig = rawSig
lc.localCommitChain.addCommitment(localCommitmentView)
// Finally we'll keep track of the current pending index for the remote
// party so we can ACK up to this value once we revoke our current
// commitment.
lc.remoteUpdateLog.initiateTransition()
return nil
}
@@ -1520,6 +1550,8 @@ func (lc *LightningChannel) PendingUpdates() bool {
lc.RLock()
defer lc.RUnlock()
// TODO(roasbeef): instead check our current counter?
fullySynced := (lc.localCommitChain.tip().ourMessageIndex ==
lc.remoteCommitChain.tip().ourMessageIndex)
@@ -1531,7 +1563,7 @@ func (lc *LightningChannel) PendingUpdates() bool {
// revocation window is extended by one, and the tail of our local commitment
// chain is advanced by a single commitment. This now lowest unrevoked
// commitment becomes our currently accepted state within the channel.
func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.CommitRevocation, error) {
func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, error) {
lc.Lock()
defer lc.Unlock()
@@ -1539,7 +1571,7 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.CommitRevocation,
// Now that we've accepted a new state transition, we send the remote
// party the revocation for our current commitment state.
revocationMsg := &lnwire.CommitRevocation{}
revocationMsg := &lnwire.RevokeAndAck{}
currentRevocation, err := lc.channelState.LocalElkrem.AtIndex(lc.currentHeight)
if err != nil {
return nil, err
@@ -1567,7 +1599,6 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.CommitRevocation,
// Additionally, generate a channel delta for this state transition for
// persistent storage.
// TODO(roasbeef): update sent/received.
tail := lc.localCommitChain.tail()
delta, err := tail.toChannelDelta()
if err != nil {
@@ -1582,7 +1613,13 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.CommitRevocation,
"our_balance=%v, their_balance=%v", lc.channelState.ChanID,
tail.ourBalance, tail.theirBalance)
revocationMsg.ChannelPoint = lc.channelState.ChanID
// In the process of revoking our current commitment, we've also
// implicitly ACK'd their set of pending changes that arrived before
// the signature that triggered this revocation. So we'll move up their
// ACK'd index within the log to right at this set of pending changes.
lc.remoteUpdateLog.ackTransition()
revocationMsg.ChannelPoint = *lc.channelState.ChanID
return revocationMsg, nil
}
@@ -1592,8 +1629,8 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.CommitRevocation,
// windows are extended, or in response to a state update that we initiate. If
// successful, then the remote commitment chain is advanced by a single
// commitment, and a log compaction is attempted. In addition, a slice of
// HTLCs which can be forwarded upstream are returned.
func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.CommitRevocation) ([]*PaymentDescriptor, error) {
// HTLC's which can be forwarded upstream are returned.
func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) ([]*PaymentDescriptor, error) {
lc.Lock()
defer lc.Unlock()
@@ -1604,6 +1641,11 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.CommitRevocation) (
return nil, nil
}
// Now that we've received a new revocation from the remote party,
// we'll toggle our pendingACK bool to indicate that we can create a
// new commitment state after we finish processing this revocation.
lc.pendingACK = false
ourCommitKey := lc.channelState.OurCommitKey
currentRevocationKey := lc.channelState.TheirCurrentRevocation
pendingRevocation := chainhash.Hash(revMsg.Revocation)
@@ -1673,7 +1715,7 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.CommitRevocation) (
// log as we may be able to prune portions of it now, and update their
// balance.
var htlcsToForward []*PaymentDescriptor
for e := lc.theirUpdateLog.Front(); e != nil; e = e.Next() {
for e := lc.remoteUpdateLog.Front(); e != nil; e = e.Next() {
htlc := e.Value.(*PaymentDescriptor)
if htlc.isForwarded {
@@ -1691,11 +1733,13 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.CommitRevocation) (
if htlc.EntryType == Add &&
remoteChainTail >= htlc.addCommitHeightRemote &&
localChainTail >= htlc.addCommitHeightLocal {
htlc.isForwarded = true
htlcsToForward = append(htlcsToForward, htlc)
} else if htlc.EntryType != Add &&
remoteChainTail >= htlc.removeCommitHeightRemote &&
localChainTail >= htlc.removeCommitHeightLocal {
htlc.isForwarded = true
htlcsToForward = append(htlcsToForward, htlc)
}
@@ -1707,6 +1751,10 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.CommitRevocation) (
compactLogs(lc.localUpdateLog, lc.remoteUpdateLog,
localChainTail, remoteChainTail)
// As a final step, now that we've received an ACK for our last
// batch of pending changes, we'll update our local ACK'd index to the
// now committed index, and reset our pendingACKIndex.
lc.localUpdateLog.ackTransition()
return htlcsToForward, nil
}
@@ -1714,15 +1762,15 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.CommitRevocation) (
// ExtendRevocationWindow extends our revocation window by a single revocation,
// increasing the number of new commitment updates the remote party can
// initiate without our cooperation.
func (lc *LightningChannel) ExtendRevocationWindow() (*lnwire.CommitRevocation, error) {
func (lc *LightningChannel) ExtendRevocationWindow() (*lnwire.RevokeAndAck, error) {
lc.Lock()
defer lc.Unlock()
/// TODO(roasbeef): error if window edge differs from tail by more than
// InitialRevocationWindow
revMsg := &lnwire.CommitRevocation{}
revMsg.ChannelPoint = lc.channelState.ChanID
revMsg := &lnwire.RevokeAndAck{}
revMsg.ChannelPoint = *lc.channelState.ChanID
nextHeight := lc.revocationWindowEdge + 1
revocation, err := lc.channelState.LocalElkrem.AtIndex(nextHeight)
@@ -1742,27 +1790,27 @@ func (lc *LightningChannel) ExtendRevocationWindow() (*lnwire.CommitRevocation,
// AddHTLC adds an HTLC to the state machine's local update log. This method
// should be called when preparing to send an outgoing HTLC.
//
// TODO(roasbeef): check for duplicates below? edge case during restart w/ HTLC
// persistence
func (lc *LightningChannel) AddHTLC(htlc *lnwire.HTLCAddRequest) (uint32, error) {
func (lc *LightningChannel) AddHTLC(htlc *lnwire.UpdateAddHTLC) (uint64, error) {
lc.Lock()
defer lc.Unlock()
err := lc.validateCommitmentSanity(lc.theirLogCounter, lc.ourLogCounter, true)
if err != nil {
if err := lc.validateCommitmentSanity(lc.remoteUpdateLog.logIndex,
lc.localUpdateLog.logIndex, true); err != nil {
return 0, err
}
pd := &PaymentDescriptor{
EntryType: Add,
RHash: PaymentHash(htlc.RedemptionHashes[0]),
RHash: PaymentHash(htlc.PaymentHash),
Timeout: htlc.Expiry,
Amount: htlc.Amount,
Index: lc.ourLogCounter,
Index: lc.localUpdateLog.logIndex,
}
lc.ourLogIndex[pd.Index] = lc.ourUpdateLog.PushBack(pd)
lc.ourLogCounter++
lc.localUpdateLog.appendUpdate(pd)
return pd.Index, nil
}
@@ -1770,25 +1818,24 @@ func (lc *LightningChannel) AddHTLC(htlc *lnwire.HTLCAddRequest) (uint32, error)
// ReceiveHTLC adds an HTLC to the state machine's remote update log. This
// method should be called in response to receiving a new HTLC from the remote
// party.
func (lc *LightningChannel) ReceiveHTLC(htlc *lnwire.HTLCAddRequest) (uint32, error) {
func (lc *LightningChannel) ReceiveHTLC(htlc *lnwire.UpdateAddHTLC) (uint64, error) {
lc.Lock()
defer lc.Unlock()
err := lc.validateCommitmentSanity(lc.theirLogCounter, lc.ourLogCounter, true)
if err != nil {
if err := lc.validateCommitmentSanity(lc.remoteUpdateLog.logIndex,
lc.localUpdateLog.logIndex, true); err != nil {
return 0, err
}
pd := &PaymentDescriptor{
EntryType: Add,
RHash: PaymentHash(htlc.RedemptionHashes[0]),
RHash: PaymentHash(htlc.PaymentHash),
Timeout: htlc.Expiry,
Amount: htlc.Amount,
Index: lc.theirLogCounter,
Index: lc.remoteUpdateLog.logIndex,
}
lc.theirLogIndex[pd.Index] = lc.theirUpdateLog.PushBack(pd)
lc.theirLogCounter++
lc.remoteUpdateLog.appendUpdate(pd)
lc.rHashMap[pd.RHash] = append(lc.rHashMap[pd.RHash], pd)
@@ -1799,7 +1846,7 @@ func (lc *LightningChannel) ReceiveHTLC(htlc *lnwire.HTLCAddRequest) (uint32, er
// remote log index of the HTLC settled is returned in order to facilitate
// creating the corresponding wire message. In the case the supplied preimage
// is invalid, an error is returned.
func (lc *LightningChannel) SettleHTLC(preimage [32]byte) (uint32, error) {
func (lc *LightningChannel) SettleHTLC(preimage [32]byte) (uint64, error) {
lc.Lock()
defer lc.Unlock()
@@ -1814,13 +1861,12 @@ func (lc *LightningChannel) SettleHTLC(preimage [32]byte) (uint32, error) {
pd := &PaymentDescriptor{
Amount: targetHTLC.Amount,
RPreimage: preimage,
Index: lc.ourLogCounter,
Index: lc.localUpdateLog.logIndex,
ParentIndex: targetHTLC.Index,
EntryType: Settle,
}
lc.ourUpdateLog.PushBack(pd)
lc.ourLogCounter++
lc.localUpdateLog.appendUpdate(pd)
lc.rHashMap[paymentHash][0] = nil
lc.rHashMap[paymentHash] = lc.rHashMap[paymentHash][1:]
@@ -1835,17 +1881,16 @@ func (lc *LightningChannel) SettleHTLC(preimage [32]byte) (uint32, error) {
// index into the local log. If the specified index doesn't exist within the
// log, an error is returned. Similarly, if the preimage is invalid
// w.r.t the referenced HTLC, then a distinct error is returned.
func (lc *LightningChannel) ReceiveHTLCSettle(preimage [32]byte, logIndex uint32) error {
func (lc *LightningChannel) ReceiveHTLCSettle(preimage [32]byte, logIndex uint64) error {
lc.Lock()
defer lc.Unlock()
paymentHash := fastsha256.Sum256(preimage[:])
addEntry, ok := lc.ourLogIndex[logIndex]
if !ok {
return fmt.Errorf("non existant log entry")
htlc := lc.localUpdateLog.lookup(logIndex)
if htlc == nil {
return fmt.Errorf("non existent log entry")
}
htlc := addEntry.Value.(*PaymentDescriptor)
if !bytes.Equal(htlc.RHash[:], paymentHash[:]) {
return fmt.Errorf("invalid payment hash")
}
@@ -1854,27 +1899,26 @@ func (lc *LightningChannel) ReceiveHTLCSettle(preimage [32]byte, logIndex uint32
Amount: htlc.Amount,
RPreimage: preimage,
ParentIndex: htlc.Index,
Index: lc.theirLogCounter,
Index: lc.remoteUpdateLog.logIndex,
EntryType: Settle,
}
lc.theirUpdateLog.PushBack(pd)
lc.theirLogCounter++
lc.remoteUpdateLog.appendUpdate(pd)
return nil
}
// CancelHTLC attempts to cancel a targeted HTLC by its payment hash, inserting
// an entry which will remove the target log entry within the next commitment
// FailHTLC attempts to fail a targeted HTLC by its payment hash, inserting an
// entry which will remove the target log entry within the next commitment
// update. This method is intended to be called in order to cancel an
// _incoming_ HTLC.
func (lc *LightningChannel) CancelHTLC(rHash [32]byte) (uint32, error) {
func (lc *LightningChannel) FailHTLC(rHash [32]byte) (uint64, error) {
lc.Lock()
defer lc.Unlock()
addEntries, ok := lc.rHashMap[rHash]
if !ok {
return 0, fmt.Errorf("unable to find HTLC to cancel")
return 0, fmt.Errorf("unable to find HTLC to fail")
}
addEntry := addEntries[0]
@@ -1882,12 +1926,11 @@ func (lc *LightningChannel) CancelHTLC(rHash [32]byte) (uint32, error) {
Amount: addEntry.Amount,
RHash: addEntry.RHash,
ParentIndex: addEntry.Index,
Index: lc.ourLogCounter,
EntryType: Cancel,
Index: lc.localUpdateLog.logIndex,
EntryType: Fail,
}
lc.ourUpdateLog.PushBack(pd)
lc.ourLogCounter++
lc.localUpdateLog.appendUpdate(pd)
lc.rHashMap[rHash][0] = nil
lc.rHashMap[rHash] = lc.rHashMap[rHash][1:]
@@ -1898,30 +1941,28 @@ func (lc *LightningChannel) CancelHTLC(rHash [32]byte) (uint32, error) {
return addEntry.Index, nil
}
// ReceiveCancelHTLC attempts to cancel a targeted HTLC by its log index,
// ReceiveFailHTLC attempts to cancel a targeted HTLC by its log index,
// inserting an entry which will remove the target log entry within the next
// commitment update. This method should be called in response to the upstream
// party cancelling an outgoing HTLC.
func (lc *LightningChannel) ReceiveCancelHTLC(logIndex uint32) error {
func (lc *LightningChannel) ReceiveFailHTLC(logIndex uint64) error {
lc.Lock()
defer lc.Unlock()
addEntry, ok := lc.ourLogIndex[logIndex]
if !ok {
return fmt.Errorf("unable to find HTLC to cancel")
htlc := lc.localUpdateLog.lookup(logIndex)
if htlc == nil {
return fmt.Errorf("unable to find HTLC to fail")
}
htlc := addEntry.Value.(*PaymentDescriptor)
pd := &PaymentDescriptor{
Amount: htlc.Amount,
RHash: htlc.RHash,
ParentIndex: htlc.Index,
Index: lc.theirLogCounter,
EntryType: Cancel,
Index: lc.remoteUpdateLog.logIndex,
EntryType: Fail,
}
lc.theirUpdateLog.PushBack(pd)
lc.theirLogCounter++
lc.remoteUpdateLog.appendUpdate(pd)
return nil
}