lnd: switch over internal indexes to use the new Channel IDs

Olaoluwa Osuntokun 2017-04-16 15:41:11 -07:00
parent d74d3fa0f3
commit 60c0cebfd5
2 changed files with 102 additions and 95 deletions
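The new index key used throughout this change is lnwire.ChannelID, which the code derives from a channel's funding outpoint via lnwire.NewChanIDFromOutPoint. As a rough standalone sketch of what that derivation looks like, assuming the BOLT #2 convention of XOR-ing the final two bytes of the funding txid with the big-endian funding output index (the ChannelID type and chanIDFromOutPoint helper below are illustrative stand-ins, not lnd's own definitions):

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/btcsuite/btcd/wire"
)

// ChannelID stands in for lnwire.ChannelID: a fixed 32-byte value that is
// comparable and therefore usable directly as a map key.
type ChannelID [32]byte

// chanIDFromOutPoint sketches the assumed derivation: copy the funding txid,
// then XOR its last two bytes with the big-endian funding output index.
func chanIDFromOutPoint(op *wire.OutPoint) ChannelID {
	var cid ChannelID
	copy(cid[:], op.Hash[:])

	var idx [2]byte
	binary.BigEndian.PutUint16(idx[:], uint16(op.Index))
	cid[30] ^= idx[0]
	cid[31] ^= idx[1]

	return cid
}

func main() {
	// A zero funding txid with output index 1, purely for illustration.
	op := &wire.OutPoint{Index: 1}
	fmt.Printf("derived channel ID: %x\n", chanIDFromOutPoint(op))
}

Because the new wire messages (UpdateAddHTLC, CommitSig, and so on) already carry this ChanID, keying the internal maps by the same value lets the switch and the peer look channels up without converting back to an outpoint on every message.

htlcswitch.go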

@@ -108,15 +108,15 @@ func (b *boundedLinkChan) restoreSlots(numSlots uint32) {
// metadata such as the current available bandwidth of the link (in satoshis)
// which aid the switch in optimally forwarding HTLCs.
type link struct {
chanID lnwire.ChannelID
capacity btcutil.Amount
availableBandwidth int64 // atomic
*boundedLinkChan
peer *peer
chanPoint *wire.OutPoint
*boundedLinkChan
}
// htlcPacket is a wrapper around an lnwire message which adds, times out, or
@@ -127,7 +127,7 @@ type htlcPacket struct {
dest chainhash.Hash
srcLink wire.OutPoint
srcLink lnwire.ChannelID
onion *sphinx.ProcessedPacket
msg lnwire.Message
@@ -192,11 +192,11 @@ type htlcSwitch struct {
started int32 // atomic
shutdown int32 // atomic
// chanIndex maps a channel's outpoint to a link which contains
// additional information about the channel, and additionally houses a
// pointer to the peer managing the channel.
// chanIndex maps a channel's ID to a link which contains additional
// information about the channel, and additionally houses a pointer to
// the peer managing the channel.
chanIndexMtx sync.RWMutex
chanIndex map[wire.OutPoint]*link
chanIndex map[lnwire.ChannelID]*link
// interfaces maps a node's ID to the set of links (active channels) we
// currently have open with that peer.
@@ -240,7 +240,7 @@ type htlcSwitch struct {
// newHtlcSwitch creates a new htlcSwitch.
func newHtlcSwitch() *htlcSwitch {
return &htlcSwitch{
chanIndex: make(map[wire.OutPoint]*link),
chanIndex: make(map[lnwire.ChannelID]*link),
interfaces: make(map[chainhash.Hash][]*link),
onionIndex: make(map[[ripemd160.Size]byte][]*link),
paymentCircuits: make(map[circuitKey]*paymentCircuit),
@@ -359,7 +359,7 @@ out:
n := atomic.AddInt64(&link.availableBandwidth,
-int64(amt))
hswcLog.Tracef("Decrementing link %v bandwidth to %v",
link.chanPoint, n)
link.chanID, n)
continue out
}
@@ -435,7 +435,7 @@ out:
hswcLog.Errorf("unable to forward HTLC "+
"link %v has insufficient "+
"capacity, have %v need %v",
clearLink[0].chanPoint, linkBandwidth,
clearLink[0].chanID, linkBandwidth,
int64(wireMsg.Amount))
pkt := &htlcPacket{
@@ -462,8 +462,8 @@ out:
h.paymentCircuits[cKey] = circuit
hswcLog.Debugf("Creating onion circuit for %x: %v<->%v",
cKey[:], clearLink[0].chanPoint,
settleLink.chanPoint)
cKey[:], clearLink[0].chanID,
settleLink.chanID)
// With the circuit initiated, send the htlcPkt
// to the clearing link within the circuit to
@@ -482,7 +482,7 @@ out:
n := atomic.AddInt64(&circuit.clear.availableBandwidth,
-int64(pkt.amt))
hswcLog.Tracef("Decrementing link %v bandwidth to %v",
circuit.clear.chanPoint, n)
circuit.clear.chanID, n)
deltaSatRecv += pkt.amt
@@ -510,8 +510,8 @@ out:
hswcLog.Debugf("Closing completed onion "+
"circuit for %x: %v<->%v", rHash[:],
circuit.clear.chanPoint,
circuit.settle.chanPoint)
circuit.clear.chanID,
circuit.settle.chanID)
circuit.settle.sendAndRestore(&htlcPacket{
msg: wireMsg,
@@ -525,7 +525,7 @@ out:
n := atomic.AddInt64(&circuit.settle.availableBandwidth,
int64(pkt.amt))
hswcLog.Tracef("Incrementing link %v bandwidth to %v",
circuit.settle.chanPoint, n)
circuit.settle.chanID, n)
deltaSatSent += pkt.amt
@@ -556,7 +556,7 @@ out:
int64(pkt.amt))
hswcLog.Debugf("HTLC %x has been cancelled, "+
"incrementing link %v bandwidth to %v", pkt.payHash,
circuit.clear.chanPoint, n)
circuit.clear.chanID, n)
// With our link info updated, we now continue
// the error propagation by sending the
@@ -636,11 +636,12 @@ out:
// adds the link to the existing set of links for the target interface.
func (h *htlcSwitch) handleRegisterLink(req *registerLinkMsg) {
chanPoint := req.linkInfo.ChannelPoint
chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
newLink := &link{
capacity: req.linkInfo.Capacity,
availableBandwidth: int64(req.linkInfo.LocalBalance),
peer: req.peer,
chanPoint: chanPoint,
chanID: chanID,
}
// To ensure we never accidentally cause an HTLC overflow, we'll limit,
@@ -655,7 +656,7 @@ func (h *htlcSwitch) handleRegisterLink(req *registerLinkMsg) {
// close them, update their link capacity, or possibly during multi-hop
// HTLC forwarding.
h.chanIndexMtx.Lock()
h.chanIndex[*chanPoint] = newLink
h.chanIndex[chanID] = newLink
h.chanIndexMtx.Unlock()
interfaceID := req.peer.lightningID
@@ -687,8 +688,8 @@ func (h *htlcSwitch) handleRegisterLink(req *registerLinkMsg) {
// this link leaves the interface empty, then the interface entry itself is
// also deleted.
func (h *htlcSwitch) handleUnregisterLink(req *unregisterLinkMsg) {
hswcLog.Debugf("unregistering active link, interface=%v, chan_point=%v",
hex.EncodeToString(req.chanInterface[:]), req.chanPoint)
hswcLog.Debugf("unregistering active link, interface=%v, chan_id=%v",
hex.EncodeToString(req.chanInterface[:]), req.chanID)
chanInterface := req.chanInterface
@@ -704,21 +705,21 @@ func (h *htlcSwitch) handleUnregisterLink(req *unregisterLinkMsg) {
// A request with a nil channel ID indicates that all the current
// links for this interface should be cleared.
if req.chanPoint == nil {
if req.chanID == nil {
hswcLog.Debugf("purging all active links for interface %v",
hex.EncodeToString(chanInterface[:]))
for _, link := range links {
delete(h.chanIndex, *link.chanPoint)
delete(h.chanIndex, link.chanID)
}
links = nil
} else {
delete(h.chanIndex, *req.chanPoint)
delete(h.chanIndex, *req.chanID)
for i := 0; i < len(links); i++ {
chanLink := links[i]
if chanLink.chanPoint == req.chanPoint {
if chanLink.chanID == *req.chanID {
// We perform an in-place delete by sliding
// every element down one, then slicing off the
// last element. Additionally, we update the
@@ -762,18 +763,20 @@ func (h *htlcSwitch) handleUnregisterLink(req *unregisterLinkMsg) {
// handleCloseLink sends a message to the peer responsible for the target
// channel point, instructing it to initiate a cooperative channel closure.
func (h *htlcSwitch) handleCloseLink(req *closeLinkReq) {
chanID := lnwire.NewChanIDFromOutPoint(req.chanPoint)
h.chanIndexMtx.RLock()
targetLink, ok := h.chanIndex[*req.chanPoint]
targetLink, ok := h.chanIndex[chanID]
h.chanIndexMtx.RUnlock()
if !ok {
req.err <- fmt.Errorf("channel point %v not found, or peer "+
req.err <- fmt.Errorf("channel %v not found, or peer "+
"offline", req.chanPoint)
return
}
hswcLog.Debugf("requesting interface %v to close link %v",
hex.EncodeToString(targetLink.peer.lightningID[:]), req.chanPoint)
hex.EncodeToString(targetLink.peer.lightningID[:]), chanID)
targetLink.peer.localCloseChanReqs <- req
// TODO(roasbeef): if type was CloseBreach initiate force closure with
@@ -784,7 +787,7 @@ func (h *htlcSwitch) handleCloseLink(req *closeLinkReq) {
// channel's available bandwidth by the delta specified within the message.
func (h *htlcSwitch) handleLinkUpdate(req *linkInfoUpdateMsg) {
h.chanIndexMtx.RLock()
link, ok := h.chanIndex[*req.targetLink]
link, ok := h.chanIndex[req.targetLink]
h.chanIndexMtx.RUnlock()
if !ok {
hswcLog.Errorf("received link update for non-existent link: %v",
@@ -828,7 +831,7 @@ func (h *htlcSwitch) RegisterLink(p *peer, linkInfo *channeldb.ChannelSnapshot,
// unregisterLinkMsg is a message which requests the active link be unregistered.
type unregisterLinkMsg struct {
chanInterface [32]byte
chanPoint *wire.OutPoint
chanID *lnwire.ChannelID
// remoteID is the identity public key of the node whose link is being
// removed. The public key is expected to be serialized in
@@ -842,13 +845,15 @@ type unregisterLinkMsg struct {
// UnregisterLink requests the htlcSwitch to unregister the target active link.
// An unregistered link will no longer be considered a candidate to forward
// HTLCs.
func (h *htlcSwitch) UnregisterLink(remotePub *btcec.PublicKey, chanPoint *wire.OutPoint) {
func (h *htlcSwitch) UnregisterLink(remotePub *btcec.PublicKey,
chanID *lnwire.ChannelID) {
done := make(chan struct{}, 1)
rawPub := remotePub.SerializeCompressed()
h.linkControl <- &unregisterLinkMsg{
chanInterface: sha256.Sum256(rawPub),
chanPoint: chanPoint,
chanID: chanID,
remoteID: rawPub,
done: done,
}
@@ -904,7 +909,7 @@ func (h *htlcSwitch) CloseLink(chanPoint *wire.OutPoint,
// linkInfoUpdateMsg encapsulates a request for the htlc switch to update the
// metadata related to the target link.
type linkInfoUpdateMsg struct {
targetLink *wire.OutPoint
targetLink lnwire.ChannelID
bandwidthDelta btcutil.Amount
}
@@ -913,9 +918,9 @@ type linkInfoUpdateMsg struct {
// within the link by the passed satoshi delta. This function may be used when
// re-anchoring to boost the capacity of a channel, or once a peer settles an
// HTLC invoice.
func (h *htlcSwitch) UpdateLink(chanPoint *wire.OutPoint, delta btcutil.Amount) {
func (h *htlcSwitch) UpdateLink(chanID lnwire.ChannelID, delta btcutil.Amount) {
h.linkControl <- &linkInfoUpdateMsg{
targetLink: chanPoint,
targetLink: chanID,
bandwidthDelta: delta,
}
}
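The pattern that repeats across the switch is: callers that still only hold the funding outpoint (such as CloseLink requests) convert it once with lnwire.NewChanIDFromOutPoint, then index chanIndex by the resulting value, exactly as handleCloseLink does above. A condensed sketch of that lookup using the identifiers from this file (the linkByOutPoint name is hypothetical):

// linkByOutPoint is a hypothetical helper illustrating the outpoint-to-ID
// conversion handleCloseLink performs before consulting chanIndex.
func (h *htlcSwitch) linkByOutPoint(chanPoint *wire.OutPoint) (*link, error) {
	chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

	h.chanIndexMtx.RLock()
	targetLink, ok := h.chanIndex[chanID]
	h.chanIndexMtx.RUnlock()

	if !ok {
		return nil, fmt.Errorf("no link found for channel %v", chanID)
	}

	return targetLink, nil
}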

peer.go

@@ -118,11 +118,11 @@ type peer struct {
// active channels. Channels are indexed into the map by the channel ID
// derived from the funding transaction which opened the channel.
activeChanMtx sync.RWMutex
activeChannels map[wire.OutPoint]*lnwallet.LightningChannel
activeChannels map[lnwire.ChannelID]*lnwallet.LightningChannel
chanSnapshotReqs chan *chanSnapshotReq
htlcManMtx sync.RWMutex
htlcManagers map[wire.OutPoint]chan lnwire.Message
htlcManagers map[lnwire.ChannelID]chan lnwire.Message
// newChannels is used by the fundingManager to send fully opened
// channels to the source peer which handled the funding workflow.
@@ -176,8 +176,8 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
sendQueue: make(chan outgoinMsg, 1),
outgoingQueue: make(chan outgoinMsg, outgoingQueueLen),
activeChannels: make(map[wire.OutPoint]*lnwallet.LightningChannel),
htlcManagers: make(map[wire.OutPoint]chan lnwire.Message),
activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
htlcManagers: make(map[lnwire.ChannelID]chan lnwire.Message),
chanSnapshotReqs: make(chan *chanSnapshotReq),
newChannels: make(chan *newChannelMsg, 1),
@@ -216,19 +216,17 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
continue
}
chanID := dbChan.ChanID
lnChan, err := lnwallet.NewLightningChannel(p.server.lnwallet.Signer,
p.server.chainNotifier, dbChan)
if err != nil {
return err
}
chanPoint := wire.OutPoint{
Hash: chanID.Hash,
Index: chanID.Index,
}
chanPoint := *dbChan.ChanID
chanID := lnwire.NewChanIDFromOutPoint(&chanPoint)
p.activeChanMtx.Lock()
p.activeChannels[chanPoint] = lnChan
p.activeChannels[chanID] = lnChan
p.activeChanMtx.Unlock()
peerLog.Infof("peerID(%v) loaded ChannelPoint(%v)", p.id, chanPoint)
@@ -244,7 +242,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
upstreamLink := make(chan lnwire.Message, 10)
p.htlcManMtx.Lock()
p.htlcManagers[chanPoint] = upstreamLink
p.htlcManagers[chanID] = upstreamLink
p.htlcManMtx.Unlock()
p.wg.Add(1)
@@ -421,7 +419,7 @@ out:
var (
isChanUpdate bool
targetChan wire.OutPoint
targetChan lnwire.ChannelID
)
switch msg := nextMsg.(type) {
@@ -450,25 +448,25 @@ out:
case *lnwire.CloseRequest:
p.remoteCloseChanReqs <- msg
case *lnwire.ErrorGeneric:
p.server.fundingMgr.processErrorGeneric(msg, p.addr)
case *lnwire.Error:
p.server.fundingMgr.processFundingError(msg, p.addr)
// TODO(roasbeef): create ChanUpdater interface for the below
case *lnwire.UpdateAddHTLC:
isChanUpdate = true
targetChan = msg.ChannelPoint
targetChan = msg.ChanID
case *lnwire.UpdateFufillHTLC:
isChanUpdate = true
targetChan = msg.ChannelPoint
targetChan = msg.ChanID
case *lnwire.UpdateFailHTLC:
isChanUpdate = true
targetChan = msg.ChannelPoint
targetChan = msg.ChanID
case *lnwire.RevokeAndAck:
isChanUpdate = true
targetChan = msg.ChannelPoint
targetChan = msg.ChanID
case *lnwire.CommitSig:
isChanUpdate = true
targetChan = msg.ChannelPoint
targetChan = msg.ChanID
case *lnwire.ChannelUpdateAnnouncement,
*lnwire.ChannelAnnouncement,
@@ -721,7 +719,8 @@ out:
select {
case req := <-p.chanSnapshotReqs:
p.activeChanMtx.RLock()
snapshots := make([]*channeldb.ChannelSnapshot, 0, len(p.activeChannels))
snapshots := make([]*channeldb.ChannelSnapshot, 0,
len(p.activeChannels))
for _, activeChan := range p.activeChannels {
snapshot := activeChan.StateSnapshot()
snapshots = append(snapshots, snapshot)
@@ -730,10 +729,11 @@ out:
req.resp <- snapshots
case newChanReq := <-p.newChannels:
chanPoint := *newChanReq.channel.ChannelPoint()
chanPoint := newChanReq.channel.ChannelPoint()
chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
p.activeChanMtx.Lock()
p.activeChannels[chanPoint] = newChanReq.channel
p.activeChannels[chanID] = newChanReq.channel
p.activeChanMtx.Unlock()
peerLog.Infof("New channel active ChannelPoint(%v) "+
@@ -753,11 +753,12 @@ out:
// new channel.
upstreamLink := make(chan lnwire.Message, 10)
p.htlcManMtx.Lock()
p.htlcManagers[chanPoint] = upstreamLink
p.htlcManagers[chanID] = upstreamLink
p.htlcManMtx.Unlock()
p.wg.Add(1)
go p.htlcManager(newChanReq.channel, plexChan, downstreamLink, upstreamLink)
go p.htlcManager(newChanReq.channel, plexChan,
downstreamLink, upstreamLink)
close(newChanReq.done)
@@ -802,7 +803,9 @@ func (p *peer) executeCooperativeClose(channel *lnwallet.LightningChannel) (*cha
if err != nil {
return nil, err
}
closeReq := lnwire.NewCloseRequest(*chanPoint, closeSig)
chanID := lnwire.NewChanIDFromOutPoint(chanPoint)
closeReq := lnwire.NewCloseRequest(chanID, closeSig)
p.queueMsg(closeReq, nil)
return txid, nil
@@ -818,8 +821,10 @@ func (p *peer) handleLocalClose(req *closeLinkReq) {
closingTxid *chainhash.Hash
)
chanID := lnwire.NewChanIDFromOutPoint(req.chanPoint)
p.activeChanMtx.RLock()
channel := p.activeChannels[*req.chanPoint]
channel := p.activeChannels[chanID]
p.activeChanMtx.RUnlock()
switch req.CloseType {
@@ -911,21 +916,17 @@ func (p *peer) handleLocalClose(req *closeLinkReq) {
// handleRemoteClose completes a request for cooperative channel closure
// initiated by the remote node.
func (p *peer) handleRemoteClose(req *lnwire.CloseRequest) {
chanPoint := req.ChannelPoint
key := wire.OutPoint{
Hash: chanPoint.Hash,
Index: chanPoint.Index,
}
p.activeChanMtx.RLock()
channel, ok := p.activeChannels[key]
channel, ok := p.activeChannels[req.ChanID]
p.activeChanMtx.RUnlock()
if !ok {
peerLog.Errorf("unable to close channel, ChannelPoint(%v) is "+
"unknown", key)
peerLog.Errorf("unable to close channel, ChannelID(%v) is "+
"unknown", req.ChanID)
return
}
chanPoint := channel.ChannelPoint()
// Now that we have their signature for the closure transaction, we
// can assemble the final closure transaction, complete with our
// signature.
@@ -955,27 +956,26 @@ func (p *peer) handleRemoteClose(req *lnwire.CloseRequest) {
}
// TODO(roasbeef): also wait for confs before removing state
peerLog.Infof("ChannelPoint(%v) is now "+
"closed", key)
peerLog.Infof("ChannelPoint(%v) is now closed", chanPoint)
if err := wipeChannel(p, channel); err != nil {
peerLog.Errorf("unable to wipe channel: %v", err)
}
p.server.breachArbiter.settledContracts <- &req.ChannelPoint
p.server.breachArbiter.settledContracts <- chanPoint
}
// wipeChannel removes the passed channel from all indexes associated with the
// peer, and deletes the channel from the database.
func wipeChannel(p *peer, channel *lnwallet.LightningChannel) error {
chanID := channel.ChannelPoint()
chanID := lnwire.NewChanIDFromOutPoint(channel.ChannelPoint())
p.activeChanMtx.Lock()
delete(p.activeChannels, *chanID)
delete(p.activeChannels, chanID)
p.activeChanMtx.Unlock()
// Instruct the Htlc Switch to close this link as the channel is no
// longer active.
p.server.htlcSwitch.UnregisterLink(p.addr.IdentityKey, chanID)
p.server.htlcSwitch.UnregisterLink(p.addr.IdentityKey, &chanID)
// Additionally, close up the "downstream" link for the htlcManager which
// has been assigned to this channel. This severs the link between the
@@ -985,7 +985,7 @@ func wipeChannel(p *peer, channel *lnwallet.LightningChannel) error {
// If the channel can't be found in the map, then this channel has
// already been wiped.
htlcWireLink, ok := p.htlcManagers[*chanID]
htlcWireLink, ok := p.htlcManagers[chanID]
if !ok {
p.htlcManMtx.RUnlock()
return nil
@@ -999,7 +999,7 @@ func wipeChannel(p *peer, channel *lnwallet.LightningChannel) error {
// goroutine should have exited gracefully due to the channel closure
// above.
p.htlcManMtx.RLock()
delete(p.htlcManagers, *chanID)
delete(p.htlcManagers, chanID)
p.htlcManMtx.RUnlock()
// Finally, we purge the channel's state from the database, leaving a
@@ -1072,6 +1072,7 @@ type commitmentState struct {
channel *lnwallet.LightningChannel
chanPoint *wire.OutPoint
chanID lnwire.ChannelID
}
// htlcManager is the primary goroutine which drives a channel's commitment
@@ -1104,9 +1105,11 @@ func (p *peer) htlcManager(channel *lnwallet.LightningChannel,
p.queueMsg(rev, nil)
}
chanPoint := channel.ChannelPoint()
state := &commitmentState{
channel: channel,
chanPoint: channel.ChannelPoint(),
chanPoint: chanPoint,
chanID: lnwire.NewChanIDFromOutPoint(chanPoint),
clearedHTCLs: make(map[uint64]*pendingPayment),
htlcsToSettle: make(map[uint64]*channeldb.Invoice),
htlcsToCancel: make(map[uint64]lnwire.FailCode),
@@ -1253,7 +1256,7 @@ func (p *peer) handleDownStreamPkt(state *commitmentState, pkt *htlcPacket) {
// downstream channel, so we add the new HTLC
// to our local log, then update the commitment
// chains.
htlc.ChannelPoint = *state.chanPoint
htlc.ChanID = state.chanID
index, err := state.channel.AddHTLC(htlc)
if err != nil {
// TODO: possibly perform fallback/retry logic
@@ -1272,7 +1275,7 @@ func (p *peer) handleDownStreamPkt(state *commitmentState, pkt *htlcPacket) {
msg: &lnwire.UpdateFailHTLC{
Reason: []byte{byte(0)},
},
srcLink: *state.chanPoint,
srcLink: state.chanID,
}
return
}
@@ -1303,7 +1306,7 @@ func (p *peer) handleDownStreamPkt(state *commitmentState, pkt *htlcPacket) {
// With the HTLC settled, we'll need to populate the wire
// message to target the specific channel and HTLC to be
// settled.
htlc.ChannelPoint = *state.chanPoint
htlc.ChanID = state.chanID
htlc.ID = logIndex
// Then we send the HTLC settle message to the connected peer
@@ -1324,7 +1327,7 @@ func (p *peer) handleDownStreamPkt(state *commitmentState, pkt *htlcPacket) {
// message to target the specific channel and HTLC to be
// cancelled. The "Reason" field will have already been set
// within the switch.
htlc.ChannelPoint = *state.chanPoint
htlc.ChanID = state.chanID
htlc.ID = logIndex
// Finally, we send the HTLC message to the peer which
@@ -1559,7 +1562,7 @@ func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
}
settleMsg := &lnwire.UpdateFufillHTLC{
ChannelPoint: *state.chanPoint,
ChanID: state.chanID,
ID: logIndex,
PaymentPreimage: preimage,
}
@@ -1589,7 +1592,7 @@ func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
}
cancelMsg := &lnwire.UpdateFailHTLC{
ChannelPoint: *state.chanPoint,
ChanID: state.chanID,
ID: logIndex,
Reason: []byte{byte(reason)},
}
@@ -1619,7 +1622,7 @@ func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
// Send this fully activated HTLC to the htlc
// switch to continue the chained clear/settle.
pkt, err := logEntryToHtlcPkt(*state.chanPoint,
pkt, err := logEntryToHtlcPkt(state.chanID,
htlc, onionPkt, reason)
if err != nil {
peerLog.Errorf("unable to make htlc pkt: %v",
@@ -1640,7 +1643,7 @@ func (p *peer) handleUpstreamMsg(state *commitmentState, msg lnwire.Message) {
// payment bandwidth.
// TODO(roasbeef): ideally should wait for next state update.
if bandwidthUpdate != 0 {
p.server.htlcSwitch.UpdateLink(state.chanPoint,
p.server.htlcSwitch.UpdateLink(state.chanID,
bandwidthUpdate)
}
@@ -1684,7 +1687,7 @@ func (p *peer) updateCommitTx(state *commitmentState) error {
}
commitSig := &lnwire.CommitSig{
ChannelPoint: *state.chanPoint,
ChanID: state.chanID,
CommitSig: parsedSig,
}
p.queueMsg(commitSig, nil)
@@ -1707,8 +1710,7 @@ func (p *peer) updateCommitTx(state *commitmentState) error {
// log entry the corresponding htlcPacket with src/dest set along with the
// proper wire message. This helper method is provided in order to aid an
// htlcManager in forwarding packets to the htlcSwitch.
func logEntryToHtlcPkt(chanPoint wire.OutPoint,
pd *lnwallet.PaymentDescriptor,
func logEntryToHtlcPkt(chanID lnwire.ChannelID, pd *lnwallet.PaymentDescriptor,
onionPkt *sphinx.ProcessedPacket,
reason lnwire.FailCode) (*htlcPacket, error) {
@@ -1750,7 +1752,7 @@ func logEntryToHtlcPkt(chanPoint wire.OutPoint,
pkt.amt = pd.Amount
pkt.msg = msg
pkt.srcLink = chanPoint
pkt.srcLink = chanID
pkt.onion = onionPkt
return pkt, nil
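On the peer side the same key now flows straight off the wire: readHandler extracts targetChan from each channel-scoped message, and the htlcManagers map is consulted with it directly. A minimal sketch of that dispatch step, reusing the fields shown in the diff (the dispatchChanMsg name is illustrative):

// dispatchChanMsg is an illustrative helper showing how a channel update can
// be routed to its htlcManager once targetChan is an lnwire.ChannelID.
func (p *peer) dispatchChanMsg(targetChan lnwire.ChannelID, msg lnwire.Message) {
	p.htlcManMtx.RLock()
	upstreamLink, ok := p.htlcManagers[targetChan]
	p.htlcManMtx.RUnlock()

	if !ok {
		peerLog.Errorf("received update for unknown channel %v", targetChan)
		return
	}

	upstreamLink <- msg
}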