Merge pull request #3564 from joostjager/htlcswitch-logging
build+htlcswitch+lnwallet: add prefix logger
Commit: dbc0e88584

build/prefix_log.go (new file, 112 lines added)
@@ -0,0 +1,112 @@
package build

import "github.com/btcsuite/btclog"

// PrefixLog is a pass-through logger that adds a prefix to every logged line.
type PrefixLog struct {
	log    btclog.Logger
	prefix string
}

// NewPrefixLog instantiates a new prefixed logger.
func NewPrefixLog(prefix string, log btclog.Logger) *PrefixLog {
	return &PrefixLog{
		prefix: prefix,
		log:    log,
	}
}

// addFormatPrefix prepends the prefix to a format string.
func (p *PrefixLog) addFormatPrefix(s string) string {
	return p.prefix + " " + s
}

// addArgsPrefix prepends the prefix to a list of arguments.
func (p *PrefixLog) addArgsPrefix(args []interface{}) []interface{} {
	return append([]interface{}{p.prefix}, args...)
}

// Tracef formats message according to format specifier and writes to to log
// with LevelTrace.
func (p *PrefixLog) Tracef(format string, params ...interface{}) {
	p.log.Tracef(p.addFormatPrefix(format), params...)
}

// Debugf formats message according to format specifier and writes to log with
// LevelDebug.
func (p *PrefixLog) Debugf(format string, params ...interface{}) {
	p.log.Debugf(p.addFormatPrefix(format), params...)
}

// Infof formats message according to format specifier and writes to log with
// LevelInfo.
func (p *PrefixLog) Infof(format string, params ...interface{}) {
	p.log.Infof(p.addFormatPrefix(format), params...)
}

// Warnf formats message according to format specifier and writes to to log with
// LevelWarn.
func (p *PrefixLog) Warnf(format string, params ...interface{}) {
	p.log.Warnf(p.addFormatPrefix(format), params...)
}

// Errorf formats message according to format specifier and writes to to log
// with LevelError.
func (p *PrefixLog) Errorf(format string, params ...interface{}) {
	p.log.Errorf(p.addFormatPrefix(format), params...)
}

// Criticalf formats message according to format specifier and writes to log
// with LevelCritical.
func (p *PrefixLog) Criticalf(format string, params ...interface{}) {
	p.log.Criticalf(p.addFormatPrefix(format), params...)
}

// Trace formats message using the default formats for its operands and writes
// to log with LevelTrace.
func (p *PrefixLog) Trace(v ...interface{}) {
	p.log.Trace(p.addArgsPrefix(v)...)
}

// Debug formats message using the default formats for its operands and writes
// to log with LevelDebug.
func (p *PrefixLog) Debug(v ...interface{}) {
	p.log.Debug(p.addArgsPrefix(v)...)
}

// Info formats message using the default formats for its operands and writes to
// log with LevelInfo.
func (p *PrefixLog) Info(v ...interface{}) {
	p.log.Info(p.addArgsPrefix(v)...)
}

// Warn formats message using the default formats for its operands and writes to
// log with LevelWarn.
func (p *PrefixLog) Warn(v ...interface{}) {
	p.log.Warn(p.addArgsPrefix(v)...)
}

// Error formats message using the default formats for its operands and writes
// to log with LevelError.
func (p *PrefixLog) Error(v ...interface{}) {
	p.log.Error(p.addArgsPrefix(v)...)
}

// Critical formats message using the default formats for its operands and
// writes to log with LevelCritical.
func (p *PrefixLog) Critical(v ...interface{}) {
	p.log.Critical(p.addArgsPrefix(v)...)
}

// Level returns the current logging level.
func (p *PrefixLog) Level() btclog.Level {
	return p.log.Level()
}

// SetLevel changes the logging level to the passed level.
func (p *PrefixLog) SetLevel(level btclog.Level) {
	p.log.SetLevel(level)
}

// Assert that PrefixLog fulfills the btclog.Logger interface.
var _ btclog.Logger = &PrefixLog{}
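For context only, not part of the commit: a minimal sketch of how the new PrefixLog can be wired up on its own. The backend setup, subsystem tag and prefix string below are illustrative assumptions; lnd builds its loggers through its own logging configuration rather than directly like this.

package main

import (
	"os"

	"github.com/btcsuite/btclog"
	"github.com/lightningnetwork/lnd/build"
)

func main() {
	// A plain btclog backend writing to stdout (illustrative only).
	backend := btclog.NewBackend(os.Stdout)
	base := backend.Logger("HSWC")
	base.SetLevel(btclog.LevelDebug)

	// Wrap the base logger; every line now carries the given prefix.
	log := build.NewPrefixLog("ChannelLink(123:1:0):", base)

	log.Info("starting")               // message becomes "ChannelLink(123:1:0): starting"
	log.Debugf("loaded %d fwd pks", 2) // message becomes "ChannelLink(123:1:0): loaded 2 fwd pks"
}

The prefixed logger shares the wrapped logger's backend and level; PrefixLog only rewrites the message before passing it through.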
@@ -11,8 +11,10 @@ import (
"time"

"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btclog"
"github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/contractcourt"
"github.com/lightningnetwork/lnd/htlcswitch/hodl"
@@ -378,6 +380,9 @@ type channelLink struct {
// resolving those htlcs when we receive a message on hodlQueue.
hodlMap map[channeldb.CircuitKey]hodlHtlc

// log is a link-specific logging instance.
log btclog.Logger

wg sync.WaitGroup
quit chan struct{}
}
@@ -393,6 +398,8 @@ type hodlHtlc struct {
func NewChannelLink(cfg ChannelLinkConfig,
channel *lnwallet.LightningChannel) ChannelLink {

logPrefix := fmt.Sprintf("ChannelLink(%v):", channel.ShortChanID())

return &channelLink{
cfg: cfg,
channel: channel,
@@ -403,6 +410,7 @@ func NewChannelLink(cfg ChannelLinkConfig,
htlcUpdates: make(chan *contractcourt.ContractUpdate),
hodlMap: make(map[channeldb.CircuitKey]hodlHtlc),
hodlQueue: queue.NewConcurrentQueue(10),
log: build.NewPrefixLog(logPrefix, log),
quit: make(chan struct{}),
}
}
@@ -418,11 +426,11 @@ var _ ChannelLink = (*channelLink)(nil)
func (l *channelLink) Start() error {
if !atomic.CompareAndSwapInt32(&l.started, 0, 1) {
err := errors.Errorf("channel link(%v): already started", l)
log.Warn(err)
l.log.Warn("already started")
return err
}

log.Infof("ChannelLink(%v) is starting", l)
l.log.Info("starting")

// If the config supplied watchtower client, ensure the channel is
// registered before trying to use it during operation.
@@ -475,8 +483,7 @@ func (l *channelLink) Start() error {
err := l.cfg.UpdateContractSignals(signals)
if err != nil {
log.Errorf("Unable to update signals for "+
"ChannelLink(%v)", l)
l.log.Errorf("unable to update signals")
}
}()
}
@@ -495,11 +502,11 @@ func (l *channelLink) Start() error {
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) Stop() {
if !atomic.CompareAndSwapInt32(&l.shutdown, 0, 1) {
log.Warnf("channel link(%v): already stopped", l)
l.log.Warn("already stopped")
return
}

log.Infof("ChannelLink(%v) is stopping", l)
l.log.Info("stopping")

// As the link is stopping, we are no longer interested in hodl events
// coming from the invoice registry.
@@ -524,7 +531,7 @@ func (l *channelLink) Stop() {
// we had learned them at some point.
err := l.cfg.PreimageCache.AddPreimages(l.uncommittedPreimages...)
if err != nil {
log.Errorf("Unable to add preimages=%v to cache: %v",
l.log.Errorf("unable to add preimages=%v to cache: %v",
l.uncommittedPreimages, err)
}
}
@@ -571,8 +578,8 @@ func (l *channelLink) sampleNetworkFee() (lnwallet.SatPerKWeight, error) {
return 0, err
}

log.Debugf("ChannelLink(%v): sampled fee rate for 3 block conf: %v "+
"sat/kw", l, int64(feePerKw))
l.log.Debugf("sampled fee rate for 3 block conf: %v sat/kw",
int64(feePerKw))

return feePerKw, nil
}
@@ -603,8 +610,7 @@ func shouldAdjustCommitFee(netFee, chanFee lnwallet.SatPerKWeight) bool {
// flow. We'll compare out commitment chains with the remote party, and re-send
// either a danging commit signature, a revocation, or both.
func (l *channelLink) syncChanStates() error {
log.Infof("Attempting to re-resynchronize ChannelPoint(%v)",
l.channel.ChannelPoint())
l.log.Info("attempting to re-resynchronize")

// First, we'll generate our ChanSync message to send to the other
// side. Based on this message, the remote party will decide if they
@@ -643,9 +649,7 @@ func (l *channelLink) syncChanStates() error {
localChanSyncMsg.NextLocalCommitHeight == 1 &&
!l.channel.IsPending() {

log.Infof("ChannelPoint(%v): resending "+
"FundingLocked message to peer",
l.channel.ChannelPoint())
l.log.Infof("resending FundingLocked message to peer")

nextRevocation, err := l.channel.NextRevocationKey()
if err != nil {
@@ -664,8 +668,7 @@ func (l *channelLink) syncChanStates() error {
}

// In any case, we'll then process their ChanSync message.
log.Infof("Received re-establishment message from remote side "+
"for channel(%v)", l.channel.ChannelPoint())
l.log.Info("received re-establishment message from remote side")

var (
openedCircuits []CircuitKey
@@ -694,9 +697,8 @@ func (l *channelLink) syncChanStates() error {
}

if len(msgsToReSend) > 0 {
log.Infof("Sending %v updates to synchronize the "+
"state for ChannelPoint(%v)", len(msgsToReSend),
l.channel.ChannelPoint())
l.log.Infof("sending %v updates to synchronize the "+
"state", len(msgsToReSend))
}

// If we have any messages to retransmit, we'll do so
@@ -724,7 +726,7 @@ func (l *channelLink) resolveFwdPkgs() error {
return err
}

l.debugf("loaded %d fwd pks", len(fwdPkgs))
l.log.Debugf("loaded %d fwd pks", len(fwdPkgs))

var needUpdate bool
for _, fwdPkg := range fwdPkgs {
@@ -751,13 +753,13 @@ func (l *channelLink) resolveFwdPkgs() error {
func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) (bool, error) {
// Remove any completed packages to clear up space.
if fwdPkg.State == channeldb.FwdStateCompleted {
l.debugf("removing completed fwd pkg for height=%d",
l.log.Debugf("removing completed fwd pkg for height=%d",
fwdPkg.Height)

err := l.channel.RemoveFwdPkg(fwdPkg.Height)
if err != nil {
l.errorf("unable to remove fwd pkg for height=%d: %v",
fwdPkg.Height, err)
l.log.Errorf("unable to remove fwd pkg for height=%d: "+
"%v", fwdPkg.Height, err)
return false, err
}
}
@@ -775,7 +777,7 @@ func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) (bool, error) {
fwdPkg.Source, fwdPkg.Height, fwdPkg.SettleFails,
)
if err != nil {
l.errorf("Unable to process remote log updates: %v",
l.log.Errorf("unable to process remote log updates: %v",
err)
return false, err
}
@@ -792,7 +794,7 @@ func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) (bool, error) {
fwdPkg.Source, fwdPkg.Height, fwdPkg.Adds,
)
if err != nil {
l.errorf("Unable to process remote log updates: %v",
l.log.Errorf("unable to process remote log updates: %v",
err)
return false, err
}
@@ -827,7 +829,8 @@ func (l *channelLink) fwdPkgGarbager() {
case <-l.cfg.FwdPkgGCTicker.Ticks():
fwdPkgs, err := l.channel.LoadFwdPkgs()
if err != nil {
l.warnf("unable to load fwdpkgs for gc: %v", err)
l.log.Warnf("unable to load fwdpkgs for gc: %v",
err)
continue
}
@@ -839,7 +842,7 @@ func (l *channelLink) fwdPkgGarbager() {
err = l.channel.RemoveFwdPkg(fwdPkg.Height)
if err != nil {
l.warnf("unable to remove fwd pkg "+
l.log.Warnf("unable to remove fwd pkg "+
"for height=%d: %v",
fwdPkg.Height, err)
}
@@ -864,11 +867,10 @@ func (l *channelLink) htlcManager() {
defer func() {
l.cfg.BatchTicker.Stop()
l.wg.Done()
log.Infof("ChannelLink(%v) has exited", l)
l.log.Infof("exited")
}()

log.Infof("HTLC manager for ChannelPoint(%v) started, "+
"bandwidth=%v", l.channel.ChannelPoint(), l.Bandwidth())
l.log.Infof("HTLC manager started, bandwidth=%v", l.Bandwidth())

// TODO(roasbeef): need to call wipe chan whenever D/C?
@@ -879,14 +881,14 @@ func (l *channelLink) htlcManager() {
if l.cfg.SyncStates {
err := l.syncChanStates()
if err != nil {
log.Warnf("Error when syncing channel states: %v", err)
l.log.Warnf("error when syncing channel states: %v", err)

errDataLoss, localDataLoss :=
err.(*lnwallet.ErrCommitSyncLocalDataLoss)

switch {
case err == ErrLinkShuttingDown:
log.Debugf("unable to sync channel states, " +
l.log.Debugf("unable to sync channel states, " +
"link is shutting down")
return
@@ -934,7 +936,7 @@ func (l *channelLink) htlcManager() {
errDataLoss.CommitPoint,
)
if err != nil {
log.Errorf("Unable to mark channel "+
l.log.Errorf("unable to mark channel "+
"data loss: %v", err)
}
@@ -945,7 +947,7 @@ func (l *channelLink) htlcManager() {
// cases where this error is returned?
case err == lnwallet.ErrCannotSyncCommitChains:
if err := l.channel.MarkBorked(); err != nil {
log.Errorf("Unable to mark channel "+
l.log.Errorf("unable to mark channel "+
"borked: %v", err)
}
@@ -1008,7 +1010,7 @@ out:
// We must always check if we failed at some point processing
// the last update before processing the next.
if l.failed {
l.errorf("link failed, exiting htlcManager")
l.log.Errorf("link failed, exiting htlcManager")
break out
}
@@ -1036,7 +1038,8 @@ out:
// blocks.
netFee, err := l.sampleNetworkFee()
if err != nil {
log.Errorf("unable to sample network fee: %v", err)
l.log.Errorf("unable to sample network fee: %v",
err)
continue
}
@@ -1055,7 +1058,8 @@ out:
// If we do, then we'll send a new UpdateFee message to
// the remote party, to be locked in with a new update.
if err := l.updateChannelFee(newCommitFee); err != nil {
log.Errorf("unable to update fee rate: %v", err)
l.log.Errorf("unable to update fee rate: %v",
err)
continue
}
@@ -1066,15 +1070,15 @@ out:
//
// TODO(roasbeef): add force closure? also breach?
case <-l.cfg.ChainEvents.RemoteUnilateralClosure:
log.Warnf("Remote peer has closed ChannelPoint(%v) on-chain",
l.channel.ChannelPoint())
l.log.Warnf("remote peer has closed on-chain")

// TODO(roasbeef): remove all together
go func() {
chanPoint := l.channel.ChannelPoint()
err := l.cfg.Peer.WipeChannel(chanPoint)
if err != nil {
log.Errorf("unable to wipe channel %v", err)
l.log.Errorf("unable to wipe channel "+
"%v", err)
}
}()
@@ -1121,7 +1125,7 @@ out:
// to continue propagating within the network.
case packet := <-l.overflowQueue.outgoingPkts:
msg := packet.htlc.(*lnwire.UpdateAddHTLC)
log.Tracef("Reprocessing downstream add update "+
l.log.Tracef("reprocessing downstream add update "+
"with payment hash(%x)", msg.PaymentHash[:])

l.handleDownStreamPkt(packet, true)
@@ -1136,7 +1140,7 @@ out:
// failed, then we'll free up a new slot.
htlc, ok := pkt.htlc.(*lnwire.UpdateAddHTLC)
if ok && l.overflowQueue.Length() != 0 {
log.Infof("Downstream htlc add update with "+
l.log.Infof("downstream htlc add update with "+
"payment hash(%x) have been added to "+
"reprocessing queue, batch_size=%v",
htlc.PaymentHash[:],
@@ -1224,7 +1228,7 @@ func (l *channelLink) processHodlEvent(hodlEvent invoices.HodlEvent,
// Determine required action for the resolution.
if hodlEvent.Preimage != nil {
l.debugf("Received hodl settle event for %v", circuitKey)
l.log.Debugf("received hodl settle event for %v", circuitKey)

return l.settleHTLC(
*hodlEvent.Preimage, htlc.pd.HtlcIndex,
@@ -1232,7 +1236,7 @@ func (l *channelLink) processHodlEvent(hodlEvent invoices.HodlEvent,
)
}

l.debugf("Received hodl cancel event for %v", circuitKey)
l.log.Debugf("received hodl cancel event for %v", circuitKey)

// In case of a cancel, always return
// incorrect_or_unknown_payment_details in order to avoid leaking info.
@@ -1270,7 +1274,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
// arbitrary delays between the switch adding an ADD to the
// mailbox, and the HTLC being added to the commitment state.
if l.cfg.HodlMask.Active(hodl.AddOutgoing) {
l.warnf(hodl.AddOutgoing.Warning())
l.log.Warnf(hodl.AddOutgoing.Warning())
l.mailBox.AckPacket(pkt.inKey())
return
}
@@ -1287,7 +1291,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
// The channels spare bandwidth is fully allocated, so
// we'll put this HTLC into the overflow queue.
case lnwallet.ErrMaxHTLCNumber:
l.infof("Downstream htlc add update with "+
l.log.Infof("downstream htlc add update with "+
"payment hash(%x) have been added to "+
"reprocessing queue, batch: %v",
htlc.PaymentHash[:],
@@ -1300,7 +1304,8 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
// machine, as a result, we'll signal the switch to
// cancel the pending payment.
default:
l.warnf("Unable to handle downstream add HTLC: %v", err)
l.log.Warnf("unable to handle downstream add "+
"HTLC: %v", err)

var (
localFailure = false
@@ -1325,7 +1330,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
var b bytes.Buffer
err := lnwire.EncodeFailure(&b, failure, 0)
if err != nil {
l.errorf("unable to encode failure: %v", err)
l.log.Errorf("unable to encode failure: %v", err)
l.mailBox.AckPacket(pkt.inKey())
return
}
@@ -1335,7 +1340,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
var err error
reason, err = pkt.obfuscator.EncryptFirstHop(failure)
if err != nil {
l.errorf("unable to obfuscate error: %v", err)
l.log.Errorf("unable to obfuscate error: %v", err)
l.mailBox.AckPacket(pkt.inKey())
return
}
@@ -1370,7 +1375,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
}
}

l.tracef("Received downstream htlc: payment_hash=%x, "+
l.log.Tracef("received downstream htlc: payment_hash=%x, "+
"local_log_index=%v, batch_size=%v",
htlc.PaymentHash[:], index, l.batchCounter+1)
@@ -1378,7 +1383,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
pkt.outgoingHTLCID = index
htlc.ID = index

l.debugf("Queueing keystone of ADD open circuit: %s->%s",
l.log.Debugf("queueing keystone of ADD open circuit: %s->%s",
pkt.inKey(), pkt.outKey())

l.openedCircuits = append(l.openedCircuits, pkt.inKey())
@@ -1392,7 +1397,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
// SETTLE to the mailbox, and the HTLC being added to the
// commitment state.
if l.cfg.HodlMask.Active(hodl.SettleOutgoing) {
l.warnf(hodl.SettleOutgoing.Warning())
l.log.Warnf(hodl.SettleOutgoing.Warning())
l.mailBox.AckPacket(pkt.inKey())
return
}
@@ -1409,7 +1414,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
&inKey,
)
if err != nil {
l.errorf("unable to settle incoming HTLC for "+
l.log.Errorf("unable to settle incoming HTLC for "+
"circuit-key=%v: %v", inKey, err)

// If the HTLC index for Settle response was not known
@@ -1428,8 +1433,8 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
return
}

l.debugf("Queueing removal of SETTLE closed circuit: %s->%s",
pkt.inKey(), pkt.outKey())
l.log.Debugf("queueing removal of SETTLE closed circuit: "+
"%s->%s", pkt.inKey(), pkt.outKey())

l.closedCircuits = append(l.closedCircuits, pkt.inKey())
@@ -1450,7 +1455,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
// the mailbox, and the HTLC being added to the commitment
// state.
if l.cfg.HodlMask.Active(hodl.FailOutgoing) {
l.warnf(hodl.FailOutgoing.Warning())
l.log.Warnf(hodl.FailOutgoing.Warning())
l.mailBox.AckPacket(pkt.inKey())
return
}
@@ -1466,7 +1471,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
&inKey,
)
if err != nil {
l.errorf("unable to cancel incoming HTLC for "+
l.log.Errorf("unable to cancel incoming HTLC for "+
"circuit-key=%v: %v", inKey, err)

// If the HTLC index for Fail response was not known to
@@ -1485,7 +1490,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
return
}

l.debugf("Queueing removal of FAIL closed circuit: %s->%s",
l.log.Debugf("queueing removal of FAIL closed circuit: %s->%s",
pkt.inKey(), pkt.outKey())

l.closedCircuits = append(l.closedCircuits, pkt.inKey())
@@ -1523,13 +1528,13 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) {
inKey := pkt.inKey()

l.debugf("Cleaning up spurious response for incoming circuit-key=%v",
inKey)
l.log.Debugf("cleaning up spurious response for incoming "+
"circuit-key=%v", inKey)

// If the htlc packet doesn't have a source reference, it is unsafe to
// proceed, as skipping this ack may cause the htlc to be reforwarded.
if pkt.sourceRef == nil {
l.errorf("uanble to cleanup response for incoming "+
l.log.Errorf("uanble to cleanup response for incoming "+
"circuit-key=%v, does not contain source reference",
inKey)
return
@@ -1540,7 +1545,7 @@ func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) {
// of the incoming HTLC belonging to this link.
err := l.channel.AckAddHtlcs(*pkt.sourceRef)
if err != nil {
l.errorf("unable to ack AddRef for incoming "+
l.log.Errorf("unable to ack AddRef for incoming "+
"circuit-key=%v: %v", inKey, err)

// If this operation failed, it is unsafe to attempt removal of
@@ -1563,19 +1568,19 @@ func (l *channelLink) cleanupSpuriousResponse(pkt *htlcPacket) {
if pkt.destRef != nil {
err := l.channel.AckSettleFails(*pkt.destRef)
if err != nil {
l.errorf("unable to ack SettleFailRef "+
l.log.Errorf("unable to ack SettleFailRef "+
"for incoming circuit-key=%v: %v",
inKey, err)
}
}

l.debugf("Deleting circuit for incoming circuit-key=%x", inKey)
l.log.Debugf("deleting circuit for incoming circuit-key=%x", inKey)

// With all known references acked, we can now safely delete the circuit
// from the switch's circuit map, as the state is no longer needed.
err = l.cfg.Circuits.DeleteCircuits(inKey)
if err != nil {
l.errorf("unable to delete circuit for "+
l.log.Errorf("unable to delete circuit for "+
"circuit-key=%v: %v", inKey, err)
}
}
@@ -1597,7 +1602,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
return
}

l.tracef("Receive upstream htlc with payment hash(%x), "+
l.log.Tracef("receive upstream htlc with payment hash(%x), "+
"assigning index: %v", msg.PaymentHash[:], index)

case *lnwire.UpdateFulfillHTLC:
@@ -1650,7 +1655,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
OnionSHA256: msg.ShaOnionBlob,
}
default:
log.Warnf("Unexpected failure code received in "+
l.log.Warnf("unexpected failure code received in "+
"UpdateFailMailformedHTLC: %v", msg.FailureCode)

// We don't just pass back the error we received from
@@ -1671,7 +1676,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
// form.
var b bytes.Buffer
if err := lnwire.EncodeFailure(&b, failure, 0); err != nil {
l.errorf("unable to encode malformed error: %v", err)
l.log.Errorf("unable to encode malformed error: %v", err)
return
}
@@ -1758,7 +1763,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
// state.
nextRevocation, currentHtlcs, err := l.channel.RevokeCurrentCommitment()
if err != nil {
log.Errorf("unable to revoke commitment: %v", err)
l.log.Errorf("unable to revoke commitment: %v", err)
return
}
l.cfg.Peer.SendMessage(false, nextRevocation)
@@ -1894,8 +1899,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
"ChannelPoint(%v): received error from peer: %v",
l.channel.ChannelPoint(), msg.Error())
default:
log.Warnf("ChannelPoint(%v): received unknown message of type %T",
l.channel.ChannelPoint(), msg)
l.log.Warnf("received unknown message of type %T", msg)
}

}
@@ -1919,7 +1923,7 @@ func (l *channelLink) ackDownStreamPackets() error {
continue
}

l.debugf("removing Add packet %s from mailbox", inKey)
l.log.Debugf("removing Add packet %s from mailbox", inKey)
l.mailBox.AckPacket(inKey)
}
@@ -1933,7 +1937,7 @@ func (l *channelLink) ackDownStreamPackets() error {
// Successful deletion.

default:
l.errorf("unable to delete %d circuits: %v",
l.log.Errorf("unable to delete %d circuits: %v",
len(l.closedCircuits), err)
return err
}
@@ -1944,7 +1948,8 @@ func (l *channelLink) ackDownStreamPackets() error {
// the circuits must have been removed at some point, so it is now safe
// to un-queue the corresponding Settle/Fails.
for _, inKey := range l.closedCircuits {
l.debugf("removing Fail/Settle packet %s from mailbox", inKey)
l.log.Debugf("removing Fail/Settle packet %s from mailbox",
inKey)
l.mailBox.AckPacket(inKey)
}
@@ -1976,14 +1981,14 @@ func (l *channelLink) updateCommitTx() error {
// permits testing of either the switch or link's ability to trim
// circuits that have been opened, but unsuccessfully committed.
if l.cfg.HodlMask.Active(hodl.Commit) {
l.warnf(hodl.Commit.Warning())
l.log.Warnf(hodl.Commit.Warning())
return nil
}

theirCommitSig, htlcSigs, pendingHTLCs, err := l.channel.SignNextCommitment()
if err == lnwallet.ErrNoWindow {
l.tracef("revocation window exhausted, unable to send: %v, "+
"dangling_opens=%v, dangling_closes%v",
l.log.Tracef("revocation window exhausted, unable to send: "+
"%v, dangling_opens=%v, dangling_closes%v",
l.batchCounter, newLogClosure(func() string {
return spew.Sdump(l.openedCircuits)
}),
@@ -2077,14 +2082,14 @@ func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, error) {
// short channel ID.
err := l.channel.State().RefreshShortChanID()
if err != nil {
l.errorf("unable to refresh short_chan_id for chan_id=%v: %v",
chanID, err)
l.log.Errorf("unable to refresh short_chan_id for chan_id=%v: "+
"%v", chanID, err)
return hop.Source, err
}

sid := l.channel.ShortChanID()

l.infof("Updating to short_chan_id=%v for chan_id=%v", sid, chanID)
l.log.Infof("updating to short_chan_id=%v for chan_id=%v", sid, chanID)

l.Lock()
l.shortChanID = sid
@@ -2096,8 +2101,7 @@ func (l *channelLink) UpdateShortChanID() (lnwire.ShortChannelID, error) {
ShortChanID: sid,
})
if err != nil {
log.Errorf("Unable to update signals for "+
"ChannelLink(%v)", l)
l.log.Errorf("unable to update signals")
}
}()
@@ -2206,8 +2210,9 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
// any case, we'll cancel this HTLC.
actualFee := incomingHtlcAmt - amtToForward
if incomingHtlcAmt < amtToForward || actualFee < expectedFee {
l.errorf("outgoing htlc(%x) has insufficient fee: expected %v, "+
"got %v", payHash[:], int64(expectedFee), int64(actualFee))
l.log.Errorf("outgoing htlc(%x) has insufficient fee: "+
"expected %v, got %v",
payHash[:], int64(expectedFee), int64(actualFee))

// As part of the returned error, we'll send our latest routing
// policy so the sending node obtains the most up to date data.
@@ -2230,7 +2235,7 @@ func (l *channelLink) HtlcSatifiesPolicy(payHash [32]byte,
// sender messed up, or an intermediate node tampered with the HTLC.
timeDelta := policy.TimeLockDelta
if incomingTimeout < outgoingTimeout+timeDelta {
l.errorf("Incoming htlc(%x) has incorrect time-lock value: "+
l.log.Errorf("incoming htlc(%x) has incorrect time-lock value: "+
"expected at least %v block delta, got %v block delta",
payHash[:], timeDelta, incomingTimeout-outgoingTimeout)
@@ -2282,7 +2287,7 @@ func (l *channelLink) htlcSatifiesPolicyOutgoing(policy ForwardingPolicy,
// too small for the next hop. If so, then we'll cancel the HTLC
// directly.
if amt < policy.MinHTLC {
l.errorf("outgoing htlc(%x) is too small: min_htlc=%v, "+
l.log.Errorf("outgoing htlc(%x) is too small: min_htlc=%v, "+
"htlc_value=%v", payHash[:], policy.MinHTLC,
amt)
@@ -2304,7 +2309,7 @@ func (l *channelLink) htlcSatifiesPolicyOutgoing(policy ForwardingPolicy,
// Next, ensure that the passed HTLC isn't too large. If so, we'll cancel
// the HTLC directly.
if policy.MaxHTLC != 0 && amt > policy.MaxHTLC {
l.errorf("outgoing htlc(%x) is too large: max_htlc=%v, "+
l.log.Errorf("outgoing htlc(%x) is too large: max_htlc=%v, "+
"htlc_value=%v", payHash[:], policy.MaxHTLC, amt)

// As part of the returned error, we'll send our latest routing policy
@@ -2324,7 +2329,7 @@ func (l *channelLink) htlcSatifiesPolicyOutgoing(policy ForwardingPolicy,
// future, so we'll reject an HTLC if the outgoing expiration time is
// too close to the current height.
if timeout <= heightNow+l.cfg.OutgoingCltvRejectDelta {
l.errorf("htlc(%x) has an expiry that's too soon: "+
l.log.Errorf("htlc(%x) has an expiry that's too soon: "+
"outgoing_expiry=%v, best_height=%v", payHash[:],
timeout, heightNow)
@@ -2343,8 +2348,8 @@ func (l *channelLink) htlcSatifiesPolicyOutgoing(policy ForwardingPolicy,
// Check absolute max delta.
if timeout > l.cfg.MaxOutgoingCltvExpiry+heightNow {
l.errorf("outgoing htlc(%x) has a time lock too far in the "+
"future: got %v, but maximum is %v", payHash[:],
l.log.Errorf("outgoing htlc(%x) has a time lock too far in "+
"the future: got %v, but maximum is %v", payHash[:],
timeout-heightNow, l.cfg.MaxOutgoingCltvExpiry)

return &lnwire.FailExpiryTooFar{}
@@ -2377,7 +2382,7 @@ func (l *channelLink) String() string {
//
// NOTE: Part of the ChannelLink interface.
func (l *channelLink) HandleSwitchPacket(pkt *htlcPacket) error {
l.tracef("received switch packet inkey=%v, outkey=%v",
l.log.Tracef("received switch packet inkey=%v, outkey=%v",
pkt.inKey(), pkt.outKey())

l.mailBox.AddPacket(pkt)
@@ -2396,14 +2401,12 @@ func (l *channelLink) HandleChannelUpdate(message lnwire.Message) {
// committing to an update_fee message.
func (l *channelLink) updateChannelFee(feePerKw lnwallet.SatPerKWeight) error {

log.Infof("ChannelPoint(%v): updating commit fee to %v sat/kw", l,
feePerKw)
l.log.Infof("updating commit fee to %v sat/kw", feePerKw)

// We skip sending the UpdateFee message if the channel is not
// currently eligible to forward messages.
if !l.EligibleToForward() {
log.Debugf("ChannelPoint(%v): skipping fee update for "+
"inactive channel", l.ChanID())
l.log.Debugf("skipping fee update for inactive channel")
return nil
}
@@ -2433,8 +2436,7 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg,
return
}

log.Debugf("ChannelLink(%v): settle-fail-filter %v",
l.ShortChanID(), fwdPkg.SettleFailFilter)
l.log.Debugf("settle-fail-filter %v", fwdPkg.SettleFailFilter)

var switchPackets []*htlcPacket
for i, pd := range settleFails {
@@ -2458,7 +2460,7 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg,
// forward the SETTLE to the switch and will not signal
// a free slot on the commitment transaction.
if l.cfg.HodlMask.Active(hodl.SettleIncoming) {
l.warnf(hodl.SettleIncoming.Warning())
l.log.Warnf(hodl.SettleIncoming.Warning())
continue
}
@@ -2486,7 +2488,7 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg,
// forward the FAIL to the switch and will not signal a
// free slot on the commitment transaction.
if l.cfg.HodlMask.Active(hodl.FailIncoming) {
l.warnf(hodl.FailIncoming.Warning())
l.log.Warnf(hodl.FailIncoming.Warning())
continue
}
@@ -2538,7 +2540,7 @@ func (l *channelLink) processRemoteSettleFails(fwdPkg *channeldb.FwdPkg,
func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
lockedInHtlcs []*lnwallet.PaymentDescriptor) bool {

l.tracef("processing %d remote adds for height %d",
l.log.Tracef("processing %d remote adds for height %d",
len(lockedInHtlcs), fwdPkg.Height)

decodeReqs := make(
@@ -2620,7 +2622,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
onionBlob[:], pd.SourceRef)
needUpdate = true

log.Errorf("unable to decode onion hop "+
l.log.Errorf("unable to decode onion hop "+
"iterator: %v", failureCode)
continue
}
@@ -2639,7 +2641,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
)
needUpdate = true

log.Errorf("unable to decode onion "+
l.log.Errorf("unable to decode onion "+
"obfuscator: %v", failureCode)
continue
}
@@ -2659,7 +2661,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
)
needUpdate = true

log.Errorf("Unable to decode forwarding "+
l.log.Errorf("unable to decode forwarding "+
"instructions: %v", err)
continue
}
@@ -2688,7 +2690,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
// validate the forwarded ADD, nor will we send the
// packet to the htlc switch.
if l.cfg.HodlMask.Active(hodl.AddIncoming) {
l.warnf(hodl.AddIncoming.Warning())
l.log.Warnf(hodl.AddIncoming.Warning())
continue
}
@@ -2759,7 +2761,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
buf := bytes.NewBuffer(addMsg.OnionBlob[0:0])
err := chanIterator.EncodeNextHop(buf)
if err != nil {
log.Errorf("unable to encode the "+
l.log.Errorf("unable to encode the "+
"remaining route %v", err)

var failure lnwire.FailureMessage
@@ -2825,7 +2827,7 @@ func (l *channelLink) processRemoteAdds(fwdPkg *channeldb.FwdPkg,
return needUpdate
}

l.debugf("forwarding %d packets to switch", len(switchPackets))
l.log.Debugf("forwarding %d packets to switch", len(switchPackets))

// NOTE: This call is made synchronous so that we ensure all circuits
// are committed in the exact order that they are processed in the link.
@@ -2847,7 +2849,7 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor,
// ADD, nor will we settle the corresponding invoice or respond with the
// preimage.
if l.cfg.HodlMask.Active(hodl.ExitSettle) {
l.warnf(hodl.ExitSettle.Warning())
l.log.Warnf(hodl.ExitSettle.Warning())

return false, nil
}
@@ -2857,7 +2859,7 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor,
// matches the HTLC we were extended.
if pd.Amount != fwdInfo.AmountToForward {

log.Errorf("Onion payload of incoming htlc(%x) has incorrect "+
l.log.Errorf("onion payload of incoming htlc(%x) has incorrect "+
"value: expected %v, got %v", pd.RHash,
pd.Amount, fwdInfo.AmountToForward)
@@ -2870,7 +2872,7 @@ func (l *channelLink) processExitHop(pd *lnwallet.PaymentDescriptor,
// We'll also ensure that our time-lock value has been computed
// correctly.
if pd.Timeout != fwdInfo.OutgoingCTLV {
log.Errorf("Onion payload of incoming htlc(%x) has incorrect "+
l.log.Errorf("onion payload of incoming htlc(%x) has incorrect "+
"time-lock: expected %v, got %v",
pd.RHash[:], pd.Timeout, fwdInfo.OutgoingCTLV)
@@ -2939,7 +2941,7 @@ func (l *channelLink) settleHTLC(preimage lntypes.Preimage, htlcIndex uint64,
hash := preimage.Hash()

l.infof("settling htlc %v as exit hop", hash)
l.log.Infof("settling htlc %v as exit hop", hash)

err := l.channel.SettleHTLC(
preimage, htlcIndex, sourceRef, nil, nil,
@@ -2951,7 +2953,7 @@ func (l *channelLink) settleHTLC(preimage lntypes.Preimage, htlcIndex uint64,
// If the link is in hodl.BogusSettle mode, replace the preimage with a
// fake one before sending it to the peer.
if l.cfg.HodlMask.Active(hodl.BogusSettle) {
l.warnf(hodl.BogusSettle.Warning())
l.log.Warnf(hodl.BogusSettle.Warning())
preimage = [32]byte{}
copy(preimage[:], bytes.Repeat([]byte{2}, 32))
}
@@ -3002,7 +3004,7 @@ func (l *channelLink) handleBatchFwdErrs(errChan chan error) {
continue
}

l.errorf("unhandled error while forwarding htlc packet over "+
l.log.Errorf("unhandled error while forwarding htlc packet over "+
"htlcswitch: %v", err)
}
}
@@ -3014,13 +3016,13 @@ func (l *channelLink) sendHTLCError(htlcIndex uint64, failure lnwire.FailureMess
reason, err := e.EncryptFirstHop(failure)
if err != nil {
log.Errorf("unable to obfuscate error: %v", err)
l.log.Errorf("unable to obfuscate error: %v", err)
return
}

err = l.channel.FailHTLC(htlcIndex, reason, sourceRef, nil, nil)
if err != nil {
log.Errorf("unable cancel htlc: %v", err)
l.log.Errorf("unable cancel htlc: %v", err)
return
}
@@ -3039,7 +3041,7 @@ func (l *channelLink) sendMalformedHTLCError(htlcIndex uint64,
shaOnionBlob := sha256.Sum256(onionBlob)
err := l.channel.MalformedFailHTLC(htlcIndex, code, shaOnionBlob, sourceRef)
if err != nil {
log.Errorf("unable cancel htlc: %v", err)
l.log.Errorf("unable cancel htlc: %v", err)
return
}
@@ -3062,45 +3064,15 @@ func (l *channelLink) fail(linkErr LinkFailureError,
// Return if we have already notified about a failure.
if l.failed {
l.warnf("Ignoring link failure (%v), as link already failed",
reason)
l.log.Warnf("ignoring link failure (%v), as link already "+
"failed", reason)
return
}

l.errorf("Failing link: %s", reason)
l.log.Errorf("failing link: %s", reason)

// Set failed, such that we won't process any more updates, and notify
// the peer about the failure.
l.failed = true
l.cfg.OnChannelFailure(l.ChanID(), l.ShortChanID(), linkErr)
}

// infof prefixes the channel's identifier before printing to info log.
func (l *channelLink) infof(format string, a ...interface{}) {
msg := fmt.Sprintf(format, a...)
log.Infof("ChannelLink(%s) %s", l.ShortChanID(), msg)
}

// debugf prefixes the channel's identifier before printing to debug log.
func (l *channelLink) debugf(format string, a ...interface{}) {
msg := fmt.Sprintf(format, a...)
log.Debugf("ChannelLink(%s) %s", l.ShortChanID(), msg)
}

// warnf prefixes the channel's identifier before printing to warn log.
func (l *channelLink) warnf(format string, a ...interface{}) {
msg := fmt.Sprintf(format, a...)
log.Warnf("ChannelLink(%s) %s", l.ShortChanID(), msg)
}

// errorf prefixes the channel's identifier before printing to error log.
func (l *channelLink) errorf(format string, a ...interface{}) {
msg := fmt.Sprintf(format, a...)
log.Errorf("ChannelLink(%s) %s", l.ShortChanID(), msg)
}

// tracef prefixes the channel's identifier before printing to trace log.
func (l *channelLink) tracef(format string, a ...interface{}) {
msg := fmt.Sprintf(format, a...)
log.Tracef("ChannelLink(%s) %s", l.ShortChanID(), msg)
}
@@ -5417,6 +5417,7 @@ func TestHtlcSatisfyPolicy(t *testing.T) {
FetchLastChannelUpdate: fetchLastChannelUpdate,
MaxOutgoingCltvExpiry: DefaultMaxOutgoingCltvExpiry,
},
log: log,
}

var hash [32]byte
@@ -14,10 +14,11 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btclog"
"github.com/btcsuite/btcutil"
"github.com/btcsuite/btcutil/txsort"
"github.com/davecgh/go-spew/spew"

"github.com/lightningnetwork/lnd/build"
"github.com/lightningnetwork/lnd/chainntnfs"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/input"
@@ -1378,6 +1379,9 @@ type LightningChannel struct {
// channel.
RemoteFundingKey *btcec.PublicKey

// log is a channel-specific logging instance.
log btclog.Logger

sync.RWMutex
}
@@ -1402,6 +1406,8 @@ func NewLightningChannel(signer input.Signer,
localCommit.RemoteLogIndex, localCommit.RemoteHtlcIndex,
)

logPrefix := fmt.Sprintf("ChannelPoint(%v):", state.FundingOutpoint)

lc := &LightningChannel{
Signer: signer,
sigPool: sigPool,
@@ -1417,6 +1423,7 @@ func NewLightningChannel(signer input.Signer,
Capacity: state.Capacity,
LocalFundingKey: state.LocalChanCfg.MultiSigKey.PubKey,
RemoteFundingKey: state.RemoteChanCfg.MultiSigKey.PubKey,
log: build.NewPrefixLog(logPrefix, walletLog),
}

// With the main channel struct reconstructed, we'll now restore the
@@ -1652,8 +1659,8 @@ func (lc *LightningChannel) restoreCommitState(
}
lc.localCommitChain.addCommitment(localCommit)

walletLog.Debugf("ChannelPoint(%v), starting local commitment: %v",
lc.channelState.FundingOutpoint, newLogClosure(func() string {
lc.log.Debugf("starting local commitment: %v",
newLogClosure(func() string {
return spew.Sdump(lc.localCommitChain.tail())
}),
)
@@ -1668,8 +1675,8 @@ func (lc *LightningChannel) restoreCommitState(
}
lc.remoteCommitChain.addCommitment(remoteCommit)

walletLog.Debugf("ChannelPoint(%v), starting remote commitment: %v",
lc.channelState.FundingOutpoint, newLogClosure(func() string {
lc.log.Debugf("starting remote commitment: %v",
newLogClosure(func() string {
return spew.Sdump(lc.remoteCommitChain.tail())
}),
)
@@ -1703,8 +1710,7 @@ func (lc *LightningChannel) restoreCommitState(
}
lc.remoteCommitChain.addCommitment(pendingRemoteCommit)

walletLog.Debugf("ChannelPoint(%v), pending remote "+
"commitment: %v", lc.channelState.FundingOutpoint,
lc.log.Debugf("pending remote commitment: %v",
newLogClosure(func() string {
return spew.Sdump(lc.remoteCommitChain.tip())
}),
@@ -1834,7 +1840,7 @@ func (lc *LightningChannel) restoreStateLogs(
lc.localUpdateLog.logIndex > 0 {

payDesc.LogIndex = lc.localUpdateLog.logIndex
walletLog.Debugf("Found FeeUpdate on "+
lc.log.Debugf("Found FeeUpdate on "+
"pendingRemoteCommitDiff without logIndex, "+
"using %v", payDesc.LogIndex)
}
@@ -3278,14 +3284,14 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, []ch
return sig, htlcSigs, nil, err
}

walletLog.Tracef("ChannelPoint(%v): extending remote chain to height %v, "+
lc.log.Tracef("extending remote chain to height %v, "+
"local_log=%v, remote_log=%v",
lc.channelState.FundingOutpoint, newCommitView.height,
newCommitView.height,
lc.localUpdateLog.logIndex, remoteACKedIndex)

walletLog.Tracef("ChannelPoint(%v): remote chain: our_balance=%v, "+
lc.log.Tracef("remote chain: our_balance=%v, "+
"their_balance=%v, commit_tx: %v",
lc.channelState.FundingOutpoint, newCommitView.ourBalance,
newCommitView.ourBalance,
newCommitView.theirBalance,
newLogClosure(func() string {
return spew.Sdump(newCommitView.txn)
@@ -3423,9 +3429,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// In this case, we'll return an error to indicate the
// remote node sent us the wrong values. This will let
// the caller act accordingly.
walletLog.Errorf("ChannelPoint(%v), sync failed: "+
"remote provided invalid commit secret!",
lc.channelState.FundingOutpoint)
lc.log.Errorf("sync failed: remote provided invalid " +
"commit secret!")
return nil, nil, nil, ErrInvalidLastCommitSecret
}
}
@@ -3455,15 +3460,12 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// If their reported height for our local chain tail is ahead of our
// view, then we're behind!
case msg.RemoteCommitTailHeight > localTailHeight || isRestoredChan:
walletLog.Errorf("ChannelPoint(%v), sync failed with local "+
"data loss: remote believes our tail height is %v, "+
"while we have %v!", lc.channelState.FundingOutpoint,
lc.log.Errorf("sync failed with local data loss: remote "+
"believes our tail height is %v, while we have %v!",
msg.RemoteCommitTailHeight, localTailHeight)

if isRestoredChan {
walletLog.Warnf("ChannelPoint(%v): detected restored "+
"triggering DLP",
lc.channelState.FundingOutpoint)
lc.log.Warnf("detected restored triggering DLP")
}

// We must check that we had recovery options to ensure the
@@ -3475,9 +3477,9 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// doesn't support data loss protection. In either case
// it is not safe for us to keep using the channel, so
// we mark it borked and fail the channel.
walletLog.Errorf("ChannelPoint(%v), sync failed: "+
"local data loss, but no recovery option.",
lc.channelState.FundingOutpoint)
lc.log.Errorf("sync failed: local data loss, but no " +
"recovery option.")

return nil, nil, nil, ErrCannotSyncCommitChains
}
@@ -3492,9 +3494,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// is behind our view of the chain, then they probably lost some state,
// and we'll force close the channel.
case msg.RemoteCommitTailHeight+1 < localTailHeight:
walletLog.Errorf("ChannelPoint(%v), sync failed: remote "+
"believes our tail height is %v, while we have %v!",
lc.channelState.FundingOutpoint,
lc.log.Errorf("sync failed: remote believes our tail height is "+
"%v, while we have %v!",
msg.RemoteCommitTailHeight, localTailHeight)
return nil, nil, nil, ErrCommitSyncRemoteDataLoss
@@ -3507,9 +3508,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// this case we'll re-send the last revocation message that we sent.
// This will be the revocation message for our prior chain tail.
case msg.RemoteCommitTailHeight+1 == localTailHeight:
walletLog.Debugf("ChannelPoint(%v), sync: remote believes "+
"our tail height is %v, while we have %v, we owe "+
"them a revocation", lc.channelState.FundingOutpoint,
lc.log.Debugf("sync: remote believes our tail height is %v, "+
"while we have %v, we owe them a revocation",
msg.RemoteCommitTailHeight, localTailHeight)

revocationMsg, err := lc.generateRevocation(
@@ -3555,9 +3555,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// There should be no other possible states.
default:
walletLog.Errorf("ChannelPoint(%v), sync failed: remote "+
"believes our tail height is %v, while we have %v!",
lc.channelState.FundingOutpoint,
lc.log.Errorf("sync failed: remote believes our tail height is "+
"%v, while we have %v!",
msg.RemoteCommitTailHeight, localTailHeight)
return nil, nil, nil, ErrCannotSyncCommitChains
}
@@ -3572,18 +3571,16 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// or not, we will fail the channel, but should not force close it
// automatically.
case msg.NextLocalCommitHeight > remoteTipHeight+1:
walletLog.Errorf("ChannelPoint(%v), sync failed: remote's "+
"next commit height is %v, while we believe it is %v!",
lc.channelState.FundingOutpoint,
lc.log.Errorf("sync failed: remote's next commit height is %v, "+
"while we believe it is %v!",
msg.NextLocalCommitHeight, remoteTipHeight)

return nil, nil, nil, ErrCannotSyncCommitChains

// They are waiting for a state they have already ACKed.
case msg.NextLocalCommitHeight <= remoteTailHeight:
walletLog.Errorf("ChannelPoint(%v), sync failed: remote's "+
"next commit height is %v, while we believe it is %v!",
lc.channelState.FundingOutpoint,
lc.log.Errorf("sync failed: remote's next commit height is %v, "+
"while we believe it is %v!",
msg.NextLocalCommitHeight, remoteTipHeight)

// They previously ACKed our current tail, and now they are
@@ -3598,9 +3595,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// re-send all the updates necessary to recreate this state, along
// with the commit sig.
case msg.NextLocalCommitHeight == remoteTipHeight:
walletLog.Debugf("ChannelPoint(%v), sync: remote's next "+
"commit height is %v, while we believe it is %v, we "+
"owe them a commitment", lc.channelState.FundingOutpoint,
lc.log.Debugf("sync: remote's next commit height is %v, while "+
"we believe it is %v, we owe them a commitment",
msg.NextLocalCommitHeight, remoteTipHeight)

// Grab the current remote chain tip from the database. This
@@ -3629,9 +3625,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
// can have at most two elements. If that's the case, something is
// wrong.
default:
walletLog.Errorf("ChannelPoint(%v), sync failed: remote's "+
"next commit height is %v, while we believe it is %v!",
lc.channelState.FundingOutpoint,
lc.log.Errorf("sync failed: remote's next commit height is %v, "+
"while we believe it is %v!",
msg.NextLocalCommitHeight, remoteTipHeight)
return nil, nil, nil, ErrCannotSyncCommitChains
}
@@ -3669,9 +3664,8 @@ func (lc *LightningChannel) ProcessChanSyncMsg(
if !tweakless && commitPoint != nil &&
!commitPoint.IsEqual(msg.LocalUnrevokedCommitPoint) {

walletLog.Errorf("ChannelPoint(%v), sync failed: remote "+
"sent invalid commit point for height %v!",
lc.channelState.FundingOutpoint,
lc.log.Errorf("sync failed: remote sent invalid commit point "+
"for height %v!",
msg.NextLocalCommitHeight)
return nil, nil, nil, ErrInvalidLocalUnrevokedCommitPoint
}
@@ -4033,13 +4027,13 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig,
return err
}

walletLog.Tracef("ChannelPoint(%v): extending local chain to height %v, "+
lc.log.Tracef("extending local chain to height %v, "+
"local_log=%v, remote_log=%v",
lc.channelState.FundingOutpoint, localCommitmentView.height,
localCommitmentView.height,
localACKedIndex, lc.remoteUpdateLog.logIndex)

walletLog.Tracef("ChannelPoint(%v): local chain: our_balance=%v, "+
"their_balance=%v, commit_tx: %v", lc.channelState.FundingOutpoint,
lc.log.Tracef("local chain: our_balance=%v, "+
"their_balance=%v, commit_tx: %v",
localCommitmentView.ourBalance, localCommitmentView.theirBalance,
newLogClosure(func() string {
return spew.Sdump(localCommitmentView.txn)
@@ -4181,8 +4175,8 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []c
return nil, nil, err
}

walletLog.Tracef("ChannelPoint(%v): revoking height=%v, now at height=%v",
lc.channelState.FundingOutpoint, lc.localCommitChain.tail().height,
lc.log.Tracef("revoking height=%v, now at height=%v",
lc.localCommitChain.tail().height,
lc.currentHeight+1)

// Advance our tail, as we've revoked our previous state.
@@ -4198,9 +4192,9 @@ func (lc *LightningChannel) RevokeCurrentCommitment() (*lnwire.RevokeAndAck, []c
return nil, nil, err
}

walletLog.Tracef("ChannelPoint(%v): state transition accepted: "+
lc.log.Tracef("state transition accepted: "+
"our_balance=%v, their_balance=%v",
lc.channelState.FundingOutpoint, chainTail.ourBalance,
chainTail.ourBalance,
chainTail.theirBalance)

revocationMsg.ChanID = lnwire.NewChanIDFromOutPoint(
@@ -4260,8 +4254,8 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) (
lc.channelState.RemoteCurrentRevocation = lc.channelState.RemoteNextRevocation
lc.channelState.RemoteNextRevocation = revMsg.NextRevocationKey

walletLog.Tracef("ChannelPoint(%v): remote party accepted state transition, "+
"revoked height %v, now at %v", lc.channelState.FundingOutpoint,
lc.log.Tracef("remote party accepted state transition, revoked height "+
"%v, now at %v",
lc.remoteCommitChain.tail().height,
lc.remoteCommitChain.tail().height+1)