multi: comprehensive typo fixes across all packages

practicalswift 2018-02-07 04:11:11 +01:00 committed by Olaoluwa Osuntokun
parent 72a5bc8648
commit a93736d21e
103 changed files with 435 additions and 435 deletions

@@ -137,11 +137,11 @@ func TestConstrainedPrefAttachmentNeedMoreChan(t *testing.T) {
 		},
 	}
-	prefAttatch := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
+	prefAttach := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
 		chanLimit, threshold)
 	for i, testCase := range testCases {
-		amtToAllocate, needMore := prefAttatch.NeedMoreChans(testCase.channels,
+		amtToAllocate, needMore := prefAttach.NeedMoreChans(testCase.channels,
 			testCase.walletAmt)
 		if amtToAllocate != testCase.amtAvailable {
@@ -228,7 +228,7 @@ func TestConstrainedPrefAttachmentSelectEmptyGraph(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to generate self key: %v", err)
 	}
-	prefAttatch := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
+	prefAttach := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
 		chanLimit, threshold)
 	skipNodes := make(map[NodeID]struct{})
@@ -246,7 +246,7 @@ func TestConstrainedPrefAttachmentSelectEmptyGraph(t *testing.T) {
 			// attempt to select a set of candidates channel for
 			// creation given the current state of the graph.
 			const walletFunds = btcutil.SatoshiPerBitcoin
-			directives, err := prefAttatch.Select(self, graph,
+			directives, err := prefAttach.Select(self, graph,
 				walletFunds, skipNodes)
 			if err != nil {
 				t1.Fatalf("unable to select attachment "+
@@ -257,7 +257,7 @@ func TestConstrainedPrefAttachmentSelectEmptyGraph(t *testing.T) {
 			// started with an empty graph.
 			if len(directives) != 0 {
 				t1.Fatalf("zero attachment directives "+
-					"should've been returned instead %v were",
+					"should have been returned instead %v were",
 					len(directives))
 			}
 		})
@@ -300,7 +300,7 @@ func TestConstrainedPrefAttachmentSelectTwoVertexes(t *testing.T) {
 			if err != nil {
 				t1.Fatalf("unable to generate self key: %v", err)
 			}
-			prefAttatch := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
+			prefAttach := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
 				chanLimit, threshold)
 			// For this set, we'll load the memory graph with two
@@ -315,7 +315,7 @@ func TestConstrainedPrefAttachmentSelectTwoVertexes(t *testing.T) {
 			// attempt to select a set of candidates channel for
 			// creation given the current state of the graph.
 			const walletFunds = btcutil.SatoshiPerBitcoin * 10
-			directives, err := prefAttatch.Select(self, graph,
+			directives, err := prefAttach.Select(self, graph,
 				walletFunds, skipNodes)
 			if err != nil {
 				t1.Fatalf("unable to select attachment directives: %v", err)
@@ -324,7 +324,7 @@ func TestConstrainedPrefAttachmentSelectTwoVertexes(t *testing.T) {
 			// Two new directives should have been selected, one
 			// for each node already present within the graph.
 			if len(directives) != 2 {
-				t1.Fatalf("two attachment directives should've been "+
+				t1.Fatalf("two attachment directives should have been "+
 					"returned instead %v were", len(directives))
 			}
@@ -387,14 +387,14 @@ func TestConstrainedPrefAttachmentSelectInsufficientFunds(t *testing.T) {
 			if err != nil {
 				t1.Fatalf("unable to generate self key: %v", err)
 			}
-			prefAttatch := NewConstrainedPrefAttachment(
+			prefAttach := NewConstrainedPrefAttachment(
 				minChanSize, maxChanSize, chanLimit, threshold,
 			)
 			// Next, we'll attempt to select a set of candidates,
 			// passing zero for the amount of wallet funds. This
 			// should return an empty slice of directives.
-			directives, err := prefAttatch.Select(self, graph, 0,
+			directives, err := prefAttach.Select(self, graph, 0,
 				skipNodes)
 			if err != nil {
 				t1.Fatalf("unable to select attachment "+
@@ -402,7 +402,7 @@ func TestConstrainedPrefAttachmentSelectInsufficientFunds(t *testing.T) {
 			}
 			if len(directives) != 0 {
 				t1.Fatalf("zero attachment directives "+
-					"should've been returned instead %v were",
+					"should have been returned instead %v were",
 					len(directives))
 			}
 		})
@@ -446,21 +446,21 @@ func TestConstrainedPrefAttachmentSelectGreedyAllocation(t *testing.T) {
 			if err != nil {
 				t1.Fatalf("unable to generate self key: %v", err)
 			}
-			prefAttatch := NewConstrainedPrefAttachment(
+			prefAttach := NewConstrainedPrefAttachment(
 				minChanSize, maxChanSize, chanLimit, threshold,
 			)
-			const chanCapcity = btcutil.SatoshiPerBitcoin
+			const chanCapacity = btcutil.SatoshiPerBitcoin
 			// Next, we'll add 3 nodes to the graph, creating an
 			// "open triangle topology".
 			edge1, _, err := graph.addRandChannel(nil, nil,
-				chanCapcity)
+				chanCapacity)
 			if err != nil {
 				t1.Fatalf("unable to create channel: %v", err)
 			}
 			_, _, err = graph.addRandChannel(
-				edge1.Peer.PubKey(), nil, chanCapcity,
+				edge1.Peer.PubKey(), nil, chanCapacity,
 			)
 			if err != nil {
 				t1.Fatalf("unable to create channel: %v", err)
@@ -502,7 +502,7 @@ func TestConstrainedPrefAttachmentSelectGreedyAllocation(t *testing.T) {
 			// result, the heuristic should try to greedily
 			// allocate funds to channels.
 			const availableBalance = btcutil.SatoshiPerBitcoin * 2.5
-			directives, err := prefAttatch.Select(self, graph,
+			directives, err := prefAttach.Select(self, graph,
 				availableBalance, skipNodes)
 			if err != nil {
 				t1.Fatalf("unable to select attachment "+
@@ -576,15 +576,15 @@ func TestConstrainedPrefAttachmentSelectSkipNodes(t *testing.T) {
 			if err != nil {
 				t1.Fatalf("unable to generate self key: %v", err)
 			}
-			prefAttatch := NewConstrainedPrefAttachment(
+			prefAttach := NewConstrainedPrefAttachment(
 				minChanSize, maxChanSize, chanLimit, threshold,
 			)
 			// Next, we'll create a simple topology of two nodes,
 			// with a single channel connecting them.
-			const chanCapcity = btcutil.SatoshiPerBitcoin
+			const chanCapacity = btcutil.SatoshiPerBitcoin
 			_, _, err = graph.addRandChannel(nil, nil,
-				chanCapcity)
+				chanCapacity)
 			if err != nil {
 				t1.Fatalf("unable to create channel: %v", err)
 			}
@@ -593,7 +593,7 @@ func TestConstrainedPrefAttachmentSelectSkipNodes(t *testing.T) {
 			// function to recommend potential attachment
 			// candidates.
 			const availableBalance = btcutil.SatoshiPerBitcoin * 2.5
-			directives, err := prefAttatch.Select(self, graph,
+			directives, err := prefAttach.Select(self, graph,
 				availableBalance, skipNodes)
 			if err != nil {
 				t1.Fatalf("unable to select attachment "+
@@ -617,7 +617,7 @@ func TestConstrainedPrefAttachmentSelectSkipNodes(t *testing.T) {
 			// without providing any new information, then we
 			// should get no new directives as both nodes has
 			// already been attached to.
-			directives, err = prefAttatch.Select(self, graph,
+			directives, err = prefAttach.Select(self, graph,
 				availableBalance, skipNodes)
 			if err != nil {
 				t1.Fatalf("unable to select attachment "+

@@ -639,7 +639,7 @@ func TestChannelDBRetributionStore(t *testing.T) {
 	restartDb := func() RetributionStore {
 		// Close and reopen channeldb
 		if err = db.Close(); err != nil {
-			t.Fatalf("unalbe to close channeldb during restart: %v",
+			t.Fatalf("unable to close channeldb during restart: %v",
 				err)
 		}
 		db, err = channeldb.Open(db.Path())
@@ -688,7 +688,7 @@ func countRetributions(t *testing.T, rs RetributionStore) int {
 // removes each one individually. Between each addition or removal, the number
 // of elements in the store is checked to ensure that it only changes by one.
 func testRetributionStoreAddRemove(frs FailingRetributionStore, t *testing.T) {
-	// Make sure that a new retribution store is actually emtpy.
+	// Make sure that a new retribution store is actually empty.
 	if count := countRetributions(t, frs); count != 0 {
 		t.Fatalf("expected 0 retributions, found %v", count)
 	}
@@ -704,7 +704,7 @@ func testRetributionStoreAddRemove(frs FailingRetributionStore, t *testing.T) {
 // testRetributionStoreAddRemove, except that it also restarts the store between
 // each operation to ensure that the results are properly persisted.
 func testRetributionStorePersistence(frs FailingRetributionStore, t *testing.T) {
-	// Make sure that a new retribution store is still emtpy after failing
+	// Make sure that a new retribution store is still empty after failing
 	// right off the bat.
 	frs.Restart()
 	if count := countRetributions(t, frs); count != 0 {
@@ -870,9 +870,9 @@ func testRetributionStoreForAll(
 	var isRestart bool
 restartCheck:
-	// Construct a set of all channel points presented by the store. Entires
+	// Construct a set of all channel points presented by the store. Entries
 	// are only be added to the set if their corresponding retribution
-	// infromation matches the test vector.
+	// information matches the test vector.
 	var foundSet = make(map[wire.OutPoint]struct{})
 	// Iterate through the stored retributions, checking to see if we have
@@ -897,7 +897,7 @@ restartCheck:
 			foundSet[ret.chanPoint] = struct{}{}
 		} else {
-			return fmt.Errorf("unkwown retribution retrieved "+
+			return fmt.Errorf("unknown retribution retrieved "+
 				"from db: %v", ret)
 		}

@@ -112,7 +112,7 @@ func (c *Conn) Read(b []byte) (n int, err error) {
 	// In order to reconcile the differences between the record abstraction
 	// of our AEAD connection, and the stream abstraction of TCP, we
 	// maintain an intermediate read buffer. If this buffer becomes
-	// depleated, then we read the next record, and feed it into the
+	// depleted, then we read the next record, and feed it into the
 	// buffer. Otherwise, we read directly from the buffer.
 	if c.readBuf.Len() == 0 {
 		plaintext, err := c.noise.ReadMessage(c.conn)
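
For readers skimming the hunk: the function being touched is a standard record-to-stream adapter. A minimal sketch of the whole pattern, assuming only the fields visible in this hunk (c.readBuf, c.noise, c.conn) and condensing the real error handling:

func (c *Conn) Read(b []byte) (int, error) {
	// Refill the intermediate buffer from the next decrypted record only
	// once the previous record has been fully consumed.
	if c.readBuf.Len() == 0 {
		plaintext, err := c.noise.ReadMessage(c.conn)
		if err != nil {
			return 0, err
		}
		if _, err := c.readBuf.Write(plaintext); err != nil {
			return 0, err
		}
	}

	// Otherwise, serve the caller straight from the buffer; a short read
	// simply leaves the remainder for the next call.
	return c.readBuf.Read(b)
}
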

@@ -140,7 +140,7 @@ func TestMaxPayloadLength(t *testing.T) {
 	b := Machine{}
 	b.split()
-	// Create a payload that's juust over the maximum allotted payload
+	// Create a payload that's only *slightly* above the maximum allotted payload
 	// length.
 	payloadToReject := make([]byte, math.MaxUint16+1)
@@ -162,7 +162,7 @@ func TestMaxPayloadLength(t *testing.T) {
 		"accepted")
 	}
-	// Generate a final payload which is juuust over the max payload length
+	// Generate a final payload which is only *slightly* above the max payload length
 	// when the MAC is accounted for.
 	payloadToReject = make([]byte, math.MaxUint16+1)
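
Both rejected payloads exercise the same guard: these messages carry a 16-bit length prefix, so anything longer than math.MaxUint16 bytes cannot be framed. A hedged sketch of that check (the error name is illustrative, not necessarily the package's):

// Reject any payload whose length cannot fit in the 2-byte length
// prefix that precedes each encrypted record.
if len(payload) > math.MaxUint16 {
	return ErrMaxMessageLengthExceeded // illustrative error name
}
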
@@ -190,7 +190,7 @@ func TestWriteMessageChunking(t *testing.T) {
 	// Launch a new goroutine to write the large message generated above in
 	// chunks. We spawn a new goroutine because otherwise, we may block as
-	// the kernal waits for the buffer to flush.
+	// the kernel waits for the buffer to flush.
 	var wg sync.WaitGroup
 	wg.Add(1)
 	go func() {
@@ -364,7 +364,7 @@ func TestBolt0008TestVectors(t *testing.T) {
 	recvKey, err := hex.DecodeString("bb9020b8965f4df047e07f955f3c4b884" +
 		"18984aadc5cdb35096b9ea8fa5c3442")
 	if err != nil {
-		t.Fatalf("unable to parse recv'ing key: %v", err)
+		t.Fatalf("unable to parse receiving key: %v", err)
 	}
 	chainKey, err := hex.DecodeString("919219dbb2920afa8db80f9a51787a840" +

@@ -36,12 +36,12 @@ type ChainNotifier interface {
 		heightHint uint32) (*ConfirmationEvent, error)
 	// RegisterSpendNtfn registers an intent to be notified once the target
-	// outpoint is succesfully spent within a confirmed transaction. The
+	// outpoint is successfully spent within a confirmed transaction. The
 	// returned SpendEvent will receive a send on the 'Spend' transaction
 	// once a transaction spending the input is detected on the blockchain.
 	// The heightHint parameter is provided as a convenience to light
 	// clients. The heightHint denotes the earliest height in the blockchain
-	// in which the target output could've been created.
+	// in which the target output could have been created.
 	//
 	// NOTE: This notifications should be triggered once the transaction is
 	// *seen* on the network, not when it has received a single confirmation.
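
As a usage sketch of the interface documented above (the field names follow the surrounding tests, e.g. SpenderTxHash; treat the exact signatures as assumptions rather than the package's guaranteed API):

spendEvent, err := notifier.RegisterSpendNtfn(&outpoint, heightHint)
if err != nil {
	return err
}
select {
case spend := <-spendEvent.Spend:
	// A transaction spending the outpoint was seen on the network.
	fmt.Printf("spent by tx %v\n", spend.SpenderTxHash)
case <-quit:
}
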
@@ -177,7 +177,7 @@ type NotifierDriver struct {
 	// New creates a new instance of a concrete ChainNotifier
 	// implementation given a variadic set up arguments. The function takes
-	// a varidaic number of interface parameters in order to provide
+	// a variadic number of interface parameters in order to provide
 	// initialization flexibility, thereby accommodating several potential
 	// ChainNotifier implementations.
 	New func(args ...interface{}) (ChainNotifier, error)

@@ -112,7 +112,7 @@ func testSingleConfirmationNotification(miner *rpctest.Harness,
 	// We'd like to test the case of being notified once a txid reaches
 	// a *single* confirmation.
 	//
-	// So first, let's send some coins to "ourself", obtainig a txid.
+	// So first, let's send some coins to "ourself", obtaining a txid.
 	// We're spending from a coinbase output here, so we use the dedicated
 	// function.
@@ -226,7 +226,7 @@ func testMultiConfirmationNotification(miner *rpctest.Harness,
 func testBatchConfirmationNotification(miner *rpctest.Harness,
 	notifier chainntnfs.ChainNotifier, t *testing.T) {
-	// We'd like to test a case of serving notifiations to multiple
+	// We'd like to test a case of serving notifications to multiple
 	// clients, each requesting to be notified once a txid receives
 	// various numbers of confirmations.
 	confSpread := [6]uint32{1, 2, 3, 6, 20, 22}
@@ -887,7 +887,7 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
 	// Broadcast our spending transaction.
 	spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
 	if err != nil {
-		t.Fatalf("unable to brodacst tx: %v", err)
+		t.Fatalf("unable to broadcast tx: %v", err)
 	}
 	err = waitForMempoolTx(miner, spenderSha)
@@ -928,7 +928,7 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
 			ntfn.SpentOutPoint, outpoint)
 	}
 	if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
-		t.Fatalf("ntfn includes wrong spender tx sha, reports %v intead of %v",
+		t.Fatalf("ntfn includes wrong spender tx sha, reports %v instead of %v",
 			ntfn.SpenderTxHash[:], spenderSha[:])
 	}
 	if ntfn.SpenderInputIndex != 0 {
@@ -980,7 +980,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
 	// Broadcast our spending transaction.
 	spenderSha, err := node.Node.SendRawTransaction(spendingTx, true)
 	if err != nil {
-		t.Fatalf("unable to brodacst tx: %v", err)
+		t.Fatalf("unable to broadcast tx: %v", err)
 	}
 	err = waitForMempoolTx(node, spenderSha)
@@ -1007,7 +1007,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
 	}
 	if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
 		t.Fatalf("ntfn includes wrong spender tx sha, "+
-			"reports %v intead of %v",
+			"reports %v instead of %v",
 			ntfn.SpenderTxHash[:], spenderSha[:])
 	}
 	if ntfn.SpenderInputIndex != 0 {
@@ -1063,7 +1063,7 @@ func testCancelEpochNtfn(node *rpctest.Harness, notifier chainntnfs.ChainNotifie
 	select {
 	case _, ok := <-epochClients[0].Epochs:
 		if ok {
-			t.Fatalf("epoch notification should've been cancelled")
+			t.Fatalf("epoch notification should have been cancelled")
 		}
 	case <-time.After(2 * time.Second):
 		t.Fatalf("epoch notification not sent")
@@ -1303,7 +1303,7 @@ var ntfnTests = []testCase{
 func TestInterfaces(t *testing.T) {
 	// Initialize the harness around a btcd node which will serve as our
 	// dedicated miner to generate blocks, cause re-orgs, etc. We'll set up
-	// this node with a chain length of 125, so we have plentyyy of BTC to
+	// this node with a chain length of 125, so we have plenty of BTC to
 	// play around with.
 	miner, err := rpctest.New(netParams, nil, nil)
 	if err != nil {

@@ -83,7 +83,7 @@ type chanCloseCfg struct {
 	broadcastTx func(*wire.MsgTx) error
 	// quit is a channel that should be sent upon in the occasion the state
-	// machine shouldk cease all progress and shutdown.
+	// machine should cease all progress and shutdown.
 	quit chan struct{}
 }
@@ -247,7 +247,7 @@ func (c *channelCloser) ShutdownChan() (*lnwire.Shutdown, error) {
 // ClosingTx returns the fully signed, final closing transaction.
 //
-// NOTE: THis transaction is only available if the state machine is in the
+// NOTE: This transaction is only available if the state machine is in the
 // closeFinished state.
 func (c *channelCloser) ClosingTx() (*wire.MsgTx, error) {
 	// If the state machine hasn't finished closing the channel then we'll

@@ -265,7 +265,7 @@ type ChannelCommitment struct {
 	Htlcs []HTLC
 	// TODO(roasbeef): pending commit pointer?
-	// * lets just walk thru
+	// * lets just walk through
 }
 // OpenChannel encapsulates the persistent and dynamic state of an open channel
@@ -399,7 +399,7 @@ func (c *OpenChannel) FullSync() error {
 	return c.Db.Update(c.fullSync)
 }
-// updateChanBucket is a helper function that returns a writeable bucket that a
+// updateChanBucket is a helper function that returns a writable bucket that a
 // channel's data resides in given: the public key for the node, the outpoint,
 // and the chainhash that the channel resides on.
 func updateChanBucket(tx *bolt.Tx, nodeKey *btcec.PublicKey,
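
For context, the helper described above is the usual bolt nested-bucket descent: one bucket level per key component. A sketch under an assumed bucket layout (node pubkey, then chain hash, then serialized channel point; writeOutpoint is the helper visible later in this diff, the rest of the layout is an assumption):

nodeChanBucket, err := tx.CreateBucketIfNotExists(nodeKey.SerializeCompressed())
if err != nil {
	return nil, err
}
chainBucket, err := nodeChanBucket.CreateBucketIfNotExists(chainHash[:])
if err != nil {
	return nil, err
}
var chanPointBuf bytes.Buffer
if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
	return nil, err
}
return chainBucket.CreateBucketIfNotExists(chanPointBuf.Bytes())
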
@@ -474,7 +474,7 @@ func readChanBucket(tx *bolt.Tx, nodeKey *btcec.PublicKey,
 	}
 	// With the bucket for the node fetched, we can now go down another
-	// level, for this channel iteslf.
+	// level, for this channel itself.
 	var chanPointBuf bytes.Buffer
 	chanPointBuf.Grow(outPointSize)
 	if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
@@ -924,7 +924,7 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
 	defer c.Unlock()
 	return c.Db.Update(func(tx *bolt.Tx) error {
-		// First, we'll grab the writeable bucket where this channel's
+		// First, we'll grab the writable bucket where this channel's
 		// data resides.
 		chanBucket, err := updateChanBucket(tx, c.IdentityPub,
 			&c.FundingOutpoint, c.ChainHash)
@@ -1099,7 +1099,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
 	c.RLock()
 	defer c.RUnlock()
-	// If we haven't created any state updates yet, then we'll exit erly as
+	// If we haven't created any state updates yet, then we'll exit early as
 	// there's nothing to be found on disk in the revocation bucket.
 	if c.RemoteCommitment.CommitHeight == 0 {
 		return nil, nil
@@ -1121,7 +1121,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
 		// Once we have the bucket that stores the revocation log from
 		// this channel, we'll jump to the _last_ key in bucket. As we
 		// store the update number on disk in a big-endian format,
-		// this'll retrieve the latest entry.
+		// this will retrieve the latest entry.
 		cursor := logBucket.Cursor()
 		_, tailLogEntry := cursor.Last()
 		logEntryReader := bytes.NewReader(tailLogEntry)
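
The big-endian detail is what makes cursor.Last() correct here: bolt iterates keys in bytewise order, and big-endian encodings of unsigned integers sort in numeric order. A tiny illustration (8-byte keys assumed):

var key [8]byte
binary.BigEndian.PutUint64(key[:], updateNum)
// The key for updateNum=2 sorts after the key for updateNum=1, so the
// last key in the bucket always belongs to the highest update number.
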

@@ -490,7 +490,7 @@ func TestChannelStateTransition(t *testing.T) {
 		t.Fatalf("unable to append to revocation log: %v", err)
 	}
-	// At this point, the remote commit chain shuold be nil, and the posted
+	// At this point, the remote commit chain should be nil, and the posted
 	// remote commitment should match the one we added as a diff above.
 	if _, err := channel.RemoteCommitChainTip(); err != ErrNoPendingCommit {
 		t.Fatalf("expected ErrNoPendingCommit, instead got %v", err)
@@ -591,7 +591,7 @@ func TestChannelStateTransition(t *testing.T) {
 	// revocation log has been deleted.
 	_, err = updatedChannel[0].FindPreviousState(oldRemoteCommit.CommitHeight)
 	if err == nil {
-		t.Fatal("revocation log search should've failed")
+		t.Fatal("revocation log search should have failed")
 	}
 }
@@ -600,7 +600,7 @@ func TestFetchPendingChannels(t *testing.T) {
 	cdb, cleanUp, err := makeTestDB()
 	if err != nil {
-		t.Fatalf("uanble to make test database: %v", err)
+		t.Fatalf("unable to make test database: %v", err)
 	}
 	defer cleanUp()
@@ -630,7 +630,7 @@ func TestFetchPendingChannels(t *testing.T) {
 			"got %v", 1, len(pendingChannels))
 	}
-	// The broadcast height of the pending channel should've been set
+	// The broadcast height of the pending channel should have been set
 	// properly.
 	if pendingChannels[0].FundingBroadcastHeight != broadcastHeight {
 		t.Fatalf("broadcast height mismatch: expected %v, got %v",
@@ -736,7 +736,7 @@ func TestFetchClosedChannels(t *testing.T) {
 	// channels only, or not.
 	pendingClosed, err := cdb.FetchClosedChannels(true)
 	if err != nil {
-		t.Fatalf("failed fetcing closed channels: %v", err)
+		t.Fatalf("failed fetching closed channels: %v", err)
 	}
 	if len(pendingClosed) != 1 {
 		t.Fatalf("incorrect number of pending closed channels: expecting %v,"+
@@ -769,7 +769,7 @@ func TestFetchClosedChannels(t *testing.T) {
 	// be retrieved when fetching all the closed channels.
 	closed, err = cdb.FetchClosedChannels(false)
 	if err != nil {
-		t.Fatalf("failed fetcing closed channels: %v", err)
+		t.Fatalf("failed fetching closed channels: %v", err)
 	}
 	if len(closed) != 1 {
 		t.Fatalf("incorrect number of closed channels: expecting %v, "+

@@ -28,7 +28,7 @@ func TestOpenWithCreate(t *testing.T) {
 		t.Fatalf("unable to close channeldb: %v", err)
 	}
-	// The path should have been succesfully created.
+	// The path should have been successfully created.
 	if !fileExists(dbPath) {
 		t.Fatalf("channeldb failed to create data directory")
 	}

@@ -180,7 +180,7 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli
 		// The targeted edge may have not been advertised
 		// within the network, so we ensure it's non-nil before
-		// deferencing its attributes.
+		// dereferencing its attributes.
 		if edge1 != nil {
 			edge1.db = c.db
 			if edge1.Node != nil {
@@ -199,7 +199,7 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli
 		// The targeted edge may have not been advertised
 		// within the network, so we ensure it's non-nil before
-		// deferencing its attributes.
+		// dereferencing its attributes.
 		if edge2 != nil {
 			edge2.db = c.db
 			if edge2.Node != nil {
@@ -499,7 +499,7 @@ func (c *ChannelGraph) HasChannelEdge(chanID uint64) (time.Time, time.Time, bool
 		// If the channel has been found in the graph, then retrieve
 		// the edges itself so we can return the last updated
-		// timestmaps.
+		// timestamps.
 		nodes := tx.Bucket(nodeBucket)
 		if nodes == nil {
 			return ErrGraphNodeNotFound
@@ -595,7 +595,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
 			return err
 		}
-		// For each of the outpoints that've been spent within the
+		// For each of the outpoints that have been spent within the
 		// block, we attempt to delete them from the graph as if that
 		// outpoint was a channel, then it has now been closed.
 		for _, chanPoint := range spentOutputs {
@@ -1271,7 +1271,7 @@ type ChannelEdgeInfo struct {
 // ChannelAuthProof is the authentication proof (the signature portion) for a
 // channel. Using the four signatures contained in the struct, and some
-// axillary knowledge (the funding script, node identities, and outpoint) nodes
+// auxillary knowledge (the funding script, node identities, and outpoint) nodes
 // on the network are able to validate the authenticity and existence of a
 // channel. Each of these signatures signs the following digest: chanID ||
 // nodeID1 || nodeID2 || bitcoinKey1|| bitcoinKey2 || 2-byte-feature-len ||

@@ -120,7 +120,7 @@ func TestNodeInsertionAndDeletion(t *testing.T) {
 	}
 	// Finally, attempt to fetch the node again. This should fail as the
-	// node should've been deleted from the database.
+	// node should have been deleted from the database.
 	_, err = graph.FetchLightningNode(testPub)
 	if err != ErrGraphNodeNotFound {
 		t.Fatalf("fetch after delete should fail!")
@@ -185,7 +185,7 @@ func TestPartialNode(t *testing.T) {
 	}
 	// Finally, attempt to fetch the node again. This should fail as the
-	// node should've been deleted from the database.
+	// node should have been deleted from the database.
 	_, err = graph.FetchLightningNode(testPub)
 	if err != ErrGraphNodeNotFound {
 		t.Fatalf("fetch after delete should fail!")
@@ -355,9 +355,9 @@ func TestEdgeInsertionDeletion(t *testing.T) {
 	}
 }
-// TestDisconnecteBlockAtHeight checks that the pruned state of the channel
+// TestDisconnectBlockAtHeight checks that the pruned state of the channel
 // database is what we expect after calling DisconnectBlockAtHeight.
-func TestDisconnecteBlockAtHeight(t *testing.T) {
+func TestDisconnectBlockAtHeight(t *testing.T) {
 	t.Parallel()
 	db, cleanUp, err := makeTestDB()
@@ -927,7 +927,7 @@ func assertPruneTip(t *testing.T, graph *ChannelGraph, blockHash *chainhash.Hash
 	}
 }
-func asserNumChans(t *testing.T, graph *ChannelGraph, n int) {
+func assertNumChans(t *testing.T, graph *ChannelGraph, n int) {
 	numChans := 0
 	if err := graph.ForEachChannel(func(*ChannelEdgeInfo, *ChannelEdgePolicy,
 		*ChannelEdgePolicy) error {
@@ -948,7 +948,7 @@ func asserNumChans(t *testing.T, graph *ChannelGraph, n int) {
 func assertChanViewEqual(t *testing.T, a []wire.OutPoint, b []*wire.OutPoint) {
 	if len(a) != len(b) {
 		_, _, line, _ := runtime.Caller(1)
-		t.Fatalf("line %v: chan views dont match", line)
+		t.Fatalf("line %v: chan views don't match", line)
 	}
 	chanViewSet := make(map[wire.OutPoint]struct{})
@@ -1080,7 +1080,7 @@ func TestGraphPruning(t *testing.T) {
 	// Count up the number of channels known within the graph, only 2
 	// should be remaining.
-	asserNumChans(t, graph, 2)
+	assertNumChans(t, graph, 2)
 	// Those channels should also be missing from the channel view.
 	channelView, err = graph.ChannelView()
@@ -1104,14 +1104,14 @@ func TestGraphPruning(t *testing.T) {
 		t.Fatalf("unable to prune graph: %v", err)
 	}
-	// No channels should've been detected as pruned.
+	// No channels should have been detected as pruned.
 	if len(prunedChans) != 0 {
 		t.Fatalf("channels were pruned but shouldn't have been")
 	}
-	// Once again, the prune tip should've been updated.
+	// Once again, the prune tip should have been updated.
 	assertPruneTip(t, graph, &blockHash, blockHeight)
-	asserNumChans(t, graph, 2)
+	assertNumChans(t, graph, 2)
 	// Finally, create a block that prunes the remainder of the channels
 	// from the graph.
@@ -1123,7 +1123,7 @@ func TestGraphPruning(t *testing.T) {
 		t.Fatalf("unable to prune graph: %v", err)
 	}
-	// The remainder of the channels should've been pruned from the graph.
+	// The remainder of the channels should have been pruned from the graph.
 	if len(prunedChans) != 2 {
 		t.Fatalf("incorrect number of channels pruned: expected %v, got %v",
 			2, len(prunedChans))
@@ -1132,7 +1132,7 @@ func TestGraphPruning(t *testing.T) {
 	// The prune tip should be updated, and no channels should be found
 	// within the current graph.
 	assertPruneTip(t, graph, &blockHash, blockHeight)
-	asserNumChans(t, graph, 0)
+	assertNumChans(t, graph, 0)
 	// Finally, the channel view at this point in the graph should now be
 	// completely empty.

@@ -27,7 +27,7 @@ func randInvoice(value lnwire.MilliSatoshi) (*Invoice, error) {
 		},
 	}
 	i.Memo = []byte("memo")
-	i.Receipt = []byte("recipt")
+	i.Receipt = []byte("receipt")
 	// Create a random byte slice of MaxPaymentRequestSize bytes to be used
 	// as a dummy paymentrequest, and determine if it should be set based
@@ -62,12 +62,12 @@ func TestInvoiceWorkflow(t *testing.T) {
 		CreationDate: time.Unix(time.Now().Unix(), 0),
 	}
 	fakeInvoice.Memo = []byte("memo")
-	fakeInvoice.Receipt = []byte("recipt")
+	fakeInvoice.Receipt = []byte("receipt")
 	fakeInvoice.PaymentRequest = []byte("")
 	copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:])
 	fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000)
-	// Add the invoice to the database, this should suceed as there aren't
+	// Add the invoice to the database, this should succeed as there aren't
 	// any existing invoices within the database with the same payment
 	// hash.
 	if err := db.AddInvoice(fakeInvoice); err != nil {
@@ -144,12 +144,12 @@ func TestInvoiceWorkflow(t *testing.T) {
 	}
 	// The retrieve list of invoices should be identical as since we're
-	// using big endian, the invoices should be retrieved in asecending
+	// using big endian, the invoices should be retrieved in ascending
 	// order (and the primary key should be incremented with each
 	// insertion).
 	for i := 0; i < len(invoices)-1; i++ {
 		if !reflect.DeepEqual(invoices[i], dbInvoices[i]) {
-			t.Fatalf("retrived invoices don't match %v vs %v",
+			t.Fatalf("retrieved invoices don't match %v vs %v",
 				spew.Sdump(invoices[i]),
 				spew.Sdump(dbInvoices[i]))
 		}

@@ -61,7 +61,7 @@ type ContractTerm struct {
 	// extended.
 	PaymentPreimage [32]byte
-	// Value is the expected amount of milli-satoshis to be payed to an
+	// Value is the expected amount of milli-satoshis to be paid to an
 	// HTLC which can be satisfied by the above preimage.
 	Value lnwire.MilliSatoshi
@@ -301,7 +301,7 @@ func putInvoice(invoices *bolt.Bucket, invoiceIndex *bolt.Bucket,
 		return err
 	}
-	// Add the payment hash to the invoice index. This'll let us quickly
+	// Add the payment hash to the invoice index. This will let us quickly
 	// identify if we can settle an incoming payment, and also to possibly
 	// allow a single invoice to have multiple payment installations.
 	paymentHash := sha256.Sum256(i.Terms.PaymentPreimage[:])
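
The index write the comment describes amounts to a single Put keyed by the payment hash; a minimal sketch, with invoiceNum as an assumed serialized key (the invoiceIndex bucket comes from the function's parameters):

// Map sha256(preimage) -> invoice number so an incoming HTLC's payment
// hash can be resolved to its invoice in one bucket lookup.
paymentHash := sha256.Sum256(i.Terms.PaymentPreimage[:])
if err := invoiceIndex.Put(paymentHash[:], invoiceNum); err != nil {
	return err
}
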

@@ -192,7 +192,7 @@ func TestMigrationWithPanic(t *testing.T) {
 		})
 	}
-	// Create migration function which changes the initialy created data and
+	// Create migration function which changes the initially created data and
 	// throw the panic, in this case we pretending that something goes.
 	migrationWithPanic := func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
@@ -212,7 +212,7 @@ func TestMigrationWithPanic(t *testing.T) {
 	}
 	if meta.DbVersionNumber != 0 {
-		t.Fatal("migration paniced but version is changed")
+		t.Fatal("migration panicked but version is changed")
 	}
 	err = d.Update(func(tx *bolt.Tx) error {
@@ -261,8 +261,8 @@ func TestMigrationWithFatal(t *testing.T) {
 		})
 	}
-	// Create migration function which changes the initialy created data and
-	// return the error, in this case we pretending that somthing goes
+	// Create migration function which changes the initially created data and
+	// return the error, in this case we pretending that something goes
 	// wrong.
 	migrationWithFatal := func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
@@ -332,7 +332,7 @@ func TestMigrationWithoutErrors(t *testing.T) {
 		})
 	}
-	// Create migration function which changes the initialy created data.
+	// Create migration function which changes the initially created data.
 	migrationWithoutErrors := func(tx *bolt.Tx) error {
 		bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
 		if err != nil {
@@ -352,7 +352,7 @@ func TestMigrationWithoutErrors(t *testing.T) {
 	if meta.DbVersionNumber != 1 {
 		t.Fatal("version number isn't changed after " +
-			"succesfully aplied migration")
+			"successfully applied migration")
 	}
 	err = d.Update(func(tx *bolt.Tx) error {
@@ -363,7 +363,7 @@ func TestMigrationWithoutErrors(t *testing.T) {
 		value := bucket.Get(keyPrefix)
 		if !bytes.Equal(value, afterMigration) {
-			return errors.New("migration wasn't applyied " +
+			return errors.New("migration wasn't applied " +
 				"properly")
 		}

@@ -23,7 +23,7 @@ var (
 // channel open with. Information such as the Bitcoin network the node
 // advertised, and its identity public key are also stored. Additionally, this
 // struct and the bucket its stored within have store data similar to that of
-// Bitcion's addrmanager. The TCP address information stored within the struct
+// Bitcoin's addrmanager. The TCP address information stored within the struct
 // can be used to establish persistent connections will all channel
 // counterparties on daemon startup.
 //

@@ -15,7 +15,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
 	cdb, cleanUp, err := makeTestDB()
 	if err != nil {
-		t.Fatalf("uanble to make test database: %v", err)
+		t.Fatalf("unable to make test database: %v", err)
 	}
 	defer cleanUp()
@@ -72,7 +72,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
 		}
 	}
-	// Next, we'll excercise the methods to append additionall IP
+	// Next, we'll exercise the methods to append additional IP
 	// addresses, and also to update the last seen time.
 	if err := node1.UpdateLastSeen(time.Now()); err != nil {
 		t.Fatalf("unable to update last seen: %v", err)
@@ -81,7 +81,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
 		t.Fatalf("unable to update addr: %v", err)
 	}
-	// Fetch the same node from the databse according to its public key.
+	// Fetch the same node from the database according to its public key.
 	node1DB, err := cdb.FetchLinkNode(pub1)
 	if err != nil {
 		t.Fatalf("unable to find node: %v", err)
@@ -94,7 +94,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
 			node1.LastSeen.Unix(), node1DB.LastSeen.Unix())
 	}
 	if len(node1DB.Addresses) != 2 {
-		t.Fatalf("wrong length for node1 addrsses: expected %v, got %v",
+		t.Fatalf("wrong length for node1 addresses: expected %v, got %v",
 			2, len(node1DB.Addresses))
 	}
 	if node1DB.Addresses[0].String() != addr1.String() {

@@ -647,7 +647,7 @@ func closeChannel(ctx *cli.Context) error {
 		err error
 	)
-	// Show command help if no arguments provieded
+	// Show command help if no arguments provided
 	if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
 		cli.ShowCommandHelp(ctx, "closechannel")
 		return nil
@@ -1020,7 +1020,7 @@ var sendPaymentCommand = cli.Command{
 }
 func sendPayment(ctx *cli.Context) error {
-	// Show command help if no arguments provieded
+	// Show command help if no arguments provided
 	if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
 		cli.ShowCommandHelp(ctx, "sendpayment")
 		return nil
@@ -1836,7 +1836,7 @@ func debugLevel(ctx *cli.Context) error {
 	return nil
 }
-var decodePayReqComamnd = cli.Command{
+var decodePayReqCommand = cli.Command{
 	Name: "decodepayreq",
 	Usage: "Decode a payment request.",
 	Description: "Decode the passed payment request revealing the destination, payment hash and value of the payment request",

@@ -188,7 +188,7 @@ func main() {
 		queryRoutesCommand,
 		getNetworkInfoCommand,
 		debugLevelCommand,
-		decodePayReqComamnd,
+		decodePayReqCommand,
 		listChainTxnsCommand,
 		stopCommand,
 		signMessageCommand,

@@ -628,7 +628,7 @@ func parseAndSetDebugLevels(debugLevel string) error {
 		// Validate subsystem.
 		if _, exists := subsystemLoggers[subsysID]; !exists {
 			str := "The specified subsystem [%v] is invalid -- " +
-				"supported subsytems %v"
+				"supported subsystems %v"
 			return fmt.Errorf(str, subsysID, supportedSubsystems())
 		}

@@ -160,7 +160,7 @@ func (a ArbitratorState) String() string {
 }
 // resolverType is an enum that enumerates the various types of resolvers. When
-// writing resolvers to disk, we prepend this to the raw bytes stroed. This
+// writing resolvers to disk, we prepend this to the raw bytes stored. This
 // allows us to properly decode the resolver into the proper type.
 type resolverType uint8
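
The type-prefix framing mentioned above is a one-byte tag written ahead of the resolver's serialization; on read, that byte selects the concrete decoder. A hedged sketch (the enum member and buffer names are illustrative, not the package's):

var b bytes.Buffer
// Prepend the one-byte resolver type so the reader knows which
// concrete resolver to decode the remaining bytes into.
b.WriteByte(byte(resolverTimeout)) // illustrative enum member
b.Write(rawResolverBytes)
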

@@ -90,7 +90,7 @@ type ChainArbitratorConfig struct {
 	// both to the utxo nursery. Once this function returns, the nursery
 	// should have safely persisted the outputs to disk, and should start
 	// the process of incubation. This is used when a resolver wishes to
-	// pass off the output to the nursery as we're inly waiting on an
+	// pass off the output to the nursery as we're only waiting on an
 	// absolute/relative item block.
 	IncubateOutputs func(wire.OutPoint, *lnwallet.CommitOutputResolution,
 		*lnwallet.OutgoingHtlcResolution,

@@ -41,7 +41,7 @@ type ChainEventSubscription struct {
 	// material required to bring the cheating channel peer to justice.
 	ContractBreach chan *lnwallet.BreachRetribution
-	// ProcessACK is a channel that'll be used by the chainWatcher to
+	// ProcessACK is a channel that will be used by the chainWatcher to
 	// synchronize dispatch and processing of the notification with the act
 	// of updating the state of the channel on disk. This ensures that the
 	// event can be reliably handed off.
@@ -73,7 +73,7 @@ type chainWatcher struct {
 	// database to ensure that we act using the most up to date state.
 	chanState *channeldb.OpenChannel
-	// stateHintObfuscator is a 48-bit state hint that's used to obfsucate
+	// stateHintObfuscator is a 48-bit state hint that's used to obfuscate
 	// the current state number on the commitment transactions.
 	stateHintObfuscator [lnwallet.StateHintSize]byte
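
For background, a state hint of this kind is typically applied by XORing the commitment's state number with a shared per-channel obfuscator before embedding it in the transaction, so third parties cannot count channel updates. A simplified sketch under that assumption (a 6-byte obfuscator; where the hint lands in the real transaction is omitted):

func obfuscateStateNum(stateNum uint64, obfuscator [6]byte) uint64 {
	// Expand the 6-byte obfuscator into a 48-bit mask...
	var mask uint64
	for i, b := range obfuscator {
		mask |= uint64(b) << (8 * uint(5-i))
	}
	// ...and XOR it with the state number; XORing again recovers it.
	return stateNum ^ mask
}
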

@@ -24,21 +24,21 @@ const (
 	broadcastRedeemMultiplier = 2
 )
-// WitnessSubcription represents an intent to be notified once new witnesses
+// WitnessSubscription represents an intent to be notified once new witnesses
 // are discovered by various active contract resolvers. A contract resolver may
 // use this to be notified of when it can satisfy an incoming contract after we
 // discover the witness for an outgoing contract.
-type WitnessSubcription struct {
+type WitnessSubscription struct {
 	// WitnessUpdates is a channel that newly discovered witnesses will be
 	// sent over.
 	//
-	// TODO(roasbef): couple with WitnessType?
+	// TODO(roasbeef): couple with WitnessType?
 	WitnessUpdates <-chan []byte
-	// CancelSubcription is a function closure that should be used by a
+	// CancelSubscription is a function closure that should be used by a
 	// client to cancel the subscription once they are no longer interested
 	// in receiving new updates.
-	CancelSubcription func()
+	CancelSubscription func()
 }
 // WitnessBeacon is a global beacon of witnesses. Contract resolvers will use
@@ -49,9 +49,9 @@ type WitnessSubcription struct {
 // TODO(roasbeef): need to delete the pre-images once we've used them
 // and have been sufficiently confirmed?
 type WitnessBeacon interface {
-	// SubcribeUpdates returns a channel that will be sent upon *each* time
+	// SubscribeUpdates returns a channel that will be sent upon *each* time
 	// a new preimage is discovered.
-	SubcribeUpdates() *WitnessSubcription
+	SubscribeUpdates() *WitnessSubscription
 	// LookupPreImage attempts to lookup a preimage in the global cache.
 	// True is returned for the second argument if the preimage is found.
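
A consumption sketch of the renamed subscription API, mirroring the pattern visible in the Resolve hunk further below (subscribe, defer cancellation, select on updates; the quit channel is an assumption of the caller's context):

sub := preimageDB.SubscribeUpdates()
defer sub.CancelSubscription()

select {
case preimage := <-sub.WitnessUpdates:
	// A new witness (preimage) was discovered; act on it.
	_ = preimage
case <-quit:
}
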
@@ -254,7 +254,7 @@ func (c *ChannelArbitrator) Start() error {
 		return err
 	}
-	// If we start and ended at the awiting full resolution state, then
+	// If we start and ended at the awaiting full resolution state, then
 	// we'll relaunch our set of unresolved contracts.
 	if startingState == StateWaitingFullResolution &&
 		nextState == StateWaitingFullResolution {
@@ -1144,7 +1144,7 @@ func (c *ChannelArbitrator) prepContractResolutions(htlcActions ChainActionMap,
 	return htlcResolvers, msgsToSend, nil
 }
-// resolveContract is a goroutien tasked with fully resolving an unresolved
+// resolveContract is a goroutine tasked with fully resolving an unresolved
 // contract. Either the initial contract will be resolved after a single step,
 // or the contract will itself create another contract to be resolved. In
 // either case, one the contract has been fully resolved, we'll signal back to

@ -69,7 +69,7 @@ type ContractResolver interface {
// given ContractResolver implementation. It contains all the items that a // given ContractResolver implementation. It contains all the items that a
// resolver requires to carry out its duties. // resolver requires to carry out its duties.
type ResolverKit struct { type ResolverKit struct {
// ChannelArbiratorConfig contains all the interfaces and closures // ChannelArbitratorConfig contains all the interfaces and closures
// required for the resolver to interact with outside sub-systems. // required for the resolver to interact with outside sub-systems.
ChannelArbitratorConfig ChannelArbitratorConfig
@ -960,7 +960,7 @@ var _ ContractResolver = (*htlcOutgoingContestResolver)(nil)
// it hasn't expired. In this case, we can resolve the HTLC if we learn of the // it hasn't expired. In this case, we can resolve the HTLC if we learn of the
// preimage, otherwise the remote party will sweep it after it expires. // preimage, otherwise the remote party will sweep it after it expires.
// //
// TODO(roabseef): just embed the other resolver? // TODO(roasbeef): just embed the other resolver?
type htlcIncomingContestResolver struct { type htlcIncomingContestResolver struct {
// htlcExpiry is the absolute expiry of this incoming HTLC. We use this // htlcExpiry is the absolute expiry of this incoming HTLC. We use this
// value to determine if we can exit early as if the HTLC times out, // value to determine if we can exit early as if the HTLC times out,
@ -1055,13 +1055,13 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
// If the HTLC hasn't expired yet, then we may still be able to claim // If the HTLC hasn't expired yet, then we may still be able to claim
// it if we learn of the pre-image, so we'll wait and see if it pops // it if we learn of the pre-image, so we'll wait and see if it pops
// up, or the HTLC times out. // up, or the HTLC times out.
preimageSubscription := h.PreimageDB.SubcribeUpdates() preimageSubscription := h.PreimageDB.SubscribeUpdates()
blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn() blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn()
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer func() { defer func() {
preimageSubscription.CancelSubcription() preimageSubscription.CancelSubscription()
blockEpochs.Cancel() blockEpochs.Cancel()
}() }()
for { for {
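
The excerpt cuts off at the top of the wait loop, which races two events: a discovered preimage (claim path) and the HTLC's expiry (timeout path). A self-contained sketch of that race, with all names illustrative rather than lnd's actual fields:

```go
package main

import "fmt"

// waitForOutcome blocks until either a preimage shows up, letting us claim
// the HTLC, or a block at or past the expiry height arrives, at which point
// the remote party will sweep the output.
func waitForOutcome(preimages <-chan []byte, heights <-chan uint32,
	expiry uint32) string {

	for {
		select {
		case p := <-preimages:
			return fmt.Sprintf("claim with preimage %x", p)
		case h := <-heights:
			if h >= expiry {
				return "expired: remote party sweeps"
			}
		}
	}
}

func main() {
	heights := make(chan uint32, 1)
	heights <- 500000
	fmt.Println(waitForOutcome(make(chan []byte), heights, 500000))
}
```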

@ -18,7 +18,7 @@ import (
) )
// NetworkPeerBootstrapper is an interface that represents an initial peer // NetworkPeerBootstrapper is an interface that represents an initial peer
// boostrap mechanism. This interface is to be used to bootstrap a new peer to // bootstrap mechanism. This interface is to be used to bootstrap a new peer to
// the connection by providing it with the pubkey+address of a set of existing // the connection by providing it with the pubkey+address of a set of existing
// peers on the network. Several bootstrap mechanisms can be implemented such // peers on the network. Several bootstrap mechanisms can be implemented such
// as DNS, in channel graph, DHT's, etc. // as DNS, in channel graph, DHT's, etc.
@ -48,7 +48,7 @@ func MultiSourceBootstrap(ignore map[autopilot.NodeID]struct{}, numAddrs uint32,
var addrs []*lnwire.NetAddress var addrs []*lnwire.NetAddress
for _, bootStrapper := range bootStrappers { for _, bootStrapper := range bootStrappers {
// If we already have enough addresses, then we can exit early // If we already have enough addresses, then we can exit early
// w/o querying the additional boostrappers. // w/o querying the additional bootstrappers.
if uint32(len(addrs)) >= numAddrs { if uint32(len(addrs)) >= numAddrs {
break break
} }
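
The early-exit aggregation reads cleanly in isolation. A hedged, self-contained sketch, with the bootstrapper interface and address type reduced to stand-ins:

```go
package main

import "fmt"

// bootstrapper stands in for any source that can return up to n candidate
// peer addresses (DNS seeds, the channel graph, a DHT, ...).
type bootstrapper interface {
	sample(n uint32) []string
}

type staticSource []string

func (s staticSource) sample(n uint32) []string {
	if uint32(len(s)) <= n {
		return s
	}
	return s[:n]
}

// multiSourceBootstrap queries each source in turn, stopping as soon as
// enough addresses have been gathered, mirroring the loop above.
func multiSourceBootstrap(numAddrs uint32, sources ...bootstrapper) []string {
	var addrs []string
	for _, src := range sources {
		if uint32(len(addrs)) >= numAddrs {
			break
		}
		addrs = append(addrs, src.sample(numAddrs-uint32(len(addrs)))...)
	}
	return addrs
}

func main() {
	dns := staticSource{"seed-a:9735", "seed-b:9735"}
	graph := staticSource{"node-c:9735"}
	fmt.Println(multiSourceBootstrap(3, dns, graph))
}
```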
@ -85,7 +85,7 @@ type ChannelGraphBootstrapper struct {
chanGraph autopilot.ChannelGraph chanGraph autopilot.ChannelGraph
// hashAccumulator is a set of 32 random bytes that are read upon the // hashAccumulator is a set of 32 random bytes that are read upon the
// creation of the channel graph boostrapper. We use this value to // creation of the channel graph bootstrapper. We use this value to
// randomly select nodes within the known graph to connect to. After // randomly select nodes within the known graph to connect to. After
// each selection, we rotate the accumulator by hashing it with itself. // each selection, we rotate the accumulator by hashing it with itself.
hashAccumulator [32]byte hashAccumulator [32]byte
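
The rotate step can be as small as re-hashing the accumulator in place. A minimal sketch, assuming SHA-256 (the comment does not name the hash function, so that choice is an assumption):

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Seed the accumulator with 32 random bytes at creation time.
	var acc [32]byte
	if _, err := rand.Read(acc[:]); err != nil {
		panic(err)
	}

	// After each node-selection round, rotate by hashing the accumulator
	// with itself so the next round samples a fresh pseudorandom slice
	// of the known graph.
	acc = sha256.Sum256(acc[:])
	fmt.Printf("rotated accumulator: %x\n", acc)
}
```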
@ -257,7 +257,7 @@ var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil)
// NewDNSSeedBootstrapper returns a new instance of the DNSSeedBootstrapper. // NewDNSSeedBootstrapper returns a new instance of the DNSSeedBootstrapper.
// The set of passed seeds should point to DNS servers that properly implement // The set of passed seeds should point to DNS servers that properly implement
// Lighting's DNS peer bootstrapping protocol as defined in BOLT-0010. The set // Lightning's DNS peer bootstrapping protocol as defined in BOLT-0010. The set
// of passed DNS seeds should come in pairs, with the second host name to be // of passed DNS seeds should come in pairs, with the second host name to be
// used as a fallback for manual TCP resolution in the case of an error // used as a fallback for manual TCP resolution in the case of an error
// receiving the UDP response. The second host should return a single A record // receiving the UDP response. The second host should return a single A record

@ -27,7 +27,7 @@ var (
// messageStoreKey is a key used to create a top level bucket in // messageStoreKey is a key used to create a top level bucket in
// the gossiper database, used for storing messages that are to // the gossiper database, used for storing messages that are to
// be sent to peers. Currently this is used for reliably sending // be sent to peers. Currently this is used for reliably sending
// AnnounceSignatures messages, by peristing them until a send // AnnounceSignatures messages, by persisting them until a send
// operation has succeeded. // operation has succeeded.
messageStoreKey = []byte("message-store") messageStoreKey = []byte("message-store")
) )
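
The persist-until-sent pattern looks roughly like the sketch below, written directly against the boltdb API; the helper names are hypothetical, and the real logic lives behind lnd's database wrappers:

```go
package messagestore

import bolt "github.com/boltdb/bolt"

var messageStoreKey = []byte("message-store")

// persistForRetry stores a serialized message under the given key so it can
// be re-sent after a restart; the caller deletes it once a send succeeds.
func persistForRetry(db *bolt.DB, key, msg []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists(messageStoreKey)
		if err != nil {
			return err
		}
		return bucket.Put(key, msg)
	})
}

// markSent removes a message from the store after a successful send.
func markSent(db *bolt.DB, key []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists(messageStoreKey)
		if err != nil {
			return err
		}
		return bucket.Delete(key)
	})
}
```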
@ -184,7 +184,7 @@ type AuthenticatedGossiper struct {
// as we know it. // as we know it.
bestHeight uint32 bestHeight uint32
// selfKey is the identity public key of the backing Lighting node. // selfKey is the identity public key of the backing Lightning node.
selfKey *btcec.PublicKey selfKey *btcec.PublicKey
// channelMtx is used to restrict the database access to one // channelMtx is used to restrict the database access to one
@ -315,7 +315,7 @@ func (d *AuthenticatedGossiper) SynchronizeNode(pub *btcec.PublicKey) error {
// channel forwarding policies for the specified channels. If no channels are // channel forwarding policies for the specified channels. If no channels are
// specified, then the update will be applied to all outgoing channels from the // specified, then the update will be applied to all outgoing channels from the
// source node. Policy updates are done in two stages: first, the // source node. Policy updates are done in two stages: first, the
// AuthenticatedGossiper ensures the update has been committed by dependant // AuthenticatedGossiper ensures the update has been committed by dependent
// sub-systems, then it signs and broadcasts new updates to the network. // sub-systems, then it signs and broadcasts new updates to the network.
func (d *AuthenticatedGossiper) PropagateChanPolicyUpdate( func (d *AuthenticatedGossiper) PropagateChanPolicyUpdate(
newSchema routing.ChannelPolicy, chanPoints ...wire.OutPoint) error { newSchema routing.ChannelPolicy, chanPoints ...wire.OutPoint) error {
@ -493,7 +493,7 @@ func (d *deDupedAnnouncements) Reset() {
// reset is the private version of the Reset method. We have this so we can // reset is the private version of the Reset method. We have this so we can
// call this method within methods that are already holding the lock. // call this method within methods that are already holding the lock.
func (d *deDupedAnnouncements) reset() { func (d *deDupedAnnouncements) reset() {
// Storage of each type of announcement (channel anouncements, channel // Storage of each type of announcement (channel announcements, channel
// updates, node announcements) is set to an empty map where the // updates, node announcements) is set to an empty map where the
// appropriate key points to the corresponding lnwire.Message. // appropriate key points to the corresponding lnwire.Message.
d.channelAnnouncements = make(map[lnwire.ShortChannelID]msgWithSenders) d.channelAnnouncements = make(map[lnwire.ShortChannelID]msgWithSenders)
@ -502,7 +502,7 @@ func (d *deDupedAnnouncements) reset() {
} }
// addMsg adds a new message to the current batch. If the message is already // addMsg adds a new message to the current batch. If the message is already
// persent in the current batch, then this new instance replaces the latter, // present in the current batch, then this new instance replaces the latter,
// and the set of senders is updated to reflect which node sent us this // and the set of senders is updated to reflect which node sent us this
// message. // message.
func (d *deDupedAnnouncements) addMsg(message networkMsg) { func (d *deDupedAnnouncements) addMsg(message networkMsg) {
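
The hunk ends at the method's signature; a simplified sketch of the replace-and-merge behaviour the comment describes, with stand-in types (the real batch is keyed by lnwire identifiers):

```go
package main

import "fmt"

// msgWithSenders pairs the latest copy of an announcement with the set of
// peers that have sent it to us.
type msgWithSenders struct {
	msg     string
	senders map[string]struct{}
}

// addMsg replaces any stored announcement for the same key with the new
// instance and records the sender, so we never echo a message back to a
// peer that already gave it to us.
func addMsg(batch map[string]msgWithSenders, key, msg, sender string) {
	mws, ok := batch[key]
	if !ok || mws.msg != msg {
		mws = msgWithSenders{msg: msg, senders: make(map[string]struct{})}
	}
	mws.senders[sender] = struct{}{}
	batch[key] = mws
}

func main() {
	batch := make(map[string]msgWithSenders)
	addMsg(batch, "chan-1", "update-v1", "alice")
	addMsg(batch, "chan-1", "update-v2", "bob") // replaces v1
	fmt.Printf("%q from %d sender(s)\n", batch["chan-1"].msg, len(batch["chan-1"].senders))
}
```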
@ -590,7 +590,7 @@ func (d *deDupedAnnouncements) addMsg(message networkMsg) {
sender := routing.NewVertex(message.peer) sender := routing.NewVertex(message.peer)
deDupKey := routing.NewVertex(msg.NodeID) deDupKey := routing.NewVertex(msg.NodeID)
// We do the same for node annonuncements as we did for channel // We do the same for node announcements as we did for channel
// updates, as they also carry a timestamp. // updates, as they also carry a timestamp.
oldTimestamp := uint32(0) oldTimestamp := uint32(0)
mws, ok := d.nodeAnnouncements[deDupKey] mws, ok := d.nodeAnnouncements[deDupKey]
@ -823,7 +823,7 @@ func (d *AuthenticatedGossiper) networkHandler() {
trickleTimer := time.NewTicker(d.cfg.TrickleDelay) trickleTimer := time.NewTicker(d.cfg.TrickleDelay)
defer trickleTimer.Stop() defer trickleTimer.Stop()
// To start, we'll first check to see if there're any stale channels // To start, we'll first check to see if there are any stale channels
// that we need to re-transmit. // that we need to re-transmit.
if err := d.retransmitStaleChannels(); err != nil { if err := d.retransmitStaleChannels(); err != nil {
log.Errorf("unable to rebroadcast stale channels: %v", log.Errorf("unable to rebroadcast stale channels: %v",
@ -861,7 +861,7 @@ func (d *AuthenticatedGossiper) networkHandler() {
policyUpdate.errResp <- nil policyUpdate.errResp <- nil
case announcement := <-d.networkMsgs: case announcement := <-d.networkMsgs:
// Channel annoucnement signatures are the only message // Channel announcement signatures are the only message
// that we'll process serially. // that we'll process serially.
if _, ok := announcement.msg.(*lnwire.AnnounceSignatures); ok { if _, ok := announcement.msg.(*lnwire.AnnounceSignatures); ok {
emittedAnnouncements := d.processNetworkAnnouncement( emittedAnnouncements := d.processNetworkAnnouncement(
@ -875,10 +875,10 @@ func (d *AuthenticatedGossiper) networkHandler() {
continue continue
} }
// We'll set up any dependant, and wait until a free // We'll set up any dependents, and wait until a free
// slot for this job opens up, this allows us to not // slot for this job opens up, this allows us to not
// have thousands of goroutines active. // have thousands of goroutines active.
validationBarrier.InitJobDependancies(announcement.msg) validationBarrier.InitJobDependencies(announcement.msg)
go func() { go func() {
defer validationBarrier.CompleteJob() defer validationBarrier.CompleteJob()
@ -1147,11 +1147,11 @@ func (d *AuthenticatedGossiper) processChanPolicyUpdate(
return chanUpdates, nil return chanUpdates, nil
} }
// processRejectedEdge examines a rejected edge to see if we can eexrtact any // processRejectedEdge examines a rejected edge to see if we can extract any
// new announcements from it. An edge will get rejected if we already added // new announcements from it. An edge will get rejected if we already added
// the same edge without AuthProof to the graph. If the received announcement // the same edge without AuthProof to the graph. If the received announcement
// contains a proof, we can add this proof to our edge. We can end up in this // contains a proof, we can add this proof to our edge. We can end up in this
// situatation in the case where we create a channel, but for some reason fail // situation in the case where we create a channel, but for some reason fail
// to receive the remote peer's proof, while the remote peer is able to fully // to receive the remote peer's proof, while the remote peer is able to fully
// assemble the proof and craft the ChannelAnnouncement. // assemble the proof and craft the ChannelAnnouncement.
func (d *AuthenticatedGossiper) processRejectedEdge(chanAnnMsg *lnwire.ChannelAnnouncement, func (d *AuthenticatedGossiper) processRejectedEdge(chanAnnMsg *lnwire.ChannelAnnouncement,
@ -1938,7 +1938,7 @@ func (d *AuthenticatedGossiper) sendAnnSigReliably(
// we do not succeed in sending it to the peer, we'll fetch it // we do not succeed in sending it to the peer, we'll fetch it
// from the DB next time we start, and retry. We use the peer ID // from the DB next time we start, and retry. We use the peer ID
// + shortChannelID as key, as there possibly is more than one // + shortChannelID as key, as there possibly is more than one
// channel oepning in progress to the same peer. // channel opening in progress to the same peer.
var key [41]byte var key [41]byte
copy(key[:33], remotePeer.SerializeCompressed()) copy(key[:33], remotePeer.SerializeCompressed())
binary.BigEndian.PutUint64(key[33:], msg.ShortChannelID.ToUint64()) binary.BigEndian.PutUint64(key[33:], msg.ShortChannelID.ToUint64())

@ -534,7 +534,7 @@ func TestProcessAnnouncement(t *testing.T) {
case msg := <-ctx.broadcastedMessage: case msg := <-ctx.broadcastedMessage:
assertSenderExistence(na.NodeID, msg) assertSenderExistence(na.NodeID, msg)
case <-time.After(2 * trickleDelay): case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't proceeded") t.Fatal("announcement wasn't processed")
} }
if len(ctx.router.nodes) != 1 { if len(ctx.router.nodes) != 1 {
@ -562,7 +562,7 @@ func TestProcessAnnouncement(t *testing.T) {
case msg := <-ctx.broadcastedMessage: case msg := <-ctx.broadcastedMessage:
assertSenderExistence(na.NodeID, msg) assertSenderExistence(na.NodeID, msg)
case <-time.After(2 * trickleDelay): case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't proceeded") t.Fatal("announcement wasn't processed")
} }
if len(ctx.router.infos) != 1 { if len(ctx.router.infos) != 1 {
@ -590,7 +590,7 @@ func TestProcessAnnouncement(t *testing.T) {
case msg := <-ctx.broadcastedMessage: case msg := <-ctx.broadcastedMessage:
assertSenderExistence(na.NodeID, msg) assertSenderExistence(na.NodeID, msg)
case <-time.After(2 * trickleDelay): case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't proceeded") t.Fatal("announcement wasn't processed")
} }
if len(ctx.router.edges) != 1 { if len(ctx.router.edges) != 1 {
@ -663,17 +663,17 @@ func TestPrematureAnnouncement(t *testing.T) {
select { select {
case <-ctx.broadcastedMessage: case <-ctx.broadcastedMessage:
case <-time.After(2 * trickleDelay): case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't broadcasted") t.Fatal("announcement wasn't broadcasted")
} }
if len(ctx.router.infos) != 1 { if len(ctx.router.infos) != 1 {
t.Fatalf("edge was't added to router: %v", err) t.Fatalf("edge wasn't added to router: %v", err)
} }
select { select {
case <-ctx.broadcastedMessage: case <-ctx.broadcastedMessage:
case <-time.After(2 * trickleDelay): case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't broadcasted") t.Fatal("announcement wasn't broadcasted")
} }
if len(ctx.router.edges) != 1 { if len(ctx.router.edges) != 1 {
@ -1132,7 +1132,7 @@ func TestSignatureAnnouncementRetry(t *testing.T) {
} }
// When the peer comes online, the gossiper gets notified, and should // When the peer comes online, the gossiper gets notified, and should
// retry sending the AnnnounceSignatures. We make the SendToPeer // retry sending the AnnounceSignatures. We make the SendToPeer
// method work again. // method work again.
sentToPeer := make(chan lnwire.Message, 1) sentToPeer := make(chan lnwire.Message, 1)
ctx.gossiper.cfg.SendToPeer = func(target *btcec.PublicKey, ctx.gossiper.cfg.SendToPeer = func(target *btcec.PublicKey,
@ -1141,7 +1141,7 @@ func TestSignatureAnnouncementRetry(t *testing.T) {
return nil return nil
} }
// Notify that peer is now online. THis should trigger a new call // Notify that peer is now online. This should trigger a new call
// to SendToPeer. // to SendToPeer.
close(conChan) close(conChan)
@ -1369,7 +1369,7 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
t.Fatalf("gossiper did not send message when peer came online") t.Fatalf("gossiper did not send message when peer came online")
} }
// Now exchanging the remote channel proof, the channel annoncement // Now exchanging the remote channel proof, the channel announcement
// broadcast should continue as normal. // broadcast should continue as normal.
select { select {
case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteProofAnn, case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteProofAnn,
@ -1562,7 +1562,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
case msg := <-sentToPeer: case msg := <-sentToPeer:
_, ok := msg.(*lnwire.ChannelAnnouncement) _, ok := msg.(*lnwire.ChannelAnnouncement)
if !ok { if !ok {
t.Fatalf("expected ChannelAnnouncement, intead got %T", msg) t.Fatalf("expected ChannelAnnouncement, instead got %T", msg)
} }
case <-time.After(2 * time.Second): case <-time.After(2 * time.Second):
t.Fatal("did not send local proof to peer") t.Fatal("did not send local proof to peer")
@ -1638,7 +1638,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
t.Fatal("channel update not replaced in batch") t.Fatal("channel update not replaced in batch")
} }
// Adding an announcment with a later timestamp should replace the // Adding an announcement with a later timestamp should replace the
// stored one. // stored one.
ua3, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp+1) ua3, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp+1)
if err != nil { if err != nil {
@ -1790,7 +1790,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
} }
// TestReceiveRemoteChannelUpdateFirst tests that if we receive a // TestReceiveRemoteChannelUpdateFirst tests that if we receive a
// CHannelUpdate from the remote before we have processed our // ChannelUpdate from the remote before we have processed our
// own ChannelAnnouncement, it will be reprocessed later, after // own ChannelAnnouncement, it will be reprocessed later, after
// our ChannelAnnouncement. // our ChannelAnnouncement.
func TestReceiveRemoteChannelUpdateFirst(t *testing.T) { func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
@ -1822,7 +1822,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
localKey := batch.nodeAnn1.NodeID localKey := batch.nodeAnn1.NodeID
remoteKey := batch.nodeAnn2.NodeID remoteKey := batch.nodeAnn2.NodeID
// Recreate the case where the remote node is snding us its ChannelUpdate // Recreate the case where the remote node is sending us its ChannelUpdate
// before we have been able to process our own ChannelAnnouncement and // before we have been able to process our own ChannelAnnouncement and
// ChannelUpdate. // ChannelUpdate.
err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2, remoteKey) err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2, remoteKey)

@ -10,8 +10,8 @@ import (
// createChanAnnouncement is a helper function which creates all channel // createChanAnnouncement is a helper function which creates all channel
// announcements given the necessary channel related database items. This // announcements given the necessary channel related database items. This
// function is used to transform out databse structs into the coresponding wire // function is used to transform our database structs into the corresponding wire
// sturcts for announcing new channels to other peers, or simply syncing up a // structs for announcing new channels to other peers, or simply syncing up a
// peer's initial routing table upon connect. // peer's initial routing table upon connect.
func createChanAnnouncement(chanProof *channeldb.ChannelAuthProof, func createChanAnnouncement(chanProof *channeldb.ChannelAuthProof,
chanInfo *channeldb.ChannelEdgeInfo, chanInfo *channeldb.ChannelEdgeInfo,

@ -280,7 +280,7 @@ bitcoins. The schema will be following:
+ --------------- + + --------------- +
(1) You may connect an additinal node "Bob" and make the multihop (1) You may connect an additional node "Bob" and make the multihop
payment Alice->Faucet->Bob payment Alice->Faucet->Bob
(2) "Faucet", "Alice" and "Bob" are the lightning network daemons which (2) "Faucet", "Alice" and "Bob" are the lightning network daemons which

@ -34,7 +34,7 @@ RUN "/go/bin/gencerts" --host="*" --directory="/rpc" --force
# shared with any lnd, btcctl containers so they can securely query btcd's RPC # shared with any lnd, btcctl containers so they can securely query btcd's RPC
# server. # server.
# You should NOT do this before certificate generation! # You should NOT do this before certificate generation!
# Otherwise manually generated certificate will be overriden with shared # Otherwise manually generated certificate will be overridden with shared
# mounted volume! For more info read dockerfile "VOLUME" documentation. # mounted volume! For more info read dockerfile "VOLUME" documentation.
VOLUME ["/rpc"] VOLUME ["/rpc"]

@ -95,12 +95,12 @@ volumes:
shared: shared:
driver: local driver: local
# bitcoin volume is needed for maintaining blockchain persistance # bitcoin volume is needed for maintaining blockchain persistence
# during btcd container recreation. # during btcd container recreation.
bitcoin: bitcoin:
driver: local driver: local
# litecoin volume is needed for maintaining blockchain persistance # litecoin volume is needed for maintaining blockchain persistence
# during ltcd container recreation. # during ltcd container recreation.
litecoin: litecoin:
driver: local driver: local

@ -18,7 +18,7 @@ RUN git clone https://github.com/lightningnetwork/lnd $GOPATH/src/github.com/lig
# Make lnd folder default. # Make lnd folder default.
WORKDIR $GOPATH/src/github.com/lightningnetwork/lnd WORKDIR $GOPATH/src/github.com/lightningnetwork/lnd
# Instll dependency and install/build lnd. # Install dependencies and install/build lnd.
RUN glide install RUN glide install
RUN go install . ./cmd/... RUN go install . ./cmd/...

@ -4,7 +4,7 @@
set -e set -e
# error function is used within a bash function in order to send the error # error function is used within a bash function in order to send the error
# mesage directly to the stderr output and exit. # message directly to the stderr output and exit.
error() { error() {
echo "$1" > /dev/stderr echo "$1" > /dev/stderr
exit 0 exit 0

@ -32,7 +32,7 @@ RUN "/go/bin/gencerts" --host="*" --directory="/rpc" --force
# shared with any lnd, btcctl containers so they can securely query ltcd's RPC # shared with any lnd, btcctl containers so they can securely query ltcd's RPC
# server. # server.
# You should NOT do this before certificate generation! # You should NOT do this before certificate generation!
# Otherwise manually generated certificate will be overriden with shared # Otherwise manually generated certificate will be overridden with shared
# mounted volume! For more info read dockerfile "VOLUME" documentation. # mounted volume! For more info read dockerfile "VOLUME" documentation.
VOLUME ["/rpc"] VOLUME ["/rpc"]

@ -271,7 +271,7 @@ Further paragraphs come after blank lines.
Here are some of the reasons why wrapping your commit messages to 72 columns is Here are some of the reasons why wrapping your commit messages to 72 columns is
a good thing. a good thing.
- git log doesnt do any special wrapping of the commit messages. With - git log doesn't do any special wrapping of the commit messages. With
the default pager of less -S, this means your paragraphs flow far off the edge the default pager of less -S, this means your paragraphs flow far off the edge
of the screen, making them difficult to read. On an 80 column terminal, if we of the screen, making them difficult to read. On an 80 column terminal, if we
subtract 4 columns for the indent on the left and 4 more for symmetry on the subtract 4 columns for the indent on the left and 4 more for symmetry on the
@ -298,7 +298,7 @@ Blocks of code within `lnd` should be segmented into logical stanzas of
operation. Such spacing makes the code easier to follow at a skim, and reduces operation. Such spacing makes the code easier to follow at a skim, and reduces
unnecessary line noise. Coupled with the commenting scheme specified above, unnecessary line noise. Coupled with the commenting scheme specified above,
proper spacing allows readers to quickly scan code, extracting semantics quickly. proper spacing allows readers to quickly scan code, extracting semantics quickly.
Functions should _not_ just be layed out as a bare contiguous block of code. Functions should _not_ just be laid out as a bare contiguous block of code.
**WRONG** **WRONG**
```go ```go

@ -131,7 +131,7 @@ Execute the following command in the directory where the **pom.xml** file is loc
``` ```
mvn compile exec:java -Dexec.mainClass="Main" -Dexec.cleanupDaemonThreads=false mvn compile exec:java -Dexec.mainClass="Main" -Dexec.cleanupDaemonThreads=false
``` ```
##### Sample ouput ##### Sample output
``` ```
[INFO] Scanning for projects... [INFO] Scanning for projects...
[INFO] ------------------------------------------------------------------------ [INFO] ------------------------------------------------------------------------

@ -44,7 +44,7 @@ Python gRPC.
#### Imports and Client #### Imports and Client
Everytime you use Python gRPC, you will have to import the generated rpc modules Every time you use Python gRPC, you will have to import the generated rpc modules
and set up a channel and stub to connect to your `lnd` node: and set up a channel and stub to connect to your `lnd` node:
```python ```python

@ -186,7 +186,7 @@ type fundingConfig struct {
SignMessage func(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error) SignMessage func(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error)
// CurrentNodeAnnouncement should return the latest, fully signed node // CurrentNodeAnnouncement should return the latest, fully signed node
// announcement from the backing Lighting Network node. // announcement from the backing Lightning Network node.
CurrentNodeAnnouncement func() (lnwire.NodeAnnouncement, error) CurrentNodeAnnouncement func() (lnwire.NodeAnnouncement, error)
// SendAnnouncement is used by the FundingManager to send // SendAnnouncement is used by the FundingManager to send
@ -981,7 +981,7 @@ func (f *fundingManager) processFundingAccept(msg *lnwire.AcceptChannel,
} }
} }
// handleFundingAceept processes a response to the workflow initiation sent by // handleFundingAccept processes a response to the workflow initiation sent by
// the remote peer. This message then queues a message with the funding // the remote peer. This message then queues a message with the funding
// outpoint, and a commitment signature to the remote peer. // outpoint, and a commitment signature to the remote peer.
func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) { func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) {

@ -891,7 +891,7 @@ func TestFundingManagerRestartBehavior(t *testing.T) {
return fmt.Errorf("intentional error in SendToPeer") return fmt.Errorf("intentional error in SendToPeer")
} }
alice.fundingMgr.cfg.NotifyWhenOnline = func(peer *btcec.PublicKey, con chan<- struct{}) { alice.fundingMgr.cfg.NotifyWhenOnline = func(peer *btcec.PublicKey, con chan<- struct{}) {
// Intetionally empty. // Intentionally empty.
} }
// Notify that transaction was mined // Notify that transaction was mined
@ -966,7 +966,7 @@ func TestFundingManagerRestartBehavior(t *testing.T) {
// Check that the state machine is updated accordingly // Check that the state machine is updated accordingly
assertAddedToRouterGraph(t, alice, bob, fundingOutPoint) assertAddedToRouterGraph(t, alice, bob, fundingOutPoint)
// Next, we check that Alice sends the annnouncement signatures // Next, we check that Alice sends the announcement signatures
// on restart after six confirmations. Bob should as expected send // on restart after six confirmations. Bob should as expected send
// them as well. // them as well.
recreateAliceFundingManager(t, alice) recreateAliceFundingManager(t, alice)

@ -42,7 +42,7 @@ type PaymentCircuit struct {
} }
// circuitKey is a channel ID, HTLC ID tuple used as an identifying key for a // circuitKey is a channel ID, HTLC ID tuple used as an identifying key for a
// payment circuit. The circuit map is keyed with the idenitifer for the // payment circuit. The circuit map is keyed with the identifier for the
// outgoing HTLC // outgoing HTLC
type circuitKey struct { type circuitKey struct {
chanID lnwire.ShortChannelID chanID lnwire.ShortChannelID
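
For illustration, a completed version of such a key might look like the sketch below; the excerpt truncates the real struct, so the htlcID field name is an assumption based on the comment:

```go
package htlcswitch

import "github.com/lightningnetwork/lnd/lnwire"

// circuitKey identifies a payment circuit by the outgoing channel plus the
// HTLC's index on that channel; the htlcID field name is hypothetical here,
// since the excerpt cuts the real definition short.
type circuitKey struct {
	chanID lnwire.ShortChannelID
	htlcID uint64
}
```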

@ -9,7 +9,7 @@ import (
) )
// NetworkHop indicates the blockchain network that is intended to be the next // NetworkHop indicates the blockchain network that is intended to be the next
// hop for a forwarded HTLC. The existnce of this field within the // hop for a forwarded HTLC. The existence of this field within the
// ForwardingInfo struct enables HTLCs to cross chain boundaries // ForwardingInfo struct enables HTLCs to cross chain boundaries
// at will. // at will.
type NetworkHop uint8 type NetworkHop uint8
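
A one-byte network tag like this is typically declared together with its constants; the constant names below are illustrative assumptions, not necessarily the ones the package defines:

```go
package hop

// NetworkHop tags the blockchain a forwarded HTLC should continue on.
type NetworkHop uint8

const (
	// BitcoinHop denotes that an HTLC is to be forwarded along the
	// Bitcoin chain.
	BitcoinHop NetworkHop = iota

	// LitecoinHop denotes that an HTLC is to be forwarded along the
	// Litecoin chain.
	LitecoinHop
)

// String renders the hop tag for logging.
func (n NetworkHop) String() string {
	switch n {
	case BitcoinHop:
		return "bitcoin"
	case LitecoinHop:
		return "litecoin"
	default:
		return "unknown"
	}
}
```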

@ -150,8 +150,8 @@ type ChannelLinkConfig struct {
// in thread-safe manner. // in thread-safe manner.
Registry InvoiceDatabase Registry InvoiceDatabase
// PreimageCache is a global witness baacon that houses any new // PreimageCache is a global witness beacon that houses any new
// preimges discovered by other links. We'll use this to add new // preimages discovered by other links. We'll use this to add new
// witnesses that we discover which will notify any sub-systems // witnesses that we discover which will notify any sub-systems
// subscribed to new events. // subscribed to new events.
PreimageCache contractcourt.WitnessBeacon PreimageCache contractcourt.WitnessBeacon
@ -532,7 +532,7 @@ func (l *channelLink) syncChanStates() error {
// a duplicate settle. // a duplicate settle.
htlcsSettled := make(map[uint64]struct{}) htlcsSettled := make(map[uint64]struct{})
for _, msg := range msgsToReSend { for _, msg := range msgsToReSend {
settleMsg, ok := msg.(*lnwire.UpdateFufillHTLC) settleMsg, ok := msg.(*lnwire.UpdateFulfillHTLC)
if !ok { if !ok {
// If this isn't a settle message, then we'll skip it. // If this isn't a settle message, then we'll skip it.
continue continue
@ -588,7 +588,7 @@ func (l *channelLink) syncChanStates() error {
return err return err
} }
l.batchCounter++ l.batchCounter++
l.cfg.Peer.SendMessage(&lnwire.UpdateFufillHTLC{ l.cfg.Peer.SendMessage(&lnwire.UpdateFulfillHTLC{
ChanID: l.ChanID(), ChanID: l.ChanID(),
ID: htlc.HtlcIndex, ID: htlc.HtlcIndex,
PaymentPreimage: p, PaymentPreimage: p,
@ -896,7 +896,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
htlc.ID = index htlc.ID = index
l.cfg.Peer.SendMessage(htlc) l.cfg.Peer.SendMessage(htlc)
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFulfillHTLC:
// An HTLC we forward to the switch has just settled somewhere // An HTLC we forward to the switch has just settled somewhere
// upstream. Therefore we settle the HTLC within our local // upstream. Therefore we settle the HTLC within our local
// state machine. // state machine.
@ -971,7 +971,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
log.Tracef("Receive upstream htlc with payment hash(%x), "+ log.Tracef("Receive upstream htlc with payment hash(%x), "+
"assigning index: %v", msg.PaymentHash[:], index) "assigning index: %v", msg.PaymentHash[:], index)
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFulfillHTLC:
pre := msg.PaymentPreimage pre := msg.PaymentPreimage
idx := msg.ID idx := msg.ID
if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil { if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil {
@ -1344,7 +1344,7 @@ func (l *channelLink) updateChannelFee(feePerKw btcutil.Amount) error {
feePerKw) feePerKw)
// We skip sending the UpdateFee message if the channel is not // We skip sending the UpdateFee message if the channel is not
// currently eligable to forward messages. // currently eligible to forward messages.
if !l.EligibleToForward() { if !l.EligibleToForward() {
log.Debugf("ChannelPoint(%v): skipping fee update for " + log.Debugf("ChannelPoint(%v): skipping fee update for " +
"inactive channel") "inactive channel")
@ -1391,7 +1391,7 @@ func (l *channelLink) processLockedInHtlcs(
outgoingChanID: l.ShortChanID(), outgoingChanID: l.ShortChanID(),
outgoingHTLCID: pd.ParentIndex, outgoingHTLCID: pd.ParentIndex,
amount: pd.Amount, amount: pd.Amount,
htlc: &lnwire.UpdateFufillHTLC{ htlc: &lnwire.UpdateFulfillHTLC{
PaymentPreimage: pd.RPreimage, PaymentPreimage: pd.RPreimage,
}, },
} }
@ -1644,7 +1644,7 @@ func (l *channelLink) processLockedInHtlcs(
// HTLC was successfully settled locally, send // HTLC was successfully settled locally, send
// notification about it to the remote peer. // notification about it to the remote peer.
l.cfg.Peer.SendMessage(&lnwire.UpdateFufillHTLC{ l.cfg.Peer.SendMessage(&lnwire.UpdateFulfillHTLC{
ChanID: l.ChanID(), ChanID: l.ChanID(),
ID: pd.HtlcIndex, ID: pd.HtlcIndex,
PaymentPreimage: preimage, PaymentPreimage: preimage,

@ -225,7 +225,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) {
// Wait for Bob to receive the revocation. // Wait for Bob to receive the revocation.
// //
// TODO(roasbef); replace with select over returned err chan // TODO(roasbeef): replace with select over returned err chan
time.Sleep(100 * time.Millisecond) time.Sleep(100 * time.Millisecond)
// Check that alice invoice was settled and bandwidth of HTLC // Check that alice invoice was settled and bandwidth of HTLC
@ -1338,7 +1338,7 @@ func TestChannelLinkSingleHopMessageOrdering(t *testing.T) {
{"bob", "alice", &lnwire.CommitSig{}, false}, {"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"bob", "alice", &lnwire.UpdateFufillHTLC{}, false}, {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false}, {"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"alice", "bob", &lnwire.CommitSig{}, false}, {"alice", "bob", &lnwire.CommitSig{}, false},
@ -1429,7 +1429,7 @@ func newSingleLinkTestHarness(chanAmt btcutil.Amount) (ChannelLink,
} }
var ( var (
invoiveRegistry = newMockRegistry() invoiceRegistry = newMockRegistry()
decoder = &mockIteratorDecoder{} decoder = &mockIteratorDecoder{}
obfuscator = newMockObfuscator() obfuscator = newMockObfuscator()
alicePeer = &mockPeer{ alicePeer = &mockPeer{
@ -1464,7 +1464,7 @@ func newSingleLinkTestHarness(chanAmt btcutil.Amount) (ChannelLink,
UpdateContractSignals: func(*contractcourt.ContractSignals) error { UpdateContractSignals: func(*contractcourt.ContractSignals) error {
return nil return nil
}, },
Registry: invoiveRegistry, Registry: invoiceRegistry,
ChainEvents: &contractcourt.ChainEventSubscription{}, ChainEvents: &contractcourt.ChainEventSubscription{},
BlockEpochs: globalEpoch, BlockEpochs: globalEpoch,
BatchTicker: ticker, BatchTicker: ticker,
@ -1754,7 +1754,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) {
if err != nil { if err != nil {
t.Fatalf("unable to settle htlc: %v", err) t.Fatalf("unable to settle htlc: %v", err)
} }
htlcSettle := &lnwire.UpdateFufillHTLC{ htlcSettle := &lnwire.UpdateFulfillHTLC{
ID: bobIndex, ID: bobIndex,
PaymentPreimage: invoice.Terms.PaymentPreimage, PaymentPreimage: invoice.Terms.PaymentPreimage,
} }
@ -1880,7 +1880,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) {
// we eventually learn (simulating a multi-hop payment). The bandwidth // we eventually learn (simulating a multi-hop payment). The bandwidth
// of the channel should now be re-balanced to the starting point. // of the channel should now be re-balanced to the starting point.
settlePkt := htlcPacket{ settlePkt := htlcPacket{
htlc: &lnwire.UpdateFufillHTLC{ htlc: &lnwire.UpdateFulfillHTLC{
ID: bobIndex, ID: bobIndex,
PaymentPreimage: invoice.Terms.PaymentPreimage, PaymentPreimage: invoice.Terms.PaymentPreimage,
}, },
@ -2143,7 +2143,7 @@ func TestChannelLinkBandwidthConsistencyOverflow(t *testing.T) {
t.Fatalf("unable to settle htlc: %v", err) t.Fatalf("unable to settle htlc: %v", err)
} }
htlcSettle := &lnwire.UpdateFufillHTLC{ htlcSettle := &lnwire.UpdateFulfillHTLC{
ID: uint64(i), ID: uint64(i),
PaymentPreimage: preImages[i], PaymentPreimage: preImages[i],
} }
@ -2235,13 +2235,13 @@ func TestChannelRetransmission(t *testing.T) {
// Alice should resend the revoke_and_ack // Alice should resend the revoke_and_ack
// message to Bob because Bob claimed it in the // message to Bob because Bob claimed it in the
// reestbalish message. // re-establish message.
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
// Proceed the payment further by sending the // Proceed the payment further by sending the
// fulfilment message and trigger the state // fulfilment message and trigger the state
// update. // update.
{"bob", "alice", &lnwire.UpdateFufillHTLC{}, false}, {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false}, {"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"alice", "bob", &lnwire.CommitSig{}, false}, {"alice", "bob", &lnwire.CommitSig{}, false},
@ -2283,7 +2283,7 @@ func TestChannelRetransmission(t *testing.T) {
// fulfilment message and trigger the state // fulfilment message and trigger the state
// update. // update.
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"bob", "alice", &lnwire.UpdateFufillHTLC{}, false}, {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false}, {"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"alice", "bob", &lnwire.CommitSig{}, false}, {"alice", "bob", &lnwire.CommitSig{}, false},
@ -2325,7 +2325,7 @@ func TestChannelRetransmission(t *testing.T) {
{"bob", "alice", &lnwire.CommitSig{}, false}, {"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"bob", "alice", &lnwire.UpdateFufillHTLC{}, false}, {"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false}, {"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false}, {"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"alice", "bob", &lnwire.CommitSig{}, false}, {"alice", "bob", &lnwire.CommitSig{}, false},

@ -52,7 +52,7 @@ func (m *mockPreimageCache) AddPreimage(preimage []byte) error {
return nil return nil
} }
func (m *mockPreimageCache) SubcribeUpdates() *contractcourt.WitnessSubcription { func (m *mockPreimageCache) SubscribeUpdates() *contractcourt.WitnessSubscription {
return nil return nil
} }
@ -356,7 +356,7 @@ func (s *mockServer) readHandler(message lnwire.Message) error {
switch msg := message.(type) { switch msg := message.(type) {
case *lnwire.UpdateAddHTLC: case *lnwire.UpdateAddHTLC:
targetChan = msg.ChanID targetChan = msg.ChanID
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFulfillHTLC:
targetChan = msg.ChanID targetChan = msg.ChanID
case *lnwire.UpdateFailHTLC: case *lnwire.UpdateFailHTLC:
targetChan = msg.ChanID targetChan = msg.ChanID

@ -469,7 +469,7 @@ func (s *Switch) handleLocalDispatch(packet *htlcPacket) error {
// We've just received a settle update which means we can finalize the // We've just received a settle update which means we can finalize the
// user payment and return successful response. // user payment and return successful response.
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFulfillHTLC:
// Notify the user that his payment was successfully processed. // Notify the user that his payment was successfully processed.
payment.err <- nil payment.err <- nil
payment.preimage <- htlc.PaymentPreimage payment.preimage <- htlc.PaymentPreimage
@ -652,7 +652,7 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error {
// We've just received a settle packet which means we can finalize the // We've just received a settle packet which means we can finalize the
// payment circuit by forwarding the settle msg to the channel from // payment circuit by forwarding the settle msg to the channel from
// which htlc add packet was initially received. // which htlc add packet was initially received.
case *lnwire.UpdateFufillHTLC, *lnwire.UpdateFailHTLC: case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC:
if !packet.isRouted { if !packet.isRouted {
// Use circuit map to find the link to forward settle/fail to. // Use circuit map to find the link to forward settle/fail to.
circuit := s.circuits.LookupByHTLC(packet.outgoingChanID, circuit := s.circuits.LookupByHTLC(packet.outgoingChanID,
@ -835,7 +835,7 @@ func (s *Switch) htlcForwarder() {
if resolutionMsg.Failure != nil { if resolutionMsg.Failure != nil {
pkt.htlc = &lnwire.UpdateFailHTLC{} pkt.htlc = &lnwire.UpdateFailHTLC{}
} else { } else {
pkt.htlc = &lnwire.UpdateFufillHTLC{ pkt.htlc = &lnwire.UpdateFulfillHTLC{
PaymentPreimage: *resolutionMsg.PreImage, PaymentPreimage: *resolutionMsg.PreImage,
} }
} }

@ -89,7 +89,7 @@ func TestSwitchForward(t *testing.T) {
outgoingChanID: bobChannelLink.ShortChanID(), outgoingChanID: bobChannelLink.ShortChanID(),
outgoingHTLCID: 0, outgoingHTLCID: 0,
amount: 1, amount: 1,
htlc: &lnwire.UpdateFufillHTLC{ htlc: &lnwire.UpdateFulfillHTLC{
PaymentPreimage: preimage, PaymentPreimage: preimage,
}, },
} }

@ -361,7 +361,7 @@ func getChanID(msg lnwire.Message) (lnwire.ChannelID, error) {
switch msg := msg.(type) { switch msg := msg.(type) {
case *lnwire.UpdateAddHTLC: case *lnwire.UpdateAddHTLC:
chanID = msg.ChanID chanID = msg.ChanID
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFulfillHTLC:
chanID = msg.ChanID chanID = msg.ChanID
case *lnwire.UpdateFailHTLC: case *lnwire.UpdateFailHTLC:
chanID = msg.ChanID chanID = msg.ChanID


@ -304,7 +304,7 @@ func lndMain() error {
for _, channel := range dbChannels { for _, channel := range dbChannels {
if chanID.IsChanPoint(&channel.FundingOutpoint) { if chanID.IsChanPoint(&channel.FundingOutpoint) {
// TODO(rosbeef): populate baecon // TODO(roasbeef): populate beacon
return lnwallet.NewLightningChannel( return lnwallet.NewLightningChannel(
activeChainControl.signer, activeChainControl.signer,
server.witnessBeacon, server.witnessBeacon,

@ -86,7 +86,7 @@ func (h *harnessTest) RunTestCase(testCase *testCase,
defer func() { defer func() {
if err := recover(); err != nil { if err := recover(); err != nil {
description := errors.Wrap(err, 2).ErrorStack() description := errors.Wrap(err, 2).ErrorStack()
h.t.Fatalf("Failed: (%v) paniced with: \n%v", h.t.Fatalf("Failed: (%v) panicked with: \n%v",
h.testCase.name, description) h.testCase.name, description)
} }
}() }()
@ -192,7 +192,7 @@ func openChannelAndAssert(ctx context.Context, t *harnessTest,
} }
// closeChannelAndAssert attempts to close a channel identified by the passed // closeChannelAndAssert attempts to close a channel identified by the passed
// channel point owned by the passed lighting node. A fully blocking channel // channel point owned by the passed Lightning node. A fully blocking channel
// closure is attempted, therefore the passed context should be a child derived // closure is attempted, therefore the passed context should be a child derived
// via timeout from a base parent. Additionally, once the channel has been // via timeout from a base parent. Additionally, once the channel has been
// detected as closed, an assertion checks that the transaction is found within // detected as closed, an assertion checks that the transaction is found within
@ -1346,7 +1346,7 @@ func assertNumForceClosedChannels(t *harnessTest,
} }
// assertPendingHtlcStageAndMaturity uniformly tests all pending htlc's // assertPendingHtlcStageAndMaturity uniformly tests all pending htlc's
// belonging to a force closed channel, testing for the expeced stage number, // belonging to a force closed channel, testing for the expected stage number,
// blocks till maturity, and the maturity height. // blocks till maturity, and the maturity height.
func assertPendingHtlcStageAndMaturity(t *harnessTest, func assertPendingHtlcStageAndMaturity(t *harnessTest,
forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel, forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
@ -2783,7 +2783,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
// Finally, we make sure Dave and Bob do not know about the // Finally, we make sure Dave and Bob do not know about the
// private channel between Carol and Alice. We first mine // private channel between Carol and Alice. We first mine
// plenty of blocks, such that the channel would have been // plenty of blocks, such that the channel would have been
// announceed in case it was public. // announced in case it was public.
mineBlocks(t, net, 10) mineBlocks(t, net, 10)
// We create a helper method to check how many edges each of the // We create a helper method to check how many edges each of the
@ -2890,7 +2890,7 @@ func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
// The invoice update should exactly match the invoice created // The invoice update should exactly match the invoice created
// above, but should now be settled and have SettleDate // above, but should now be settled and have SettleDate
if !invoiceUpdate.Settled { if !invoiceUpdate.Settled {
t.Fatalf("invoice not settled but shoudl be") t.Fatalf("invoice not settled but should be")
} }
if invoiceUpdate.SettleDate == 0 { if invoiceUpdate.SettleDate == 0 {
t.Fatalf("invoice should have non zero settle date, but doesn't") t.Fatalf("invoice should have non zero settle date, but doesn't")
@ -3029,7 +3029,7 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) {
// Mine 6 blocks, then wait for nodes to notify us that the channel has // Mine 6 blocks, then wait for nodes to notify us that the channel has
// been opened. The funding transactions should be found within the // been opened. The funding transactions should be found within the
// first newly mined block. 6 blocks make sure the funding transaction // first newly mined block. 6 blocks make sure the funding transaction
// has enouught confirmations to be announced publicly. // has enough confirmations to be announced publicly.
block := mineBlocks(t, net, 6)[0] block := mineBlocks(t, net, 6)[0]
chanPoints := make([]*lnrpc.ChannelPoint, maxPendingChannels) chanPoints := make([]*lnrpc.ChannelPoint, maxPendingChannels)
@ -3166,7 +3166,7 @@ func waitForNTxsInMempool(miner *rpcclient.Client, n int,
} }
} }
// testRevokedCloseRetributinPostBreachConf tests that Alice is able carry out // testRevokedCloseRetribution tests that Alice is able to carry out
// retribution in the event that she fails immediately after detecting Bob's // retribution in the event that she fails immediately after detecting Bob's
// breach txn in the mempool. // breach txn in the mempool.
func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) { func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
@ -3700,7 +3700,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
// We'll introduce another closure to validate that Carol's current // We'll introduce another closure to validate that Carol's current
// number of updates is at least as large as the provided minimum // number of updates is at least as large as the provided minimum
// number. // number.
checkCarolNumUpdatesAtleast := func(minimum uint64) { checkCarolNumUpdatesAtLeast := func(minimum uint64) {
carolChan, err := getCarolChanInfo() carolChan, err := getCarolChanInfo()
if err != nil { if err != nil {
t.Fatalf("unable to get carol's channel info: %v", err) t.Fatalf("unable to get carol's channel info: %v", err)
@ -3748,7 +3748,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
checkCarolBalance(pushAmt) checkCarolBalance(pushAmt)
// Since Carol has not settled, she should only see at least one update // Since Carol has not settled, she should only see at least one update
// to her channel. // to her channel.
checkCarolNumUpdatesAtleast(1) checkCarolNumUpdatesAtLeast(1)
// Create a temporary file to house Carol's database state at this // Create a temporary file to house Carol's database state at this
// particular point in history. // particular point in history.
@ -3778,7 +3778,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
// pushed to her, and that at least one more update has occurred. // pushed to her, and that at least one more update has occurred.
time.Sleep(500 * time.Millisecond) time.Sleep(500 * time.Millisecond)
checkCarolBalance(pushAmt) checkCarolBalance(pushAmt)
checkCarolNumUpdatesAtleast(carolStateNumPreCopy + 1) checkCarolNumUpdatesAtLeast(carolStateNumPreCopy + 1)
// Now we shut down Carol, copying over her temporary database state // Now we shut down Carol, copying over her temporary database state
// which has the *prior* channel state over her current most up to date // which has the *prior* channel state over her current most up to date
@ -3795,7 +3795,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
// Ensure that Carol's view of the channel is consistent with the // Ensure that Carol's view of the channel is consistent with the
// state of the channel just before it was snapshotted. // state of the channel just before it was snapshotted.
checkCarolBalance(pushAmt) checkCarolBalance(pushAmt)
checkCarolNumUpdatesAtleast(1) checkCarolNumUpdatesAtLeast(1)
// Now query for Carol's channel state, it should show that she's at a // Now query for Carol's channel state, it should show that she's at a
// state number in the past, *not* the latest state. // state number in the past, *not* the latest state.
@ -4045,7 +4045,7 @@ out:
t.Fatalf("unable to send payment: %v", err) t.Fatalf("unable to send payment: %v", err)
} }
// The payment should've resulted in an error since we sent it with the // The payment should have resulted in an error since we sent it with the
// wrong payment hash. // wrong payment hash.
if resp.PaymentError == "" { if resp.PaymentError == "" {
t.Fatalf("payment should have been rejected due to invalid " + t.Fatalf("payment should have been rejected due to invalid " +
@ -4222,7 +4222,7 @@ func subscribeGraphNotifications(t *harnessTest, ctxb context.Context,
t.Fatalf("unable to create topology client: %v", err) t.Fatalf("unable to create topology client: %v", err)
} }
// We'll launch a goroutine that'll be responsible for proxying all // We'll launch a goroutine that will be responsible for proxying all
// notifications recv'd from the client into the channel below. // notifications recv'd from the client into the channel below.
quit := make(chan struct{}) quit := make(chan struct{})
graphUpdates := make(chan *lnrpc.GraphTopologyUpdate, 20) graphUpdates := make(chan *lnrpc.GraphTopologyUpdate, 20)
@ -4270,7 +4270,7 @@ func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest)
chanPoint := openChannelAndAssert(ctxt, t, net, net.Alice, net.Bob, chanPoint := openChannelAndAssert(ctxt, t, net, net.Alice, net.Bob,
chanAmt, 0) chanAmt, 0)
// The channel opening above should've triggered a few notifications // The channel opening above should have triggered a few notifications
// sent to the notification client. We'll expect two channel updates, // sent to the notification client. We'll expect two channel updates,
// and two node announcements. // and two node announcements.
const numExpectedUpdates = 4 const numExpectedUpdates = 4

@ -9,7 +9,7 @@ protoc -I/usr/local/include -I. \
# Generate the REST reverse prozxy. # Generate the REST reverse proxy.
protoc -I/usr/local/include -I. \ protoc -I/usr/local/include -I. \
-I$GOPATH/src \ -I$GOPATH/src \
-I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \ -I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \

@ -299,7 +299,7 @@ service Lightning {
} }
/** lncli: `lookupinvoice` /** lncli: `lookupinvoice`
LookupInvoice attemps to look up an invoice according to its payment hash. LookupInvoice attempts to look up an invoice according to its payment hash.
The passed payment hash *must* be exactly 32 bytes, if not, an error is The passed payment hash *must* be exactly 32 bytes, if not, an error is
returned. returned.
*/ */
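
The exactly-32-bytes requirement amounts to a guard like this sketch (the helper name is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// validatePaymentHash enforces the exact 32-byte length LookupInvoice
// requires before any database lookup is attempted.
func validatePaymentHash(rHash []byte) error {
	if len(rHash) != 32 {
		return errors.New("payment hash must be exactly 32 bytes")
	}
	return nil
}

func main() {
	fmt.Println(validatePaymentHash(make([]byte, 31))) // error
	fmt.Println(validatePaymentHash(make([]byte, 32))) // <nil>
}
```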
@ -389,7 +389,7 @@ service Lightning {
route to a target destination capable of carrying a specific amount of route to a target destination capable of carrying a specific amount of
satoshis. The returned route contains the full details required to craft and satoshis. The returned route contains the full details required to craft and
send an HTLC, also including the necessary information that should be send an HTLC, also including the necessary information that should be
present within the Sphinx packet encapsualted within the HTLC. present within the Sphinx packet encapsulated within the HTLC.
*/ */
rpc QueryRoutes(QueryRoutesRequest) returns (QueryRoutesResponse) { rpc QueryRoutes(QueryRoutesRequest) returns (QueryRoutesResponse) {
option (google.api.http) = { option (google.api.http) = {
@ -610,7 +610,7 @@ message VerifyMessageRequest {
/// The message over which the signature is to be verified /// The message over which the signature is to be verified
bytes msg = 1 [ json_name = "msg" ]; bytes msg = 1 [ json_name = "msg" ];
/// The signature to be verifed over the given message /// The signature to be verified over the given message
string signature = 2 [ json_name = "signature" ]; string signature = 2 [ json_name = "signature" ];
} }
message VerifyMessageResponse { message VerifyMessageResponse {
@ -864,7 +864,7 @@ message OpenChannelRequest {
/// The pubkey of the node to open a channel with /// The pubkey of the node to open a channel with
bytes node_pubkey = 2 [json_name = "node_pubkey"]; bytes node_pubkey = 2 [json_name = "node_pubkey"];
/// The hex encorded pubkey of the node to open a channel with /// The hex encoded pubkey of the node to open a channel with
string node_pubkey_string = 3 [json_name = "node_pubkey_string"]; string node_pubkey_string = 3 [json_name = "node_pubkey_string"];
/// The number of satoshis the wallet should commit to the channel /// The number of satoshis the wallet should commit to the channel

@ -352,7 +352,7 @@
}, },
"/v1/graph/routes/{pub_key}/{amt}": { "/v1/graph/routes/{pub_key}/{amt}": {
"get": { "get": {
"summary": "* lncli: `queryroutes`\nQueryRoutes attempts to query the daemon's Channel Router for a possible\nroute to a target destination capable of carrying a specific amount of\nsatoshis. The retuned route contains the full details required to craft and\nsend an HTLC, also including the necessary information that should be\npresent within the Sphinx packet encapsualted within the HTLC.", "summary": "* lncli: `queryroutes`\nQueryRoutes attempts to query the daemon's Channel Router for a possible\nroute to a target destination capable of carrying a specific amount of\nsatoshis. The retuned route contains the full details required to craft and\nsend an HTLC, also including the necessary information that should be\npresent within the Sphinx packet encapsulated within the HTLC.",
"operationId": "QueryRoutes", "operationId": "QueryRoutes",
"responses": { "responses": {
"200": { "200": {
@ -384,7 +384,7 @@
}, },
"/v1/invoice/{r_hash_str}": { "/v1/invoice/{r_hash_str}": {
"get": { "get": {
"summary": "* lncli: `lookupinvoice`\nLookupInvoice attemps to look up an invoice according to its payment hash.\nThe passed payment hash *must* be exactly 32 bytes, if not, an error is\nreturned.", "summary": "* lncli: `lookupinvoice`\nLookupInvoice attempts to look up an invoice according to its payment hash.\nThe passed payment hash *must* be exactly 32 bytes, if not, an error is\nreturned.",
"operationId": "LookupInvoice", "operationId": "LookupInvoice",
"responses": { "responses": {
"200": { "200": {
@ -1563,7 +1563,7 @@
}, },
"node_pubkey_string": { "node_pubkey_string": {
"type": "string", "type": "string",
"title": "/ The hex encorded pubkey of the node to open a channel with" "title": "/ The hex encoded pubkey of the node to open a channel with"
}, },
"local_funding_amount": { "local_funding_amount": {
"type": "string", "type": "string",

@ -53,7 +53,7 @@ type NetworkHarness struct {
// NewNetworkHarness creates a new network test harness. // NewNetworkHarness creates a new network test harness.
// TODO(roasbeef): add option to use golang's build library to build a binary of the // TODO(roasbeef): add option to use golang's build library to build a binary of the
// current repo. This'll save developers from having to manually `go install` // current repo. This will save developers from having to manually `go install`
// within the repo each time before changes // within the repo each time before changes
func NewNetworkHarness(r *rpctest.Harness) (*NetworkHarness, error) { func NewNetworkHarness(r *rpctest.Harness) (*NetworkHarness, error) {
n := NetworkHarness{ n := NetworkHarness{
@ -232,7 +232,7 @@ func (n *NetworkHarness) TearDownAll() error {
return nil return nil
} }
// NewNode fully initializes a returns a new HarnessNode binded to the // NewNode fully initializes and returns a new HarnessNode bound to the
// current instance of the network harness. The created node is running, but // current instance of the network harness. The created node is running, but
// not yet connected to other nodes within the network. // not yet connected to other nodes within the network.
func (n *NetworkHarness) NewNode(extraArgs []string) (*HarnessNode, error) { func (n *NetworkHarness) NewNode(extraArgs []string) (*HarnessNode, error) {
@ -681,7 +681,7 @@ func (n *NetworkHarness) CloseChannel(ctx context.Context,
} }
// Next, we'll fetch the target channel in order to get the // Next, we'll fetch the target channel in order to get the
// harness node that'll be receiving the channel close request. // harness node that will be receiving the channel close request.
targetChan, err := filterChannel(lnNode, chanPoint) targetChan, err := filterChannel(lnNode, chanPoint)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
@ -35,7 +35,7 @@ var (
// defaultNodePort is the initial p2p port which will be used by the // defaultNodePort is the initial p2p port which will be used by the
// first created lightning node to listen on for incoming p2p // first created lightning node to listen on for incoming p2p
// connections. Subsequent allocated ports for future lighting nodes // connections. Subsequently allocated ports for future Lightning node
// instances will be monotonically increasing numbers calculated as // instances will be monotonically increasing numbers calculated as
// such: defaultP2pPort + (3 * harness.nodeNum). // such: defaultP2pPort + (3 * harness.nodeNum).
defaultNodePort = 19555 defaultNodePort = 19555
@ -317,7 +317,7 @@ func (hn *HarnessNode) start(lndError chan<- error) error {
} }
copy(hn.PubKey[:], pubkey) copy(hn.PubKey[:], pubkey)
// Launch the watcher that'll hook into graph related topology change // Launch the watcher that will hook into graph related topology change
// from the PoV of this node. // from the PoV of this node.
hn.wg.Add(1) hn.wg.Add(1)
go hn.lightningNetworkWatcher() go hn.lightningNetworkWatcher()
@ -86,7 +86,7 @@ func New(cfg Config) (*BtcWallet, error) {
} }
} else { } else {
// Wallet has been created and been initialized at this point, // Wallet has been created and been initialized at this point,
// open it along with all the required DB namepsaces, and the // open it along with all the required DB namespaces, and the
// DB itself. // DB itself.
wallet, err = loader.OpenExistingWallet(pubPass, false) wallet, err = loader.OpenExistingWallet(pubPass, false)
if err != nil { if err != nil {
@ -33,7 +33,7 @@ func (b *BtcWallet) FetchInputInfo(prevOut *wire.OutPoint) (*wire.TxOut, error)
} }
b.cacheMtx.RUnlock() b.cacheMtx.RUnlock()
// Otherwse, we manually look up the output within the tx store. // Otherwise, we manually look up the output within the tx store.
txid := &prevOut.Hash txid := &prevOut.Hash
txDetail, err := base.UnstableAPI(b.wallet).TxDetails(txid) txDetail, err := base.UnstableAPI(b.wallet).TxDetails(txid)
if err != nil { if err != nil {
@ -180,7 +180,7 @@ func (b *BtcWallet) ComputeInputScript(tx *wire.MsgTx,
pubKey := privKey.PubKey() pubKey := privKey.PubKey()
pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed()) pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed())
// Next, we'll generate a valid sigScript that'll allow us to // Next, we'll generate a valid sigScript that will allow us to
// spend the p2sh output. The sigScript will contain only a // spend the p2sh output. The sigScript will contain only a
// single push of the p2wkh witness program corresponding to // single push of the p2wkh witness program corresponding to
// the matching public key of this address. // the matching public key of this address.
@ -31,7 +31,7 @@ var (
// that has already been closed or is in the process of being closed. // that has already been closed or is in the process of being closed.
ErrChanClosing = fmt.Errorf("channel is being closed, operation disallowed") ErrChanClosing = fmt.Errorf("channel is being closed, operation disallowed")
// ErrNoWindow is returned when revocation window is exausted. // ErrNoWindow is returned when revocation window is exhausted.
ErrNoWindow = fmt.Errorf("unable to sign new commitment, the current" + ErrNoWindow = fmt.Errorf("unable to sign new commitment, the current" +
" revocation window is exhausted") " revocation window is exhausted")
@ -941,7 +941,7 @@ type updateLog struct {
// htlcCounter is a monotonically increasing integer that tracks the // htlcCounter is a monotonically increasing integer that tracks the
// total number of offered HTLC's by the owner of this update log. We // total number of offered HTLC's by the owner of this update log. We
// use a distinct index for this purpose, as updates that remove // use a distinct index for this purpose, as updates that remove
// entires from the log will be indexed using this counter. // entries from the log will be indexed using this counter.
htlcCounter uint64 htlcCounter uint64
// List is the updatelog itself, we embed this value so updateLog has // List is the updatelog itself, we embed this value so updateLog has
@ -953,7 +953,7 @@ type updateLog struct {
updateIndex map[uint64]*list.Element updateIndex map[uint64]*list.Element
// offerIndex is an index that maps the counter for offered HTLC's to // offerIndex is an index that maps the counter for offered HTLC's to
// their list elemtn within the main list.List. // their list element within the main list.List.
htlcIndex map[uint64]*list.Element htlcIndex map[uint64]*list.Element
} }
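For context, the dual-index pattern this hunk documents can be shown with a minimal stdlib-only Go sketch (all names below are hypothetical, not lnd's actual types): an embedded container/list.List holds the entries, while a map keyed by a monotonically increasing counter points at the list elements so removals are O(1).

    package main

    import (
        "container/list"
        "fmt"
    )

    // miniLog pairs an embedded list with a counter-keyed index into its
    // elements, mirroring the updateIndex/htlcIndex layout described above.
    type miniLog struct {
        logIndex uint64
        *list.List
        updateIndex map[uint64]*list.Element
    }

    func newMiniLog() *miniLog {
        return &miniLog{
            List:        list.New(),
            updateIndex: make(map[uint64]*list.Element),
        }
    }

    // append adds an entry and indexes it under the current counter value.
    func (m *miniLog) append(v interface{}) uint64 {
        idx := m.logIndex
        m.updateIndex[idx] = m.PushBack(v)
        m.logIndex++
        return idx
    }

    // remove deletes an entry by its counter in O(1) via the index.
    func (m *miniLog) remove(idx uint64) {
        if e, ok := m.updateIndex[idx]; ok {
            m.Remove(e)
            delete(m.updateIndex, idx)
        }
    }

    func main() {
        l := newMiniLog()
        first := l.append("add htlc 0")
        l.append("add htlc 1")
        l.remove(first)
        fmt.Println("entries remaining:", l.Len()) // entries remaining: 1
    }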
@ -1130,7 +1130,7 @@ type LightningChannel struct {
// Capacity is the total capacity of this channel. // Capacity is the total capacity of this channel.
Capacity btcutil.Amount Capacity btcutil.Amount
// stateHintObfuscator is a 48-bit state hint that's used to obfsucate // stateHintObfuscator is a 48-bit state hint that's used to obfuscate
// the current state number on the commitment transactions. // the current state number on the commitment transactions.
stateHintObfuscator [StateHintSize]byte stateHintObfuscator [StateHintSize]byte
@ -1387,7 +1387,7 @@ func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
// For HTLC's we're offered, we'll fetch the original offered HTLC // For HTLC's we're offered, we'll fetch the original offered HTLC
// from the remote party's update log so we can retrieve the same // from the remote party's update log so we can retrieve the same
// PaymentDescriptor that SettleHTLC would produce. // PaymentDescriptor that SettleHTLC would produce.
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFulfillHTLC:
ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID) ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
pd = &PaymentDescriptor{ pd = &PaymentDescriptor{
@ -2620,7 +2620,7 @@ func (lc *LightningChannel) createCommitDiff(
logUpdate.UpdateMsg = htlc logUpdate.UpdateMsg = htlc
case Settle: case Settle:
logUpdate.UpdateMsg = &lnwire.UpdateFufillHTLC{ logUpdate.UpdateMsg = &lnwire.UpdateFulfillHTLC{
ChanID: chanID, ChanID: chanID,
ID: pd.ParentIndex, ID: pd.ParentIndex,
PaymentPreimage: pd.RPreimage, PaymentPreimage: pd.RPreimage,
@ -3568,9 +3568,9 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) ([]*P
continue continue
} }
uncomitted := (htlc.addCommitHeightRemote == 0 || uncommitted := (htlc.addCommitHeightRemote == 0 ||
htlc.addCommitHeightLocal == 0) htlc.addCommitHeightLocal == 0)
if htlc.EntryType == Add && uncomitted { if htlc.EntryType == Add && uncommitted {
continue continue
} }
@ -4290,14 +4290,14 @@ func newOutgoingHtlcResolution(signer Signer, localChanCfg *channeldb.ChannelCon
if !localCommit { if !localCommit {
// First, we'll re-generate the script used to send the HTLC to // First, we'll re-generate the script used to send the HTLC to
// the remote party within their commitment transaction. // the remote party within their commitment transaction.
htlcReciverScript, err := receiverHTLCScript(htlc.RefundTimeout, htlcReceiverScript, err := receiverHTLCScript(htlc.RefundTimeout,
keyRing.LocalHtlcKey, keyRing.RemoteHtlcKey, keyRing.LocalHtlcKey, keyRing.RemoteHtlcKey,
keyRing.RevocationKey, htlc.RHash[:], keyRing.RevocationKey, htlc.RHash[:],
) )
if err != nil { if err != nil {
return nil, err return nil, err
} }
htlcScriptHash, err := witnessScriptHash(htlcReciverScript) htlcScriptHash, err := witnessScriptHash(htlcReceiverScript)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -4310,7 +4310,7 @@ func newOutgoingHtlcResolution(signer Signer, localChanCfg *channeldb.ChannelCon
SweepSignDesc: SignDescriptor{ SweepSignDesc: SignDescriptor{
PubKey: localChanCfg.HtlcBasePoint, PubKey: localChanCfg.HtlcBasePoint,
SingleTweak: keyRing.LocalHtlcKeyTweak, SingleTweak: keyRing.LocalHtlcKeyTweak,
WitnessScript: htlcReciverScript, WitnessScript: htlcReceiverScript,
Output: &wire.TxOut{ Output: &wire.TxOut{
PkScript: htlcScriptHash, PkScript: htlcScriptHash,
Value: int64(htlc.Amt.ToSatoshis()), Value: int64(htlc.Amt.ToSatoshis()),
@ -4621,8 +4621,8 @@ type ForceCloseSummary struct {
ChanPoint wire.OutPoint ChanPoint wire.OutPoint
// CloseTx is the transaction which closed the channel on-chain. If we // CloseTx is the transaction which closed the channel on-chain. If we
// initiate the force close, then this'll be our latest commitment // initiate the force close, then this will be our latest commitment
// state. Otherwise, this'll be the state that the remote peer // state. Otherwise, this will be the state that the remote peer
// broadcasted on-chain. // broadcasted on-chain.
CloseTx *wire.MsgTx CloseTx *wire.MsgTx
@ -5327,7 +5327,7 @@ func (lc *LightningChannel) ActiveHtlcs() []channeldb.HTLC {
// We'll only return HTLC's that are locked into *both* commitment // We'll only return HTLC's that are locked into *both* commitment
// transactions. So we'll iterate through their set of HTLC's to note // transactions. So we'll iterate through their set of HTLC's to note
// which ones are present on thir commitment. // which ones are present on their commitment.
remoteHtlcs := make(map[[32]byte]struct{}) remoteHtlcs := make(map[[32]byte]struct{})
for _, htlc := range lc.channelState.RemoteCommitment.Htlcs { for _, htlc := range lc.channelState.RemoteCommitment.Htlcs {
onionHash := sha256.Sum256(htlc.OnionBlob[:]) onionHash := sha256.Sum256(htlc.OnionBlob[:])

@ -2023,7 +2023,7 @@ func TestUpdateFeeFail(t *testing.T) {
} }
// TestUpdateFeeSenderCommits veriefies that the state machine progresses as // TestUpdateFeeSenderCommits verifies that the state machine progresses as
// expected if we send a fee update, and then the sender of the fee update // expected if we send a fee update, and then the sender of the fee update
// sends a commitment signature. // sends a commitment signature.
func TestUpdateFeeSenderCommits(t *testing.T) { func TestUpdateFeeSenderCommits(t *testing.T) {
@ -2103,7 +2103,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) {
// that Bob's received everything up to the signature she sent, // that Bob's received everything up to the signature she sent,
// including the HTLC and fee update. // including the HTLC and fee update.
if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil { if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
t.Fatalf("alice unable to rocess bob's revocation: %v", err) t.Fatalf("alice unable to process bob's revocation: %v", err)
} }
// Alice receives new signature from Bob, and assumes this covers the // Alice receives new signature from Bob, and assumes this covers the
@ -2198,7 +2198,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
// Bob receives the revocation of the old commitment // Bob receives the revocation of the old commitment
if _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil { if _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
t.Fatalf("alice unable to rocess bob's revocation: %v", err) t.Fatalf("alice unable to process bob's revocation: %v", err)
} }
// Alice will sign next commitment. Since she sent the revocation, she // Alice will sign next commitment. Since she sent the revocation, she
@ -2239,7 +2239,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
t.Fatalf("alice unable to sign commitment: %v", err) t.Fatalf("alice unable to sign commitment: %v", err)
} }
// Alice receives revokation from Bob, and can now be sure that Bob // Alice receives revocation from Bob, and can now be sure that Bob
// received the two updates, and they are considered locked in. // received the two updates, and they are considered locked in.
if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil { if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
t.Fatalf("bob unable to process alice's revocation: %v", err) t.Fatalf("bob unable to process alice's revocation: %v", err)
@ -2383,7 +2383,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) {
// Bob's received everything up to the signature she sent, including the // Bob's received everything up to the signature she sent, including the
// HTLC and fee update. // HTLC and fee update.
if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil { if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
t.Fatalf("alice unable to rocess bob's revocation: %v", err) t.Fatalf("alice unable to process bob's revocation: %v", err)
} }
// Alice receives new signature from Bob, and assumes this covers the // Alice receives new signature from Bob, and assumes this covers the
@ -2731,7 +2731,7 @@ func TestChanSyncOweCommitment(t *testing.T) {
// Each of the settle messages that Alice sent should match her // Each of the settle messages that Alice sent should match her
// original intent. // original intent.
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
settleMsg, ok := aliceMsgsToSend[i].(*lnwire.UpdateFufillHTLC) settleMsg, ok := aliceMsgsToSend[i].(*lnwire.UpdateFulfillHTLC)
if !ok { if !ok {
t.Fatalf("expected a htlc settle message, "+ t.Fatalf("expected a htlc settle message, "+
"instead have %v", spew.Sdump(settleMsg)) "instead have %v", spew.Sdump(settleMsg))
@ -3489,7 +3489,7 @@ func TestFeeUpdateRejectInsaneFee(t *testing.T) {
// Both Alice and Bob should reject this new fee rate as it is far too // Both Alice and Bob should reject this new fee rate as it is far too
// large. // large.
if err := aliceChannel.UpdateFee(newFeeRate); err == nil { if err := aliceChannel.UpdateFee(newFeeRate); err == nil {
t.Fatalf("alice should've rejected fee update") t.Fatalf("alice should have rejected fee update")
} }
} }
@ -205,7 +205,7 @@ var _ FeeEstimator = (*BtcdFeeEstimator)(nil)
// BitcoindFeeEstimator is an implementation of the FeeEstimator interface // BitcoindFeeEstimator is an implementation of the FeeEstimator interface
// backed by the RPC interface of an active bitcoind node. This implementation // backed by the RPC interface of an active bitcoind node. This implementation
// will proxy any fee estimation requests to bitcoind's RPC interace. // will proxy any fee estimation requests to bitcoind's RPC interface.
type BitcoindFeeEstimator struct { type BitcoindFeeEstimator struct {
// fallBackFeeRate is the fall back fee rate in satoshis per byte that // fallBackFeeRate is the fall back fee rate in satoshis per byte that
// is returned if the fee estimator does not yet have enough data to // is returned if the fee estimator does not yet have enough data to
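The fallback behavior this comment describes can be sketched independently of any real bitcoind RPC; the estimator type and query hook below are invented for illustration, not lnwallet's actual interface.

    package main

    import (
        "errors"
        "fmt"
    )

    // feeEstimator proxies estimates to a backend query, falling back to a
    // static sat/byte rate while the backend lacks data.
    type feeEstimator struct {
        fallBackFeeRate uint64
        query           func(numBlocks uint32) (uint64, error)
    }

    func (e *feeEstimator) estimateFeePerByte(numBlocks uint32) uint64 {
        if rate, err := e.query(numBlocks); err == nil && rate > 0 {
            return rate
        }
        return e.fallBackFeeRate
    }

    func main() {
        e := &feeEstimator{
            fallBackFeeRate: 25,
            query: func(uint32) (uint64, error) {
                return 0, errors.New("not enough data")
            },
        }
        fmt.Println(e.estimateFeePerByte(6), "sat/byte") // 25 sat/byte
    }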
@ -503,7 +503,7 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness,
lnwire.FFAnnounceChannel, lnwire.FFAnnounceChannel,
) )
if _, ok := err.(*lnwallet.ErrInsufficientFunds); !ok { if _, ok := err.(*lnwallet.ErrInsufficientFunds); !ok {
t.Fatalf("coin selection succeded should have insufficient funds: %v", t.Fatalf("coin selection succeeded should have insufficient funds: %v",
err) err)
} }
@ -537,7 +537,7 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness,
} }
} }
func testCancelNonExistantReservation(miner *rpctest.Harness, func testCancelNonExistentReservation(miner *rpctest.Harness,
alice, _ *lnwallet.LightningWallet, t *testing.T) { alice, _ *lnwallet.LightningWallet, t *testing.T) {
feeRate, err := alice.Cfg.FeeEstimator.EstimateFeePerWeight(1) feeRate, err := alice.Cfg.FeeEstimator.EstimateFeePerWeight(1)
@ -574,7 +574,7 @@ func testReservationInitiatorBalanceBelowDustCancel(miner *rpctest.Harness,
) )
switch { switch {
case err == nil: case err == nil:
t.Fatalf("initialization should've failed due to " + t.Fatalf("initialization should have failed due to " +
"insufficient local amount") "insufficient local amount")
case !strings.Contains(err.Error(), "local output is too small"): case !strings.Contains(err.Error(), "local output is too small"):
@ -984,7 +984,7 @@ func testListTransactionDetails(miner *rpctest.Harness,
} }
// We assert that the value is greater than the amount we // We assert that the value is greater than the amount we
// attempted to send, as the wallet should've paid some amount // attempted to send, as the wallet should have paid some amount
// of network fees. // of network fees.
if txDetail.Value >= -outputAmt { if txDetail.Value >= -outputAmt {
fmt.Println(spew.Sdump(txDetail)) fmt.Println(spew.Sdump(txDetail))
@ -1477,7 +1477,7 @@ var walletTests = []walletTestCase{
}, },
{ {
name: "test cancel non-existent reservation", name: "test cancel non-existent reservation",
test: testCancelNonExistantReservation, test: testCancelNonExistentReservation,
}, },
{ {
name: "reorg wallet balance", name: "reorg wallet balance",
@ -1573,7 +1573,7 @@ func waitForWalletSync(r *rpctest.Harness, w *lnwallet.LightningWallet) error {
} }
// TestInterfaces tests all registered interfaces with a unified set of tests // TestInterfaces tests all registered interfaces with a unified set of tests
// which excersie each of the required methods found within the WalletController // which exercise each of the required methods found within the WalletController
// interface. // interface.
// //
// NOTE: In the future, when additional implementations of the WalletController // NOTE: In the future, when additional implementations of the WalletController
@ -1590,7 +1590,7 @@ func TestLightningWallet(t *testing.T) {
// Initialize the harness around a btcd node which will serve as our // Initialize the harness around a btcd node which will serve as our
// dedicated miner to generate blocks, cause re-orgs, etc. We'll set // dedicated miner to generate blocks, cause re-orgs, etc. We'll set
// up this node with a chain length of 125, so we have plentyyy of BTC // up this node with a chain length of 125, so we have plenty of BTC
// to play around with. // to play around with.
miningNode, err := rpctest.New(netParams, nil, nil) miningNode, err := rpctest.New(netParams, nil, nil)
if err != nil { if err != nil {
@ -400,7 +400,7 @@ func senderHtlcSpendTimeout(receiverSig []byte, signer Signer,
// OP_CHECKSIG // OP_CHECKSIG
// OP_ENDIF // OP_ENDIF
// OP_ENDIF // OP_ENDIF
func receiverHTLCScript(cltvExipiry uint32, senderHtlcKey, func receiverHTLCScript(cltvExpiry uint32, senderHtlcKey,
receiverHtlcKey, revocationKey *btcec.PublicKey, receiverHtlcKey, revocationKey *btcec.PublicKey,
paymentHash []byte) ([]byte, error) { paymentHash []byte) ([]byte, error) {
@ -477,7 +477,7 @@ func receiverHTLCScript(cltvExipiry uint32, senderHtlcKey,
// lock-time required to timeout the HTLC. If the time has passed, then // lock-time required to timeout the HTLC. If the time has passed, then
// we'll proceed with a checksig to ensure that this is actually the // we'll proceed with a checksig to ensure that this is actually the
// sender of the original HTLC. // sender of the original HTLC.
builder.AddInt64(int64(cltvExipiry)) builder.AddInt64(int64(cltvExpiry))
builder.AddOp(txscript.OP_CHECKLOCKTIMEVERIFY) builder.AddOp(txscript.OP_CHECKLOCKTIMEVERIFY)
builder.AddOp(txscript.OP_DROP) builder.AddOp(txscript.OP_DROP)
builder.AddOp(txscript.OP_CHECKSIG) builder.AddOp(txscript.OP_CHECKSIG)
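The timeout branch being assembled here (push the expiry, then OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_CHECKSIG) can be shown with a stdlib-only sketch. The opcode constants are standard Bitcoin script values, but the fixed 4-byte push is a simplification: real builders such as txscript emit minimal-width script numbers.

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // Standard Bitcoin script opcode values, hard-coded for illustration.
    const (
        opCheckLockTimeVerify = 0xb1
        opDrop                = 0x75
        opCheckSig            = 0xac
    )

    // appendTimeoutClause appends the CLTV expiry push and the opcodes that
    // close out the timeout branch of a receiver HTLC script.
    func appendTimeoutClause(script []byte, cltvExpiry uint32) []byte {
        var num [4]byte
        binary.LittleEndian.PutUint32(num[:], cltvExpiry)
        script = append(script, byte(len(num))) // data push: length prefix
        script = append(script, num[:]...)
        return append(script, opCheckLockTimeVerify, opDrop, opCheckSig)
    }

    func main() {
        fmt.Printf("% x\n", appendTimeoutClause(nil, 500000))
    }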
@ -574,7 +574,7 @@ func ReceiverHtlcSpendRevoke(signer Signer, signDesc *SignDescriptor,
// an HTLC to recover the pending funds after an absolute timeout in the // an HTLC to recover the pending funds after an absolute timeout in the
// scenario that the receiver of the HTLC broadcasts their version of the // scenario that the receiver of the HTLC broadcasts their version of the
// commitment transaction. If the caller has already set the lock time on the // commitment transaction. If the caller has already set the lock time on the
// spending transaction, than a value of -1 can be passed for the cltvExipiry // spending transaction, then a value of -1 can be passed for the cltvExpiry
// value. // value.
// //
// NOTE: The target input of the passed transaction MUST NOT have a final // NOTE: The target input of the passed transaction MUST NOT have a final
@ -665,7 +665,7 @@ func createHtlcTimeoutTx(htlcOutput wire.OutPoint, htlcAmt btcutil.Amount,
return timeoutTx, nil return timeoutTx, nil
} }
// createHtlcSuccessTx creats a transaction that spends the output on the // createHtlcSuccessTx creates a transaction that spends the output on the
// commitment transaction of the peer that receives an HTLC. This transaction // commitment transaction of the peer that receives an HTLC. This transaction
// essentially acts as an off-chain covenant as it's only permitted to spend // essentially acts as an off-chain covenant as it's only permitted to spend
// the designated HTLC output, and also that spend can _only_ be used as a // the designated HTLC output, and also that spend can _only_ be used as a
@ -1305,7 +1305,7 @@ func GetStateNumHint(commitTx *wire.MsgTx, obfuscator [StateHintSize]byte) uint6
stateNumXor := uint64(commitTx.TxIn[0].Sequence&0xFFFFFF) << 24 stateNumXor := uint64(commitTx.TxIn[0].Sequence&0xFFFFFF) << 24
stateNumXor |= uint64(commitTx.LockTime & 0xFFFFFF) stateNumXor |= uint64(commitTx.LockTime & 0xFFFFFF)
// Finally, to obtain the final state number, we XOR by the obfuscater // Finally, to obtain the final state number, we XOR by the obfuscator
// value to de-obfuscate the state number. // value to de-obfuscate the state number.
return stateNumXor ^ xorInt return stateNumXor ^ xorInt
} }
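The de-obfuscation above is a plain XOR over a 48-bit value packed into two 24-bit fields; a self-contained round-trip sketch (field packing assumed to mirror the comment above) makes the symmetry explicit.

    package main

    import "fmt"

    // encodeHint XORs a 48-bit state number with an obfuscator and splits
    // the result into two 24-bit halves (sequence and locktime fields).
    func encodeHint(state, obfuscator uint64) (seq, locktime uint32) {
        x := (state ^ obfuscator) & 0xFFFFFFFFFFFF
        seq = uint32(x >> 24)           // upper 24 bits
        locktime = uint32(x & 0xFFFFFF) // lower 24 bits
        return seq, locktime
    }

    // decodeHint repacks the halves and XORs again to recover the state.
    func decodeHint(seq, locktime uint32, obfuscator uint64) uint64 {
        x := uint64(seq&0xFFFFFF)<<24 | uint64(locktime&0xFFFFFF)
        return x ^ obfuscator
    }

    func main() {
        const obfuscator = 0xDEADBEEF1234 // any 48-bit value
        seq, lt := encodeHint(42, obfuscator)
        fmt.Println(decodeHint(seq, lt, obfuscator)) // 42
    }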
@ -181,18 +181,18 @@ func TestCommitmentSpendValidation(t *testing.T) {
// Finally, we test bob sweeping his output as normal in the case that // Finally, we test bob sweeping his output as normal in the case that
// Alice broadcasts this commitment transaction. // Alice broadcasts this commitment transaction.
bobScriptp2wkh, err := commitScriptUnencumbered(bobPayKey) bobScriptP2WKH, err := commitScriptUnencumbered(bobPayKey)
if err != nil { if err != nil {
t.Fatalf("unable to create bob p2wkh script: %v", err) t.Fatalf("unable to create bob p2wkh script: %v", err)
} }
signDesc = &SignDescriptor{ signDesc = &SignDescriptor{
PubKey: bobKeyPub, PubKey: bobKeyPub,
SingleTweak: bobCommitTweak, SingleTweak: bobCommitTweak,
WitnessScript: bobScriptp2wkh, WitnessScript: bobScriptP2WKH,
SigHashes: txscript.NewTxSigHashes(sweepTx), SigHashes: txscript.NewTxSigHashes(sweepTx),
Output: &wire.TxOut{ Output: &wire.TxOut{
Value: int64(channelBalance), Value: int64(channelBalance),
PkScript: bobScriptp2wkh, PkScript: bobScriptP2WKH,
}, },
HashType: txscript.SigHashAll, HashType: txscript.SigHashAll,
InputIndex: 0, InputIndex: 0,
@ -391,7 +391,7 @@ func TestHTLCSenderSpendValidation(t *testing.T) {
aliceSigner := &mockSigner{privkeys: []*btcec.PrivateKey{aliceKeyPriv}} aliceSigner := &mockSigner{privkeys: []*btcec.PrivateKey{aliceKeyPriv}}
// We'll also generate a signature on the sweep transaction above // We'll also generate a signature on the sweep transaction above
// that'll act as Bob's signature to Alice for the second level HTLC // that will act as Bob's signature to Alice for the second level HTLC
// transaction. // transaction.
bobSignDesc := SignDescriptor{ bobSignDesc := SignDescriptor{
PubKey: bobKeyPub, PubKey: bobKeyPub,
@ -452,7 +452,7 @@ func TestHTLCSenderSpendValidation(t *testing.T) {
}, },
{ {
// HTLC with valid preimage size + sig // HTLC with valid preimage size + sig
// TODO(roabeef): invalid preimage // TODO(roasbeef): invalid preimage
makeWitnessTestCase(t, func() (wire.TxWitness, error) { makeWitnessTestCase(t, func() (wire.TxWitness, error) {
signDesc := &SignDescriptor{ signDesc := &SignDescriptor{
PubKey: bobKeyPub, PubKey: bobKeyPub,
@ -636,7 +636,7 @@ func TestHTLCReceiverSpendValidation(t *testing.T) {
aliceSigner := &mockSigner{privkeys: []*btcec.PrivateKey{aliceKeyPriv}} aliceSigner := &mockSigner{privkeys: []*btcec.PrivateKey{aliceKeyPriv}}
// We'll also generate a signature on the sweep transaction above // We'll also generate a signature on the sweep transaction above
// that'll act as Alice's signature to Bob for the second level HTLC // that will act as Alice's signature to Bob for the second level HTLC
// transaction. // transaction.
aliceSignDesc := SignDescriptor{ aliceSignDesc := SignDescriptor{
PubKey: aliceKeyPub, PubKey: aliceKeyPub,
@ -863,7 +863,7 @@ func TestSecondLevelHtlcSpends(t *testing.T) {
Value: int64(htlcAmt), Value: int64(htlcAmt),
} }
// TODO(roasbeef): make actually use timeout/sucess txns? // TODO(roasbeef): make actually use timeout/success txns?
// Finally, we'll create mock signers for both of them based on their // Finally, we'll create mock signers for both of them based on their
// private keys. This test simplifies a bit and uses the same key as // private keys. This test simplifies a bit and uses the same key as
@ -165,7 +165,7 @@ func (s *sigPool) Stop() error {
return nil return nil
} }
// poolWorker is the main worker goroutine wtihin the sigPool. Individual // poolWorker is the main worker goroutine within the sigPool. Individual
// batches are distributed amongst each of the active workers. The workers then // batches are distributed amongst each of the active workers. The workers then
// execute the task based on the type of job, and return the result back to // execute the task based on the type of job, and return the result back to
// caller. // caller.
@ -184,7 +184,7 @@ type addCounterPartySigsMsg struct {
// https://github.com/bitcoin/bips/blob/master/bip-0069.mediawiki. // https://github.com/bitcoin/bips/blob/master/bip-0069.mediawiki.
theirFundingInputScripts []*InputScript theirFundingInputScripts []*InputScript
// This should be 1/2 of the signatures needed to succesfully spend our // This should be 1/2 of the signatures needed to successfully spend our
// version of the commitment transaction. // version of the commitment transaction.
theirCommitmentSig []byte theirCommitmentSig []byte
@ -210,7 +210,7 @@ type addSingleFunderSigsMsg struct {
fundingOutpoint *wire.OutPoint fundingOutpoint *wire.OutPoint
// theirCommitmentSig are the 1/2 of the signatures needed to // theirCommitmentSig are the 1/2 of the signatures needed to
// succesfully spend our version of the commitment transaction. // successfully spend our version of the commitment transaction.
theirCommitmentSig []byte theirCommitmentSig []byte
// This channel is used to return the completed channel after the wallet // This channel is used to return the completed channel after the wallet
@ -1366,7 +1366,7 @@ func (l *LightningWallet) deriveMasterRevocationRoot() (*btcec.PrivateKey, error
} }
// DeriveStateHintObfuscator derives the bytes to be used for obfuscating the // DeriveStateHintObfuscator derives the bytes to be used for obfuscating the
// state hints from the root to be used for a new channel. The obsfucsator is // state hints from the root to be used for a new channel. The obfuscator is
// generated via the following computation: // generated via the following computation:
// //
// * sha256(initiatorKey || responderKey)[26:] // * sha256(initiatorKey || responderKey)[26:]
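The computation named in this comment is easy to reproduce in isolation; in this sketch the two serialized keys are placeholder byte slices rather than real secp256k1 points.

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    const stateHintSize = 6 // 48 bits

    // deriveObfuscator keeps the last 6 bytes of the joint key hash:
    // sha256(initiatorKey || responderKey)[26:].
    func deriveObfuscator(initiatorKey, responderKey []byte) [stateHintSize]byte {
        h := sha256.New()
        h.Write(initiatorKey)
        h.Write(responderKey)
        var obfuscator [stateHintSize]byte
        copy(obfuscator[:], h.Sum(nil)[26:])
        return obfuscator
    }

    func main() {
        fmt.Printf("%x\n", deriveObfuscator([]byte{0x02, 0xaa}, []byte{0x03, 0xbb}))
    }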
@ -1386,7 +1386,7 @@ func DeriveStateHintObfuscator(key1, key2 *btcec.PublicKey) [StateHintSize]byte
return obfuscator return obfuscator
} }
// initStateHints properly sets the obsfucated state hints on both commitment // initStateHints properly sets the obfuscated state hints on both commitment
// transactions using the passed obfuscator. // transactions using the passed obfuscator.
func initStateHints(commit1, commit2 *wire.MsgTx, func initStateHints(commit1, commit2 *wire.MsgTx,
obfuscator [StateHintSize]byte) error { obfuscator [StateHintSize]byte) error {
@ -29,7 +29,7 @@ const (
// HtlcOfferedRevoke is a witness that allows us to sweep an HTLC which // HtlcOfferedRevoke is a witness that allows us to sweep an HTLC which
// we offered to the remote party in the case that they broadcast a // we offered to the remote party in the case that they broadcast a
// revoked commitmetn state. // revoked commitment state.
HtlcOfferedRevoke WitnessType = 3 HtlcOfferedRevoke WitnessType = 3
// HtlcAcceptedRevoke is a witness that allows us to sweep an HTLC // HtlcAcceptedRevoke is a witness that allows us to sweep an HTLC
@ -46,7 +46,7 @@ const (
// HtlcAcceptedSuccessSecondLevel is a witness that allows us to sweep // HtlcAcceptedSuccessSecondLevel is a witness that allows us to sweep
// an HTLC output that was offered to us, and for which we have a // an HTLC output that was offered to us, and for which we have a
// payment preimage. This HTLC output isn't diretly on our commitment // payment preimage. This HTLC output isn't directly on our commitment
// transaction, but is the result of confirmed second-level HTLC // transaction, but is the result of confirmed second-level HTLC
// transaction. As a result, we can only spend this after a CSV delay. // transaction. As a result, we can only spend this after a CSV delay.
HtlcAcceptedSuccessSecondLevel WitnessType = 6 HtlcAcceptedSuccessSecondLevel WitnessType = 6
@ -10,7 +10,7 @@ import (
// receiver's pending set into a new commitment state. Implicitly, the new // receiver's pending set into a new commitment state. Implicitly, the new
// commitment transaction constructed which has been signed by CommitSig // commitment transaction constructed which has been signed by CommitSig
// includes all HTLC's in the remote node's pending set. A CommitSig message // includes all HTLC's in the remote node's pending set. A CommitSig message
// may be sent after a series of UpdateAddHTLC/UpdateFufillHTLC messages in // may be sent after a series of UpdateAddHTLC/UpdateFulfillHTLC messages in
// order to batch add several HTLC's with a single signature covering all // order to batch add several HTLC's with a single signature covering all
// implicitly accepted HTLC's. // implicitly accepted HTLC's.
type CommitSig struct { type CommitSig struct {
@ -84,7 +84,7 @@ func TestFeatureVectorSetUnset(t *testing.T) {
for j, expectedSet := range test.expectedFeatures { for j, expectedSet := range test.expectedFeatures {
if fv.HasFeature(FeatureBit(j)) != expectedSet { if fv.HasFeature(FeatureBit(j)) != expectedSet {
t.Errorf("Expection failed in case %d, bit %d", i, j) t.Errorf("Expectation failed in case %d, bit %d", i, j)
break break
} }
} }
@ -563,8 +563,8 @@ func TestLightningWireProtocol(t *testing.T) {
}, },
}, },
{ {
msgType: MsgUpdateFufillHTLC, msgType: MsgUpdateFulfillHTLC,
scenario: func(m UpdateFufillHTLC) bool { scenario: func(m UpdateFulfillHTLC) bool {
return mainScenario(&m) return mainScenario(&m)
}, },
}, },
@ -19,7 +19,7 @@ const MaxMessagePayload = 65535 // 65KB
// MessageType is the unique 2 byte big-endian integer that indicates the type // MessageType is the unique 2 byte big-endian integer that indicates the type
// of message on the wire. All messages have a very simple header which // of message on the wire. All messages have a very simple header which
// consists simply of 2-byte message type. We omit a length field, and checksum // consists simply of 2-byte message type. We omit a length field, and checksum
// as the Lighting Protocol is intended to be encapsulated within a // as the Lightning Protocol is intended to be encapsulated within a
// confidential+authenticated cryptographic messaging protocol. // confidential+authenticated cryptographic messaging protocol.
type MessageType uint16 type MessageType uint16
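The framing this comment describes is just a 2-byte big-endian type prefix with no length field or checksum; a minimal sketch (the helper name is invented, not lnwire's API):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // writeMessage prepends the 2-byte big-endian message type to a payload.
    func writeMessage(w *bytes.Buffer, msgType uint16, payload []byte) {
        var hdr [2]byte
        binary.BigEndian.PutUint16(hdr[:], msgType)
        w.Write(hdr[:])
        w.Write(payload)
    }

    func main() {
        var buf bytes.Buffer
        writeMessage(&buf, 130, []byte{0x01}) // 130 = MsgUpdateFulfillHTLC
        fmt.Printf("% x\n", buf.Bytes())      // 00 82 01
    }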
@ -38,7 +38,7 @@ const (
MsgShutdown = 38 MsgShutdown = 38
MsgClosingSigned = 39 MsgClosingSigned = 39
MsgUpdateAddHTLC = 128 MsgUpdateAddHTLC = 128
MsgUpdateFufillHTLC = 130 MsgUpdateFulfillHTLC = 130
MsgUpdateFailHTLC = 131 MsgUpdateFailHTLC = 131
MsgCommitSig = 132 MsgCommitSig = 132
MsgRevokeAndAck = 133 MsgRevokeAndAck = 133
@ -74,8 +74,8 @@ func (t MessageType) String() string {
return "UpdateAddHTLC" return "UpdateAddHTLC"
case MsgUpdateFailHTLC: case MsgUpdateFailHTLC:
return "UpdateFailHTLC" return "UpdateFailHTLC"
case MsgUpdateFufillHTLC: case MsgUpdateFulfillHTLC:
return "UpdateFufillHTLC" return "UpdateFulfillHTLC"
case MsgCommitSig: case MsgCommitSig:
return "CommitSig" return "CommitSig"
case MsgRevokeAndAck: case MsgRevokeAndAck:
@ -165,8 +165,8 @@ func makeEmptyMessage(msgType MessageType) (Message, error) {
msg = &UpdateAddHTLC{} msg = &UpdateAddHTLC{}
case MsgUpdateFailHTLC: case MsgUpdateFailHTLC:
msg = &UpdateFailHTLC{} msg = &UpdateFailHTLC{}
case MsgUpdateFufillHTLC: case MsgUpdateFulfillHTLC:
msg = &UpdateFufillHTLC{} msg = &UpdateFulfillHTLC{}
case MsgCommitSig: case MsgCommitSig:
msg = &CommitSig{} msg = &CommitSig{}
case MsgRevokeAndAck: case MsgRevokeAndAck:
@ -46,7 +46,7 @@ func (n *NetAddress) String() string {
return fmt.Sprintf("%x@%v", pubkey, n.Address) return fmt.Sprintf("%x@%v", pubkey, n.Address)
} }
// Network returns the name of the network this address is binded to. // Network returns the name of the network this address is bound to.
// //
// This is part of the net.Addr interface. // This is part of the net.Addr interface.
func (n *NetAddress) Network() string { func (n *NetAddress) Network() string {
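The "%x@%v" form produced by String() above ("<hex pubkey>@<host:port>") can be split back apart with stdlib code alone; this parser is a hypothetical companion for illustration, not part of lnwire.

    package main

    import (
        "encoding/hex"
        "fmt"
        "strings"
    )

    // splitNodeAddr separates "<hex pubkey>@<host:port>" into components.
    func splitNodeAddr(s string) (pubKey []byte, addr string, err error) {
        parts := strings.SplitN(s, "@", 2)
        if len(parts) != 2 {
            return nil, "", fmt.Errorf("missing '@' separator in %q", s)
        }
        pubKey, err = hex.DecodeString(parts[0])
        return pubKey, parts[1], err
    }

    func main() {
        pk, addr, err := splitNodeAddr("02aabb@127.0.0.1:9735")
        fmt.Printf("%x %s %v\n", pk, addr, err) // 02aabb 127.0.0.1:9735 <nil>
    }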
@ -26,11 +26,11 @@ type FailureMessage interface {
} }
// failureMessageLength is the size of the failure message plus the size of // failureMessageLength is the size of the failure message plus the size of
// padding. The FailureMessage message should always be EXACLTY this size. // padding. The FailureMessage message should always be EXACTLY this size.
const failureMessageLength = 256 const failureMessageLength = 256
const ( const (
// FlagBadOnion error flag describes an unparseable, encrypted by // FlagBadOnion error flag describes an unparsable onion, encrypted by the
// previous node. // previous node.
FlagBadOnion FailCode = 0x8000 FlagBadOnion FailCode = 0x8000
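A sketch of the fixed-size constraint stated above: every encoded failure is zero-padded out to exactly failureMessageLength bytes. (The real wire encoding also carries explicit length prefixes, which this toy omits.)

    package main

    import (
        "errors"
        "fmt"
    )

    const failureMessageLength = 256

    // padFailure right-pads an encoded failure so that every failure
    // occupies exactly failureMessageLength bytes.
    func padFailure(encoded []byte) ([]byte, error) {
        if len(encoded) > failureMessageLength {
            return nil, errors.New("failure message too large")
        }
        out := make([]byte, failureMessageLength)
        copy(out, encoded)
        return out, nil
    }

    func main() {
        msg, err := padFailure([]byte("temporary channel failure"))
        fmt.Println(len(msg), err) // 256 <nil>
    }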
@ -17,7 +17,7 @@ func SerializeSigToWire(b *[64]byte, e *btcec.Signature) error {
// 0x30 <length> 0x02 <length r> r 0x02 <length s> s // 0x30 <length> 0x02 <length r> r 0x02 <length s> s
// which means the length of R is the 4th byte and the length of S // which means the length of R is the 4th byte and the length of S
// is the second byte after R ends. 0x02 signifies a length-prefixed, // is the second byte after R ends. 0x02 signifies a length-prefixed,
// zero-padded, big-endian bigint. 0x30 sigifies a DER signature. // zero-padded, big-endian bigint. 0x30 signifies a DER signature.
// See the Serialize() method for btcec.Signature for details. // See the Serialize() method for btcec.Signature for details.
rLen := sig[3] rLen := sig[3]
sLen := sig[5+rLen] sLen := sig[5+rLen]
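The offsets used above (rLen at sig[3], sLen at sig[5+rLen]) follow directly from the DER layout in the comment; here is a bounds-checked sketch of the same walk, with a fabricated signature for the demo.

    package main

    import (
        "errors"
        "fmt"
    )

    // derToRS extracts the raw R and S big-endian integers from a DER
    // signature laid out as: 0x30 <len> 0x02 <rLen> R 0x02 <sLen> S.
    func derToRS(sig []byte) (r, s []byte, err error) {
        if len(sig) < 6 || sig[0] != 0x30 || sig[2] != 0x02 {
            return nil, nil, errors.New("not a DER signature")
        }
        rLen := int(sig[3])
        if len(sig) < 6+rLen || sig[4+rLen] != 0x02 {
            return nil, nil, errors.New("malformed DER signature")
        }
        sLen := int(sig[5+rLen])
        if len(sig) < 6+rLen+sLen {
            return nil, nil, errors.New("truncated DER signature")
        }
        return sig[4 : 4+rLen], sig[6+rLen : 6+rLen+sLen], nil
    }

    func main() {
        // 0x30, total len 8, R = {0x01}, S = {0x02, 0x03}
        sig := []byte{0x30, 0x08, 0x02, 0x01, 0x01, 0x02, 0x02, 0x02, 0x03}
        r, s, err := derToRS(sig)
        fmt.Println(r, s, err) // [1] [2 3] <nil>
    }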
@ -2,12 +2,12 @@ package lnwire
import "io" import "io"
// UpdateFufillHTLC is sent by Alice to Bob when she wishes to settle a // UpdateFulfillHTLC is sent by Alice to Bob when she wishes to settle a
// particular HTLC referenced by its HTLCKey within a specific active channel // particular HTLC referenced by its HTLCKey within a specific active channel
// referenced by ChannelPoint. A subsequent CommitSig message will be sent by // referenced by ChannelPoint. A subsequent CommitSig message will be sent by
// Alice to "lock-in" the removal of the specified HTLC, possible containing a // Alice to "lock-in" the removal of the specified HTLC, possible containing a
// batch signature covering several settled HTLC's. // batch signature covering several settled HTLC's.
type UpdateFufillHTLC struct { type UpdateFulfillHTLC struct {
// ChanID references an active channel which holds the HTLC to be // ChanID references an active channel which holds the HTLC to be
// settled. // settled.
ChanID ChannelID ChanID ChannelID
@ -21,26 +21,26 @@ type UpdateFufillHTLC struct {
PaymentPreimage [32]byte PaymentPreimage [32]byte
} }
// NewUpdateFufillHTLC returns a new empty UpdateFufillHTLC. // NewUpdateFulfillHTLC returns a new empty UpdateFulfillHTLC.
func NewUpdateFufillHTLC(chanID ChannelID, id uint64, func NewUpdateFulfillHTLC(chanID ChannelID, id uint64,
preimage [32]byte) *UpdateFufillHTLC { preimage [32]byte) *UpdateFulfillHTLC {
return &UpdateFufillHTLC{ return &UpdateFulfillHTLC{
ChanID: chanID, ChanID: chanID,
ID: id, ID: id,
PaymentPreimage: preimage, PaymentPreimage: preimage,
} }
} }
// A compile time check to ensure UpdateFufillHTLC implements the lnwire.Message // A compile time check to ensure UpdateFulfillHTLC implements the lnwire.Message
// interface. // interface.
var _ Message = (*UpdateFufillHTLC)(nil) var _ Message = (*UpdateFulfillHTLC)(nil)
// Decode deserializes a serialized UpdateFufillHTLC message stored in the passed // Decode deserializes a serialized UpdateFulfillHTLC message stored in the passed
// io.Reader observing the specified protocol version. // io.Reader observing the specified protocol version.
// //
// This is part of the lnwire.Message interface. // This is part of the lnwire.Message interface.
func (c *UpdateFufillHTLC) Decode(r io.Reader, pver uint32) error { func (c *UpdateFulfillHTLC) Decode(r io.Reader, pver uint32) error {
return readElements(r, return readElements(r,
&c.ChanID, &c.ChanID,
&c.ID, &c.ID,
@ -48,11 +48,11 @@ func (c *UpdateFufillHTLC) Decode(r io.Reader, pver uint32) error {
) )
} }
// Encode serializes the target UpdateFufillHTLC into the passed io.Writer // Encode serializes the target UpdateFulfillHTLC into the passed io.Writer
// observing the protocol version specified. // observing the protocol version specified.
// //
// This is part of the lnwire.Message interface. // This is part of the lnwire.Message interface.
func (c *UpdateFufillHTLC) Encode(w io.Writer, pver uint32) error { func (c *UpdateFulfillHTLC) Encode(w io.Writer, pver uint32) error {
return writeElements(w, return writeElements(w,
c.ChanID, c.ChanID,
c.ID, c.ID,
@ -64,15 +64,15 @@ func (c *UpdateFufillHTLC) Encode(w io.Writer, pver uint32) error {
// wire. // wire.
// //
// This is part of the lnwire.Message interface. // This is part of the lnwire.Message interface.
func (c *UpdateFufillHTLC) MsgType() MessageType { func (c *UpdateFulfillHTLC) MsgType() MessageType {
return MsgUpdateFufillHTLC return MsgUpdateFulfillHTLC
} }
// MaxPayloadLength returns the maximum allowed payload size for a UpdateFufillHTLC // MaxPayloadLength returns the maximum allowed payload size for a UpdateFulfillHTLC
// complete message observing the specified protocol version. // complete message observing the specified protocol version.
// //
// This is part of the lnwire.Message interface. // This is part of the lnwire.Message interface.
func (c *UpdateFufillHTLC) MaxPayloadLength(uint32) uint32 { func (c *UpdateFulfillHTLC) MaxPayloadLength(uint32) uint32 {
// 32 + 8 + 32 // 32 + 8 + 32
return 72 return 72
} }
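The 72-byte maximum above is just the sum of the message's fixed fields; a stand-alone sketch of that layout (not lnwire's actual writeElements machinery):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // encodeFulfill writes the three fixed-size fields of a settle message:
    // 32-byte channel ID, 8-byte HTLC ID, 32-byte payment preimage.
    func encodeFulfill(chanID [32]byte, id uint64, preimage [32]byte) []byte {
        var buf bytes.Buffer
        buf.Write(chanID[:])
        binary.Write(&buf, binary.BigEndian, id) // never fails on a bytes.Buffer
        buf.Write(preimage[:])
        return buf.Bytes()
    }

    func main() {
        var chanID, preimage [32]byte
        fmt.Println(len(encodeFulfill(chanID, 7, preimage))) // 72 = 32 + 8 + 32
    }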
log.go
@ -32,7 +32,7 @@ func (logWriter) Write(p []byte) (n int, err error) {
return len(p), nil return len(p), nil
} }
// Loggers per subsystem. A single backend logger is created and all subsytem // Loggers per subsystem. A single backend logger is created and all subsystem
// loggers created from it will write to the backend. When adding new // loggers created from it will write to the backend. When adding new
// subsystems, add the subsystem logger variable here and to the // subsystems, add the subsystem logger variable here and to the
// subsystemLoggers map. // subsystemLoggers map.
@ -221,7 +221,7 @@ var (
// kndrPrefix is the state prefix given to all CSV delayed outputs, // kndrPrefix is the state prefix given to all CSV delayed outputs,
// either from the commitment transaction, or a stage-one htlc // either from the commitment transaction, or a stage-one htlc
// transaction, whose maturity height has solidified. Outputs marked in // transaction, whose maturity height has solidified. Outputs marked in
// this state are in their final stage of incubation withn the nursery, // this state are in their final stage of incubation within the nursery,
// and will be swept into the wallet after waiting out the relative // and will be swept into the wallet after waiting out the relative
// timelock. // timelock.
kndrPrefix = []byte("kndr") kndrPrefix = []byte("kndr")
@ -1355,7 +1355,7 @@ func (ns *nurseryStore) getLastFinalizedHeight(tx *bolt.Tx) (uint32, error) {
return byteOrder.Uint32(heightBytes), nil return byteOrder.Uint32(heightBytes), nil
} }
// finalizeKinder records a finalized kingergarten sweep txn to the given height // finalizeKinder records a finalized kindergarten sweep txn to the given height
// bucket. It also updates the nursery store's last finalized height, so that we // bucket. It also updates the nursery store's last finalized height, so that we
// do not finalize the same height twice. If the finalized txn is nil, i.e. if // do not finalize the same height twice. If the finalized txn is nil, i.e. if
// the height has no kindergarten outputs, the height will be marked as // the height has no kindergarten outputs, the height will be marked as
@ -1463,7 +1463,7 @@ func (ns *nurseryStore) putLastGraduatedHeight(tx *bolt.Tx, height uint32) error
return err return err
} }
// Serialize the provided last-gradauted height, and store it in the // Serialize the provided last-graduated height, and store it in the
// top-level chain bucket for this nursery store. // top-level chain bucket for this nursery store.
var lastHeightBytes [4]byte var lastHeightBytes [4]byte
byteOrder.PutUint32(lastHeightBytes[:], height) byteOrder.PutUint32(lastHeightBytes[:], height)
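The serialization step described here is a fixed 4-byte write; a sketch against a plain map standing in for the bolt bucket (byteOrder assumed to be big-endian, consistent with the usage above):

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // putLastGraduatedHeight stores a height as a fixed 4-byte big-endian
    // value under a well-known key, as a bolt bucket would.
    func putLastGraduatedHeight(bucket map[string][]byte, height uint32) {
        var b [4]byte
        binary.BigEndian.PutUint32(b[:], height)
        bucket["last-graduated-height"] = b[:]
    }

    func main() {
        bucket := make(map[string][]byte)
        putLastGraduatedHeight(bucket, 500000)
        fmt.Println(binary.BigEndian.Uint32(bucket["last-graduated-height"]))
    }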
@ -341,7 +341,7 @@ func TestNurseryStoreIncubate(t *testing.T) {
} }
// TestNurseryStoreFinalize tests that kindergarten sweep transactions are // TestNurseryStoreFinalize tests that kindergarten sweep transactions are
// properly persistted, and that the last finalized height is being set // properly persisted, and that the last finalized height is being set
// accordingly. // accordingly.
func TestNurseryStoreFinalize(t *testing.T) { func TestNurseryStoreFinalize(t *testing.T) {
cdb, cleanUp, err := makeTestDB() cdb, cleanUp, err := makeTestDB()
peer.go
@ -45,10 +45,10 @@ const (
outgoingQueueLen = 50 outgoingQueueLen = 50
) )
// outgoinMsg packages an lnwire.Message to be sent out on the wire, along with // outgoingMsg packages an lnwire.Message to be sent out on the wire, along with
// a buffered channel which will be sent upon once the write is complete. This // a buffered channel which will be sent upon once the write is complete. This
// buffered channel acts as a semaphore to be used for synchronization purposes. // buffered channel acts as a semaphore to be used for synchronization purposes.
type outgoinMsg struct { type outgoingMsg struct {
msg lnwire.Message msg lnwire.Message
errChan chan error // MUST be buffered. errChan chan error // MUST be buffered.
} }
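The "MUST be buffered" requirement above is what lets the writer signal completion without ever blocking; a compact stand-alone version of the pattern (names invented for the demo):

    package main

    import "fmt"

    // queuedMsg pairs a payload with a buffered channel that is sent on once
    // the write completes, so the sender can block until then if it wants.
    type queuedMsg struct {
        msg     string
        errChan chan error // MUST be buffered.
    }

    func writer(queue <-chan queuedMsg) {
        for m := range queue {
            // ... write m.msg to the wire ...
            m.errChan <- nil // never blocks: the channel is buffered
        }
    }

    func main() {
        queue := make(chan queuedMsg)
        go writer(queue)

        errChan := make(chan error, 1)
        queue <- queuedMsg{msg: "ping", errChan: errChan}
        fmt.Println("write result:", <-errChan)
    }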
@ -116,11 +116,11 @@ type peer struct {
// sendQueue is the channel which is used to queue outgoing messages to be // sendQueue is the channel which is used to queue outgoing messages to be
// written onto the wire. Note that this channel is unbuffered. // written onto the wire. Note that this channel is unbuffered.
sendQueue chan outgoinMsg sendQueue chan outgoingMsg
// outgoingQueue is a buffered channel which allows second/third party // outgoingQueue is a buffered channel which allows second/third party
// objects to queue messages to be sent out on the wire. // objects to queue messages to be sent out on the wire.
outgoingQueue chan outgoinMsg outgoingQueue chan outgoingMsg
// activeChannels is a map which stores the state machines of all // activeChannels is a map which stores the state machines of all
// active channels. Channels are indexed into the map by the txid of // active channels. Channels are indexed into the map by the txid of
@ -187,8 +187,8 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
localFeatures: localFeatures, localFeatures: localFeatures,
sendQueue: make(chan outgoinMsg), sendQueue: make(chan outgoingMsg),
outgoingQueue: make(chan outgoinMsg), outgoingQueue: make(chan outgoingMsg),
activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel), activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
newChannels: make(chan *newChannelMsg, 1), newChannels: make(chan *newChannelMsg, 1),
@ -748,7 +748,7 @@ out:
case *lnwire.UpdateAddHTLC: case *lnwire.UpdateAddHTLC:
isChanUpdate = true isChanUpdate = true
targetChan = msg.ChanID targetChan = msg.ChanID
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFulfillHTLC:
isChanUpdate = true isChanUpdate = true
targetChan = msg.ChanID targetChan = msg.ChanID
case *lnwire.UpdateFailMalformedHTLC: case *lnwire.UpdateFailMalformedHTLC:
@ -864,7 +864,7 @@ func messageSummary(msg lnwire.Message) string {
return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID, return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID,
msg.ID, msg.Reason) msg.ID, msg.Reason)
case *lnwire.UpdateFufillHTLC: case *lnwire.UpdateFulfillHTLC:
return fmt.Sprintf("chan_id=%v, id=%v, pre_image=%x", return fmt.Sprintf("chan_id=%v, id=%v, pre_image=%x",
msg.ChanID, msg.ID, msg.PaymentPreimage[:]) msg.ChanID, msg.ID, msg.PaymentPreimage[:])
@ -1092,7 +1092,7 @@ func (p *peer) queueHandler() {
// writeHandler cannot accept messages on the // writeHandler cannot accept messages on the
// sendQueue. // sendQueue.
select { select {
case p.sendQueue <- elem.Value.(outgoinMsg): case p.sendQueue <- elem.Value.(outgoingMsg):
pendingMsgs.Remove(elem) pendingMsgs.Remove(elem)
case msg := <-p.outgoingQueue: case msg := <-p.outgoingQueue:
pendingMsgs.PushBack(msg) pendingMsgs.PushBack(msg)
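The select above relies on a classic Go trick: a send on a nil channel is never chosen, so the send case is only armed while something is actually pending. A compact stand-alone version of the pattern:

    package main

    import (
        "container/list"
        "fmt"
    )

    // drain forwards queued items to an unbuffered send channel while still
    // accepting new submissions, mirroring the two-channel pattern above.
    func drain(in <-chan string, out chan<- string, done <-chan struct{}) {
        pending := list.New()
        for {
            // Only offer to out when something is actually pending.
            var sendCh chan<- string
            var next string
            if front := pending.Front(); front != nil {
                sendCh = out
                next = front.Value.(string)
            }
            select {
            case sendCh <- next:
                pending.Remove(pending.Front())
            case msg := <-in:
                pending.PushBack(msg)
            case <-done:
                return
            }
        }
    }

    func main() {
        in := make(chan string)
        out := make(chan string)
        done := make(chan struct{})
        go drain(in, out, done)

        in <- "a"
        in <- "b"
        fmt.Println(<-out, <-out) // a b
        close(done)
    }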
@ -1149,7 +1149,7 @@ func (p *peer) PingTime() int64 {
// nil otherwise. // nil otherwise.
func (p *peer) queueMsg(msg lnwire.Message, errChan chan error) { func (p *peer) queueMsg(msg lnwire.Message, errChan chan error) {
select { select {
case p.outgoingQueue <- outgoinMsg{msg, errChan}: case p.outgoingQueue <- outgoingMsg{msg, errChan}:
case <-p.quit: case <-p.quit:
peerLog.Tracef("Peer shutting down, could not enqueue msg.") peerLog.Tracef("Peer shutting down, could not enqueue msg.")
if errChan != nil { if errChan != nil {
@ -1437,7 +1437,7 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, e
// cooperative channel closure transaction from the chain arb. // cooperative channel closure transaction from the chain arb.
// With this context, we'll ensure that we're able to respond // With this context, we'll ensure that we're able to respond
// if *any* of the transactions we sign off on are ever // if *any* of the transactions we sign off on are ever
// braodacast. // broadcast.
closeCtx, err := p.server.chainArb.BeginCoopChanClose( closeCtx, err := p.server.chainArb.BeginCoopChanClose(
*channel.ChannelPoint(), *channel.ChannelPoint(),
) )
@ -1503,7 +1503,7 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
// cooperative channel closure transaction from the chain arb. // cooperative channel closure transaction from the chain arb.
// With this context, we'll ensure that we're able to respond // With this context, we'll ensure that we're able to respond
// if *any* of the transactions we sign off on are ever // if *any* of the transactions we sign off on are ever
// braodacast. // broadcast.
closeCtx, err := p.server.chainArb.BeginCoopChanClose( closeCtx, err := p.server.chainArb.BeginCoopChanClose(
*channel.ChannelPoint(), *channel.ChannelPoint(),
) )
@ -322,7 +322,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
} }
// The fee sent by the responder should be less than the fee we just // The fee sent by the responder should be less than the fee we just
// sent as it should attempt to comrpomise. // sent as it should attempt to compromise.
peerFee := responderClosingSigned.FeeSatoshis peerFee := responderClosingSigned.FeeSatoshis
if peerFee > increasedFee { if peerFee > increasedFee {
t.Fatalf("new fee should be less than our fee: new=%v, "+ t.Fatalf("new fee should be less than our fee: new=%v, "+
@ -228,7 +228,7 @@ func (b *BitcoindFilteredChainView) FilterBlock(blockHash *chainhash.Hash) (*Fil
} }
// chainFilterer is the primary goroutine which: listens for new blocks coming // chainFilterer is the primary goroutine which: listens for new blocks coming
// and dispatches the relevent FilteredBlock notifications, updates the filter // and dispatches the relevant FilteredBlock notifications, updates the filter
// due to requests by callers, and finally is able to perform targeted block // due to requests by callers, and finally is able to perform targeted block
// filtration. // filtration.
// //
@ -236,7 +236,7 @@ func (b *BitcoindFilteredChainView) FilterBlock(blockHash *chainhash.Hash) (*Fil
func (b *BitcoindFilteredChainView) chainFilterer() { func (b *BitcoindFilteredChainView) chainFilterer() {
defer b.wg.Done() defer b.wg.Done()
// filterBlock is a helper funciton that scans the given block, and // filterBlock is a helper function that scans the given block, and
// notes which transactions spend outputs which are currently being // notes which transactions spend outputs which are currently being
// watched. Additionally, the chain filter will also be updated by // watched. Additionally, the chain filter will also be updated by
// removing any spent outputs. // removing any spent outputs.
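The helper described above scans each transaction's inputs against a watched-output set and prunes spent entries as it goes; a toy version with fabricated types:

    package main

    import "fmt"

    type outPoint struct {
        txid  string
        index uint32
    }

    type tx struct {
        id     string
        spends []outPoint
    }

    // filterBlock reports which transactions in a block spend a watched
    // output, and prunes spent outputs from the filter as it goes.
    func filterBlock(txs []tx, watched map[outPoint]struct{}) []string {
        var relevant []string
        for _, t := range txs {
            for _, prev := range t.spends {
                if _, ok := watched[prev]; ok {
                    relevant = append(relevant, t.id)
                    delete(watched, prev) // the output is now spent
                }
            }
        }
        return relevant
    }

    func main() {
        watched := map[outPoint]struct{}{{"aa", 0}: {}}
        block := []tx{{id: "bb", spends: []outPoint{{"aa", 0}}}}
        fmt.Println(filterBlock(block, watched)) // [bb]
    }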
@ -241,7 +241,7 @@ func (b *BtcdFilteredChainView) FilterBlock(blockHash *chainhash.Hash) (*Filtere
} }
// chainFilterer is the primary goroutine which: listens for new blocks coming // chainFilterer is the primary goroutine which: listens for new blocks coming
// and dispatches the relevent FilteredBlock notifications, updates the filter // and dispatches the relevant FilteredBlock notifications, updates the filter
// due to requests by callers, and finally is able to perform targeted block // due to requests by callers, and finally is able to perform targeted block
// filtration. // filtration.
// //
@ -249,7 +249,7 @@ func (b *BtcdFilteredChainView) FilterBlock(blockHash *chainhash.Hash) (*Filtere
func (b *BtcdFilteredChainView) chainFilterer() { func (b *BtcdFilteredChainView) chainFilterer() {
defer b.wg.Done() defer b.wg.Done()
// filterBlock is a helper funciton that scans the given block, and // filterBlock is a helper function that scans the given block, and
// notes which transactions spend outputs which are currently being // notes which transactions spend outputs which are currently being
// watched. Additionally, the chain filter will also be updated by // watched. Additionally, the chain filter will also be updated by
// removing any spent outputs. // removing any spent outputs.
@ -7,7 +7,7 @@ import (
// FilteredChainView represents a subscription to a certain subset of the // FilteredChainView represents a subscription to a certain subset of the
// UTXO set for a particular chain. This interface is useful from the point of // UTXO set for a particular chain. This interface is useful from the point of
// view of maintaining an up-to-date channel graph for the Lighting Network. // view of maintaining an up-to-date channel graph for the Lightning Network.
// The subset of the UTXO to be subscribed is that of all the currently opened // The subset of the UTXO to be subscribed is that of all the currently opened
// channels. Each time a channel is closed (the output is spent), a // channels. Each time a channel is closed (the output is spent), a
// notification is to be sent allowing the graph to be pruned. // notification is to be sent allowing the graph to be pruned.
@ -371,7 +371,7 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,
t.Fatalf("unable to generate block: %v", err) t.Fatalf("unable to generate block: %v", err)
} }
// We should've received another empty filtered block notification. // We should have received another empty filtered block notification.
select { select {
case filteredBlock := <-blockChan: case filteredBlock := <-blockChan:
assertFilteredBlock(t, filteredBlock, currentHeight+1, assertFilteredBlock(t, filteredBlock, currentHeight+1,
@ -606,7 +606,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
} }
expectedHeight := uint32(oldHeight - i) expectedHeight := uint32(oldHeight - i)
if block.Height != expectedHeight { if block.Height != expectedHeight {
t.Fatalf("expected to receive disconencted "+ t.Fatalf("expected to receive disconnected "+
"block at height %d, instead got at %d", "block at height %d, instead got at %d",
expectedHeight, block.Height) expectedHeight, block.Height)
} }
@ -878,7 +878,7 @@ var interfaceImpls = []struct {
func TestFilteredChainView(t *testing.T) { func TestFilteredChainView(t *testing.T) {
// Initialize the harness around a btcd node which will serve as our // Initialize the harness around a btcd node which will serve as our
// dedicated miner to generate blocks, cause re-orgs, etc. We'll set up // dedicated miner to generate blocks, cause re-orgs, etc. We'll set up
// this node with a chain length of 125, so we have plentyyy of BTC to // this node with a chain length of 125, so we have plenty of BTC to
// play around with. // play around with.
miner, err := rpctest.New(netParams, nil, nil) miner, err := rpctest.New(netParams, nil, nil)
if err != nil { if err != nil {
@ -17,7 +17,7 @@ import (
// CfFilteredChainView is an implementation of the FilteredChainView interface // CfFilteredChainView is an implementation of the FilteredChainView interface
// which is supported by an underlying Bitcoin light client which supports // which is supported by an underlying Bitcoin light client which supports
// client side filtering of Golomb Coded Sets. Rather than fetching all the // client side filtering of Golomb Coded Sets. Rather than fetching all the
// blocks, the light client is able to query fitlers locally, to test if an // blocks, the light client is able to query filters locally, to test if an
// item in a block modifies any of our watched set of UTXOs. // item in a block modifies any of our watched set of UTXOs.
type CfFilteredChainView struct { type CfFilteredChainView struct {
started int32 started int32
@ -39,7 +39,7 @@ type blockEventQueue struct {
// will receive connected/new blocks from the FilteredChainView. // will receive connected/new blocks from the FilteredChainView.
newBlocks chan *FilteredBlock newBlocks chan *FilteredBlock
// stleBlocks is the channel where the consumer of the queue will // staleBlocks is the channel where the consumer of the queue will
// receive disconnected/stale blocks from the FilteredChainView. // receive disconnected/stale blocks from the FilteredChainView.
staleBlocks chan *FilteredBlock staleBlocks chan *FilteredBlock
@ -92,7 +92,7 @@ type graphPruneView struct {
// consulted during path finding. If a vertex/edge is found within the returned // consulted during path finding. If a vertex/edge is found within the returned
// prune view, it is to be ignored as a goroutine has had issues routing // prune view, it is to be ignored as a goroutine has had issues routing
// through it successfully. Within this method the main view of the // through it successfully. Within this method the main view of the
// missionControl is garbage collected as entires are detected to be "stale". // missionControl is garbage collected as entries are detected to be "stale".
func (m *missionControl) GraphPruneView() graphPruneView { func (m *missionControl) GraphPruneView() graphPruneView {
// First, we'll grab the current time, this value will be used to // First, we'll grab the current time, this value will be used to
// determine if an entry is stale or not. // determine if an entry is stale or not.
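The garbage collection described here boils down to a time-based sweep. A minimal sketch with illustrative (not lnd's actual) field names:

import "time"

// pruneStale drops any failure record older than the decay interval.
func pruneStale(failures map[uint64]time.Time, decay time.Duration) {
	now := time.Now()
	for chanID, lastFail := range failures {
		if now.Sub(lastFail) >= decay {
			// Stale entry: the channel gets another chance.
			delete(failures, chanID)
		}
	}
}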

@ -135,7 +135,7 @@ func (r *ChannelRouter) notifyTopologyChange(topologyDiff *TopologyChange) {
// directly to the upstream client consumer. // directly to the upstream client consumer.
case c.ntfnChan <- topologyDiff: case c.ntfnChan <- topologyDiff:
// If the client cancel's the notifications, then we'll // If the client cancels the notifications, then we'll
// exit early. // exit early.
case <-c.exit: case <-c.exit:

@ -347,7 +347,7 @@ func TestEdgeUpdateNotification(t *testing.T) {
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(), bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
chanValue, 0) chanValue, 0)
if err != nil { if err != nil {
t.Fatalf("unbale create channel edge: %v", err) t.Fatalf("unable create channel edge: %v", err)
} }
// We'll also add a record for the block that included our funding // We'll also add a record for the block that included our funding
@ -871,7 +871,7 @@ func TestChannelCloseNotification(t *testing.T) {
if len(closedChans) == 0 { if len(closedChans) == 0 {
t.Fatal("close channel ntfn not populated") t.Fatal("close channel ntfn not populated")
} else if len(closedChans) != 1 { } else if len(closedChans) != 1 {
t.Fatalf("only one should've been detected as closed, "+ t.Fatalf("only one should have been detected as closed, "+
"instead %v were", len(closedChans)) "instead %v were", len(closedChans))
} }

@ -120,7 +120,7 @@ func makeTestGraph() (*channeldb.ChannelGraph, func(), error) {
} }
// aliasMap is a map from a node's alias to its public key. This type is // aliasMap is a map from a node's alias to its public key. This type is
// provided in order to allow easily look up from the human rememberable alias // provided in order to allow an easy lookup from the human-memorable alias
// to an exact node's public key. // to an exact node's public key.
type aliasMap map[string]*btcec.PublicKey type aliasMap map[string]*btcec.PublicKey
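A short usage sketch; the key variables stand in for *btcec.PublicKey fixtures from the test graph:

aliases := aliasMap{
	"roasbeef": roasbeefKey,
	"luo ji":   luojiKey,
}
target := aliases["luo ji"] // resolve a human-readable alias to its key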
@ -498,9 +498,9 @@ func TestKShortestPathFinding(t *testing.T) {
t.Fatalf("unable to fetch source node: %v", err) t.Fatalf("unable to fetch source node: %v", err)
} }
// In this test we'd like to ensure that our algoirthm to find the // In this test we'd like to ensure that our algorithm to find the
// k-shortest paths from a given source node to any destination node // k-shortest paths from a given source node to any destination node
// works as exepcted. // works as expected.
// In our basic_graph.json, there exist two paths from roasbeef to luo // In our basic_graph.json, there exist two paths from roasbeef to luo
// ji. Our algorithm should properly find both paths, and also rank // ji. Our algorithm should properly find both paths, and also rank
@ -514,13 +514,13 @@ func TestKShortestPathFinding(t *testing.T) {
"luo ji: %v", err) "luo ji: %v", err)
} }
// The algorithm should've found two paths from roasbeef to luo ji. // The algorithm should have found two paths from roasbeef to luo ji.
if len(paths) != 2 { if len(paths) != 2 {
t.Fatalf("two path shouldn't been found, instead %v were", t.Fatalf("two path shouldn't been found, instead %v were",
len(paths)) len(paths))
} }
// Additinoally, the total hop length of the first path returned should // Additionally, the total hop length of the first path returned should
// be _less_ than that of the second path returned. // be _less_ than that of the second path returned.
if len(paths[0]) > len(paths[1]) { if len(paths[0]) > len(paths[1]) {
t.Fatalf("paths found not ordered properly") t.Fatalf("paths found not ordered properly")
@ -566,7 +566,7 @@ func TestNewRoutePathTooLong(t *testing.T) {
paymentAmt := lnwire.NewMSatFromSatoshis(100) paymentAmt := lnwire.NewMSatFromSatoshis(100)
// We start by confirminig that routing a payment 20 hops away is possible. // We start by confirming that routing a payment 20 hops away is possible.
// Alice should be able to find a valid route to ursula. // Alice should be able to find a valid route to ursula.
target := aliases["ursula"] target := aliases["ursula"]
_, err = findPath(nil, graph, sourceNode, target, ignoredVertexes, _, err = findPath(nil, graph, sourceNode, target, ignoredVertexes,
@ -705,7 +705,7 @@ func TestRouteFailDisabledEdge(t *testing.T) {
ignoredVertexes := make(map[Vertex]struct{}) ignoredVertexes := make(map[Vertex]struct{})
// First, we'll try to route from roasbeef -> songoku. This should // First, we'll try to route from roasbeef -> songoku. This should
// suceed without issue, and return a single path. // succeed without issue, and return a single path.
target := aliases["songoku"] target := aliases["songoku"]
payAmt := lnwire.NewMSatFromSatoshis(10000) payAmt := lnwire.NewMSatFromSatoshis(10000)
_, err = findPath(nil, graph, sourceNode, target, ignoredVertexes, _, err = findPath(nil, graph, sourceNode, target, ignoredVertexes,
@ -726,7 +726,7 @@ func TestRouteFailDisabledEdge(t *testing.T) {
} }
// Now, if we attempt to route through that edge, we should get a // Now, if we attempt to route through that edge, we should get a
// failure as it is no longer elligble. // failure as it is no longer eligible.
_, err = findPath(nil, graph, sourceNode, target, ignoredVertexes, _, err = findPath(nil, graph, sourceNode, target, ignoredVertexes,
ignoredEdges, payAmt) ignoredEdges, payAmt)
if !IsError(err, ErrNoPathFound) { if !IsError(err, ErrNoPathFound) {
@ -792,7 +792,7 @@ func TestPathFindSpecExample(t *testing.T) {
// Now we'll examine the first route returned for correctness. // Now we'll examine the first route returned for correctness.
// //
// It should be sending the exact payment amount as there're no // It should be sending the exact payment amount as there are no
// additional hops. // additional hops.
firstRoute := routes[0] firstRoute := routes[0]
if firstRoute.TotalAmount != amt { if firstRoute.TotalAmount != amt {

@ -79,7 +79,7 @@ type ChannelGraphSource interface {
e1, e2 *channeldb.ChannelEdgePolicy) error) error e1, e2 *channeldb.ChannelEdgePolicy) error) error
} }
// FeeSchema is the set fee configuration for a Lighting Node on the network. // FeeSchema is the set fee configuration for a Lightning Node on the network.
// Using the coefficients described within the schema, the required fee to // Using the coefficients described within the schema, the required fee to
// forward outgoing payments can be derived. // forward outgoing payments can be derived.
type FeeSchema struct { type FeeSchema struct {
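The derivation the comment refers to is the usual base-plus-proportional formula, with the proportional coefficient expressed in millionths. A minimal sketch; the parameter names are illustrative, not the schema's actual field names:

// forwardingFee computes the fee to forward amt given a fee schema:
// fee = base + amt * rate / 1,000,000.
func forwardingFee(baseFee lnwire.MilliSatoshi, feeRateMillionths uint32,
	amt lnwire.MilliSatoshi) lnwire.MilliSatoshi {

	return baseFee + (amt*lnwire.MilliSatoshi(feeRateMillionths))/1000000
}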
@ -96,7 +96,7 @@ type FeeSchema struct {
} }
// ChannelPolicy holds the parameters that determine the policy we enforce // ChannelPolicy holds the parameters that determine the policy we enforce
// when fowarding payments on a channel. These parameters are communicated // when forwarding payments on a channel. These parameters are communicated
// to the rest of the network in ChannelUpdate messages. // to the rest of the network in ChannelUpdate messages.
type ChannelPolicy struct { type ChannelPolicy struct {
// FeeSchema holds the fee configuration for a channel. // FeeSchema holds the fee configuration for a channel.
@ -471,7 +471,7 @@ func (r *ChannelRouter) syncGraphWithChain() error {
return err return err
} }
// We're only interested in all prior outputs that've been // We're only interested in all prior outputs that have been
// spent in the block, so collate all the referenced previous // spent in the block, so collate all the referenced previous
// outpoints within each tx and input. // outpoints within each tx and input.
var spentOutputs []*wire.OutPoint var spentOutputs []*wire.OutPoint
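The collation step reads roughly as follows, assuming block is the *wire.MsgBlock fetched above:

for _, tx := range block.Transactions {
	for _, txIn := range tx.TxIn {
		// Record every previous outpoint spent within this block.
		spentOutputs = append(spentOutputs, &txIn.PreviousOutPoint)
	}
}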
@ -529,7 +529,7 @@ func (r *ChannelRouter) networkHandler() {
// We'll set up any dependants, and wait until a free // We'll set up any dependants, and wait until a free
// slot for this job opens up, this allow us to not // slot for this job opens up; this allows us to not
// have thousands of goroutines active. // have thousands of goroutines active.
validationBarrier.InitJobDependancies(updateMsg.msg) validationBarrier.InitJobDependencies(updateMsg.msg)
go func() { go func() {
defer validationBarrier.CompleteJob() defer validationBarrier.CompleteJob()
@ -632,7 +632,7 @@ func (r *ChannelRouter) networkHandler() {
log.Infof("Pruning channel graph using block %v (height=%v)", log.Infof("Pruning channel graph using block %v (height=%v)",
chainUpdate.Hash, blockHeight) chainUpdate.Hash, blockHeight)
// We're only interested in all prior outputs that've // We're only interested in all prior outputs that have
// been spent in the block, so collate all the // been spent in the block, so collate all the
// referenced previous outpoints within each tx and // referenced previous outpoints within each tx and
// input. // input.
@ -730,7 +730,7 @@ func (r *ChannelRouter) networkHandler() {
// We'll ensure that we don't attempt to prune // We'll ensure that we don't attempt to prune
// our *own* channels from the graph, as in any // our *own* channels from the graph, as in any
// case this shuold be re-advertised by the // case this should be re-advertised by the
// sub-system above us. // sub-system above us.
if info.NodeKey1.IsEqual(r.selfNode.PubKey) || if info.NodeKey1.IsEqual(r.selfNode.PubKey) ||
info.NodeKey2.IsEqual(r.selfNode.PubKey) { info.NodeKey2.IsEqual(r.selfNode.PubKey) {
@ -1572,7 +1572,7 @@ func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *Route
// As this error indicates that the target // As this error indicates that the target
// channel was unable to carry this HTLC (for // channel was unable to carry this HTLC (for
// w/e reason), we'll query the index to find // w/e reason), we'll query the index to find
// the _outgoign_ channel the source of the // the _outgoing_ channel the source of the
// error was meant to pass the HTLC along to. // error was meant to pass the HTLC along to.
badChan, ok := route.nextHopChannel(errSource) badChan, ok := route.nextHopChannel(errSource)
if !ok { if !ok {
@ -1648,7 +1648,7 @@ func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *Route
// As this error indicates that the target // As this error indicates that the target
// channel was unable to carry this HTLC (for // channel was unable to carry this HTLC (for
// w/e reason), we'll query the index to find // w/e reason), we'll query the index to find
// the _outgoign_ channel the source of the // the _outgoing_ channel the source of the
// error was meant to pass the HTLC along to. // error was meant to pass the HTLC along to.
badChan, ok := route.nextHopChannel(errSource) badChan, ok := route.nextHopChannel(errSource)
if !ok { if !ok {
@ -1671,7 +1671,7 @@ func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *Route
continue continue
case *lnwire.FailPermanentNodeFailure: case *lnwire.FailPermanentNodeFailure:
// TODO(rosabeef): remove node from path // TODO(roasbeef): remove node from path
continue continue
default: default:

@ -26,17 +26,17 @@ type ValidationBarrier struct {
// dependants. // dependants.
chanAnnFinSignal map[lnwire.ShortChannelID]chan struct{} chanAnnFinSignal map[lnwire.ShortChannelID]chan struct{}
// chanEdgeDependancies tracks any channel edge updates which should // chanEdgeDependencies tracks any channel edge updates which should
// wait until the completion of the ChannelAnnouncement before // wait until the completion of the ChannelAnnouncement before
// proceeding. This is a dependency, as we can't validate the update // proceeding. This is a dependency, as we can't validate the update
// before we validate the announcement which creates the channel // before we validate the announcement which creates the channel
// itself. // itself.
chanEdgeDependancies map[lnwire.ShortChannelID]chan struct{} chanEdgeDependencies map[lnwire.ShortChannelID]chan struct{}
// nodeAnnDependancies tracks any pending NodeAnnouncement validation // nodeAnnDependencies tracks any pending NodeAnnouncement validation
// jobs which should wait until the completion of the // jobs which should wait until the completion of the
// ChannelAnnouncement before proceeding. // ChannelAnnouncement before proceeding.
nodeAnnDependancies map[Vertex]chan struct{} nodeAnnDependencies map[Vertex]chan struct{}
quit chan struct{} quit chan struct{}
sync.Mutex sync.Mutex
@ -50,8 +50,8 @@ func NewValidationBarrier(numActiveReqs int,
v := &ValidationBarrier{ v := &ValidationBarrier{
chanAnnFinSignal: make(map[lnwire.ShortChannelID]chan struct{}), chanAnnFinSignal: make(map[lnwire.ShortChannelID]chan struct{}),
chanEdgeDependancies: make(map[lnwire.ShortChannelID]chan struct{}), chanEdgeDependencies: make(map[lnwire.ShortChannelID]chan struct{}),
nodeAnnDependancies: make(map[Vertex]chan struct{}), nodeAnnDependencies: make(map[Vertex]chan struct{}),
quit: quitChan, quit: quitChan,
} }
@ -65,9 +65,9 @@ func NewValidationBarrier(numActiveReqs int,
return v return v
} }
// InitJobDependancies will wait for a new job slot to become open, and then // InitJobDependencies will wait for a new job slot to become open, and then
// sets up any dependant signals/trigger for the new job // sets up any dependent signals/triggers for the new job.
func (v *ValidationBarrier) InitJobDependancies(job interface{}) { func (v *ValidationBarrier) InitJobDependencies(job interface{}) {
// We'll wait for either a new slot to become open, or for the quit // We'll wait for either a new slot to become open, or for the quit
// channel to be closed. // channel to be closed.
select { select {
@ -79,7 +79,7 @@ func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
defer v.Unlock() defer v.Unlock()
// Once a slot is open, we'll examine the message of the job, to see if // Once a slot is open, we'll examine the message of the job, to see if
// there need to be any dependant barriers set up. // there need to be any dependent barriers set up.
switch msg := job.(type) { switch msg := job.(type) {
// If this is a channel announcement, then we'll need to set up den // If this is a channel announcement, then we'll need to set up den
@ -93,7 +93,7 @@ func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
// one doesn't already exist, as there may be duplicate // one doesn't already exist, as there may be duplicate
// announcements. We'll close this signal once the // announcements. We'll close this signal once the
// ChannelAnnouncement has been validated. This will result in // ChannelAnnouncement has been validated. This will result in
// all the dependant jobs being unlocked so they can finish // all the dependent jobs being unlocked so they can finish
// execution themselves. // execution themselves.
if _, ok := v.chanAnnFinSignal[msg.ShortChannelID]; !ok { if _, ok := v.chanAnnFinSignal[msg.ShortChannelID]; !ok {
// We'll create the channel that we close after we // We'll create the channel that we close after we
@ -102,10 +102,10 @@ func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
// at the same time. // at the same time.
annFinCond := make(chan struct{}) annFinCond := make(chan struct{})
v.chanAnnFinSignal[msg.ShortChannelID] = annFinCond v.chanAnnFinSignal[msg.ShortChannelID] = annFinCond
v.chanEdgeDependancies[msg.ShortChannelID] = annFinCond v.chanEdgeDependencies[msg.ShortChannelID] = annFinCond
v.nodeAnnDependancies[NewVertex(msg.NodeID1)] = annFinCond v.nodeAnnDependencies[NewVertex(msg.NodeID1)] = annFinCond
v.nodeAnnDependancies[NewVertex(msg.NodeID2)] = annFinCond v.nodeAnnDependencies[NewVertex(msg.NodeID2)] = annFinCond
} }
case *channeldb.ChannelEdgeInfo: case *channeldb.ChannelEdgeInfo:
@ -114,10 +114,10 @@ func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
annFinCond := make(chan struct{}) annFinCond := make(chan struct{})
v.chanAnnFinSignal[shortID] = annFinCond v.chanAnnFinSignal[shortID] = annFinCond
v.chanEdgeDependancies[shortID] = annFinCond v.chanEdgeDependencies[shortID] = annFinCond
v.nodeAnnDependancies[NewVertex(msg.NodeKey1)] = annFinCond v.nodeAnnDependencies[NewVertex(msg.NodeKey1)] = annFinCond
v.nodeAnnDependancies[NewVertex(msg.NodeKey2)] = annFinCond v.nodeAnnDependencies[NewVertex(msg.NodeKey2)] = annFinCond
} }
// These other types don't have any dependants, so no further // These other types don't have any dependants, so no further
@ -149,8 +149,8 @@ func (v *ValidationBarrier) CompleteJob() {
// WaitForDependants will block until any jobs that this job dependants on have // WaitForDependants will block until any jobs that this job depends on have
// finished executing. This allows us a graceful way to schedule goroutines // finished executing. This allows us a graceful way to schedule goroutines
// based on any pending uncompleted dependant jobs. If this job doesn't have an // based on any pending uncompleted dependent jobs. If this job doesn't have an
// active dependant, then this function will return immediately. // active dependent, then this function will return immediately.
func (v *ValidationBarrier) WaitForDependants(job interface{}) { func (v *ValidationBarrier) WaitForDependants(job interface{}) {
var ( var (
@ -165,15 +165,15 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) {
// completion of any active ChannelAnnouncement jobs related to them. // completion of any active ChannelAnnouncement jobs related to them.
case *channeldb.ChannelEdgePolicy: case *channeldb.ChannelEdgePolicy:
shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID) shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
signal, ok = v.chanEdgeDependancies[shortID] signal, ok = v.chanEdgeDependencies[shortID]
case *channeldb.LightningNode: case *channeldb.LightningNode:
vertex := NewVertex(msg.PubKey) vertex := NewVertex(msg.PubKey)
signal, ok = v.nodeAnnDependancies[vertex] signal, ok = v.nodeAnnDependencies[vertex]
case *lnwire.ChannelUpdate: case *lnwire.ChannelUpdate:
signal, ok = v.chanEdgeDependancies[msg.ShortChannelID] signal, ok = v.chanEdgeDependencies[msg.ShortChannelID]
case *lnwire.NodeAnnouncement: case *lnwire.NodeAnnouncement:
vertex := NewVertex(msg.NodeID) vertex := NewVertex(msg.NodeID)
signal, ok = v.nodeAnnDependancies[vertex] signal, ok = v.nodeAnnDependencies[vertex]
// Other types of jobs can be executed immediately, so we'll just // Other types of jobs can be executed immediately, so we'll just
// return directly. // return directly.
@ -201,7 +201,7 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) {
} }
} }
// SignalDependants will signal any jobs that are dependant on this job that // SignalDependants will signal any jobs that are dependent on this job that
// they can continue execution. If the job doesn't have any dependants, then // they can continue execution. If the job doesn't have any dependants, then
// this function sill exit immediately. // this function will exit immediately.
func (v *ValidationBarrier) SignalDependants(job interface{}) { func (v *ValidationBarrier) SignalDependants(job interface{}) {
@ -212,7 +212,7 @@ func (v *ValidationBarrier) SignalDependants(job interface{}) {
// If we've just finished executing a ChannelAnnouncement, then we'll // If we've just finished executing a ChannelAnnouncement, then we'll
// close out the signal, and remove the signal from the map of active // close out the signal, and remove the signal from the map of active
// ones. This will allow any dependant jobs to continue execution. // ones. This will allow any dependent jobs to continue execution.
case *channeldb.ChannelEdgeInfo: case *channeldb.ChannelEdgeInfo:
shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID) shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
finSignal, ok := v.chanAnnFinSignal[shortID] finSignal, ok := v.chanAnnFinSignal[shortID]
@ -227,20 +227,20 @@ func (v *ValidationBarrier) SignalDependants(job interface{}) {
delete(v.chanAnnFinSignal, msg.ShortChannelID) delete(v.chanAnnFinSignal, msg.ShortChannelID)
} }
delete(v.chanEdgeDependancies, msg.ShortChannelID) delete(v.chanEdgeDependencies, msg.ShortChannelID)
// For all other job types, we'll delete the tracking entries from the // For all other job types, we'll delete the tracking entries from the
// map, as if we reach this point, then all dependants have already // map, as if we reach this point, then all dependants have already
// finished executing and we can proceed. // finished executing and we can proceed.
case *channeldb.LightningNode: case *channeldb.LightningNode:
delete(v.nodeAnnDependancies, NewVertex(msg.PubKey)) delete(v.nodeAnnDependencies, NewVertex(msg.PubKey))
case *lnwire.NodeAnnouncement: case *lnwire.NodeAnnouncement:
delete(v.nodeAnnDependancies, NewVertex(msg.NodeID)) delete(v.nodeAnnDependencies, NewVertex(msg.NodeID))
case *lnwire.ChannelUpdate: case *lnwire.ChannelUpdate:
delete(v.chanEdgeDependancies, msg.ShortChannelID) delete(v.chanEdgeDependencies, msg.ShortChannelID)
case *channeldb.ChannelEdgePolicy: case *channeldb.ChannelEdgePolicy:
shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID) shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
delete(v.chanEdgeDependancies, shortID) delete(v.chanEdgeDependencies, shortID)
case *lnwire.AnnounceSignatures: case *lnwire.AnnounceSignatures:
return return
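Stripped of the type switches, the barrier reduces to the close-to-broadcast channel idiom: the parent job owns a channel, every dependent blocks on it, and a single close releases them all. A condensed sketch with hypothetical helper names (validateAnnouncement, validateUpdate, quit):

annFinCond := make(chan struct{})

// Dependent job: block until the parent announcement validates.
go func() {
	select {
	case <-annFinCond:
	case <-quit:
		return
	}
	validateUpdate()
}()

// Parent job: validating the ChannelAnnouncement releases every waiter
// with a single close.
validateAnnouncement()
close(annFinCond)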

@ -356,14 +356,14 @@ func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64,
// determineFeePerByte will determine the fee in sat/byte that should be paid // determineFeePerByte will determine the fee in sat/byte that should be paid
// given an estimator, a confirmation target, and a manual value for sat/byte. // given an estimator, a confirmation target, and a manual value for sat/byte.
// A value is chosen based on the two free paramters as one, or both of them // A value is chosen based on the two free parameters, as one or both of them
// can be zero. // can be zero.
func determineFeePerByte(feeEstimator lnwallet.FeeEstimator, targetConf int32, func determineFeePerByte(feeEstimator lnwallet.FeeEstimator, targetConf int32,
satPerByte int64) (btcutil.Amount, error) { satPerByte int64) (btcutil.Amount, error) {
switch { switch {
// If the target number of confirmations is set, then we'll use that to // If the target number of confirmations is set, then we'll use that to
// consult our fee estimator for an adquate fee. // consult our fee estimator for an adequate fee.
case targetConf != 0: case targetConf != 0:
satPerByte, err := feeEstimator.EstimateFeePerByte( satPerByte, err := feeEstimator.EstimateFeePerByte(
uint32(targetConf), uint32(targetConf),
@ -375,7 +375,7 @@ func determineFeePerByte(feeEstimator lnwallet.FeeEstimator, targetConf int32,
return btcutil.Amount(satPerByte), nil return btcutil.Amount(satPerByte), nil
// If a manual sat/byte fee rate is set, then we'll use that diretly. // If a manual sat/byte fee rate is set, then we'll use that directly.
case satPerByte != 0: case satPerByte != 0:
return btcutil.Amount(satPerByte), nil return btcutil.Amount(satPerByte), nil
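A compact restatement of the selection order, with a plain function standing in for lnwallet.FeeEstimator; the fallback confirmation target in the default arm is an assumption for the sketch, not lnd's documented value:

func pickFeePerByte(estimate func(uint32) (btcutil.Amount, error),
	targetConf int32, satPerByte int64) (btcutil.Amount, error) {

	switch {
	// A confirmation target takes priority: consult the estimator.
	case targetConf != 0:
		return estimate(uint32(targetConf))

	// Otherwise an explicit sat/byte value is used directly.
	case satPerByte != 0:
		return btcutil.Amount(satPerByte), nil

	// With neither set, fall back to a default confirmation target.
	default:
		return estimate(6)
	}
}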
@ -397,8 +397,8 @@ func determineFeePerByte(feeEstimator lnwallet.FeeEstimator, targetConf int32,
func (r *rpcServer) SendCoins(ctx context.Context, func (r *rpcServer) SendCoins(ctx context.Context,
in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) { in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {
// Based on the passed fee related paramters, we'll determine an // Based on the passed fee related parameters, we'll determine an
// approriate fee rate for this transaction. // appropriate fee rate for this transaction.
feePerByte, err := determineFeePerByte( feePerByte, err := determineFeePerByte(
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte, r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
) )
@ -425,7 +425,7 @@ func (r *rpcServer) SendCoins(ctx context.Context,
func (r *rpcServer) SendMany(ctx context.Context, func (r *rpcServer) SendMany(ctx context.Context,
in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) { in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {
// Based on the passed fee related paramters, we'll determine an // Based on the passed fee related parameters, we'll determine an
// approriate fee rate for this transaction. // appropriate fee rate for this transaction.
feePerByte, err := determineFeePerByte( feePerByte, err := determineFeePerByte(
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte, r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
@ -699,9 +699,9 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
const minChannelSize = btcutil.Amount(6000) const minChannelSize = btcutil.Amount(6000)
// Restrict the size of the channel we'll actually open. Atm, we // Restrict the size of the channel we'll actually open. Atm, we
// require the amount to be above 6k satoahis s we currently hard-coded // require the amount to be above 6k satoshis as we currently hard-coded
// a 5k satoshi fee in several areas. As a result 6k sat is the min // a 5k satoshi fee in several areas. As a result 6k sat is the min
// channnel size that allows us to safely sit above the dust threshold // channel size that allows us to safely sit above the dust threshold
// after fees are applied // after fees are applied
// TODO(roasbeef): remove after dynamic fees are in // TODO(roasbeef): remove after dynamic fees are in
if localFundingAmt < minChannelSize { if localFundingAmt < minChannelSize {
@ -735,8 +735,8 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
nodePubKeyBytes = nodePubKey.SerializeCompressed() nodePubKeyBytes = nodePubKey.SerializeCompressed()
} }
// Based on the passed fee related paramters, we'll determine an // Based on the passed fee related parameters, we'll determine an
// approriate fee rate for the funding transaction. // appropriate fee rate for the funding transaction.
feePerByte, err := determineFeePerByte( feePerByte, err := determineFeePerByte(
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte, r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
) )
@ -858,7 +858,7 @@ func (r *rpcServer) OpenChannelSync(ctx context.Context,
"initial state must be below the local funding amount") "initial state must be below the local funding amount")
} }
// Based on the passed fee related paramters, we'll determine an // Based on the passed fee related parameters, we'll determine an
// appropriate fee rate for the funding transaction. // appropriate fee rate for the funding transaction.
feePerByte, err := determineFeePerByte( feePerByte, err := determineFeePerByte(
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte, r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
@ -1272,7 +1272,7 @@ func (r *rpcServer) WalletBalance(ctx context.Context,
return nil, err return nil, err
} }
// Get uncomfirmed balance, from txs with 0 confirmations. // Get unconfirmed balance, from txs with 0 confirmations.
unconfirmedBal := totalBal - confirmedBal unconfirmedBal := totalBal - confirmedBal
rpcsLog.Debugf("[walletbalance] Total balance=%v", totalBal) rpcsLog.Debugf("[walletbalance] Total balance=%v", totalBal)
@ -1524,7 +1524,7 @@ func (r *rpcServer) ListChannels(ctx context.Context,
localBalance := localCommit.LocalBalance localBalance := localCommit.LocalBalance
remoteBalance := localCommit.RemoteBalance remoteBalance := localCommit.RemoteBalance
// As an artefact of our usage of mSAT internally, either party // As an artifact of our usage of mSAT internally, either party
// may end up in a state where they're holding a fractional // may end up in a state where they're holding a fractional
// amount of satoshis which can't be expressed within the // amount of satoshis which can't be expressed within the
// actual commitment output. Since we round down when going // actual commitment output. Since we round down when going
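The rounding in question, sketched with lnwire.MilliSatoshi's ToSatoshis helper:

msat := lnwire.MilliSatoshi(1000999)
sats := msat.ToSatoshis()
// sats == 1000 sat; the trailing 999 msat cannot be expressed in the
// commitment output, so they are treated as fees instead.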
@ -2195,7 +2195,7 @@ func createRPCInvoice(invoice *channeldb.Invoice) (*lnrpc.Invoice, error) {
}, nil }, nil
} }
// LookupInvoice attemps to look up an invoice according to its payment hash. // LookupInvoice attempts to look up an invoice according to its payment hash.
// The passed payment hash *must* be exactly 32 bytes, if not an error is // The passed payment hash *must* be exactly 32 bytes; if not, an error is
// returned. // returned.
func (r *rpcServer) LookupInvoice(ctx context.Context, func (r *rpcServer) LookupInvoice(ctx context.Context,
@ -2533,13 +2533,13 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
// edges to gather some basic statistics about its out going channels. // edges to gather some basic statistics about its outgoing channels.
var ( var (
numChannels uint32 numChannels uint32
totalCapcity btcutil.Amount totalCapacity btcutil.Amount
) )
if err := node.ForEachChannel(nil, func(_ *bolt.Tx, edge *channeldb.ChannelEdgeInfo, if err := node.ForEachChannel(nil, func(_ *bolt.Tx, edge *channeldb.ChannelEdgeInfo,
_, _ *channeldb.ChannelEdgePolicy) error { _, _ *channeldb.ChannelEdgePolicy) error {
numChannels++ numChannels++
totalCapcity += edge.Capacity totalCapacity += edge.Capacity
return nil return nil
}); err != nil { }); err != nil {
return nil, err return nil, err
@ -2565,7 +2565,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
Color: nodeColor, Color: nodeColor,
}, },
NumChannels: numChannels, NumChannels: numChannels,
TotalCapacity: int64(totalCapcity), TotalCapacity: int64(totalCapacity),
}, nil }, nil
} }
@ -2573,7 +2573,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
// route to a target destination capable of carrying a specific amount of // route to a target destination capable of carrying a specific amount of
// satoshis within the route's flow. The retuned route contains the full // satoshis within the route's flow. The returned route contains the full
// details required to craft and send an HTLC, also including the necessary // details required to craft and send an HTLC, also including the necessary
// information that should be present within the Sphinx packet encapsualted // information that should be present within the Sphinx packet encapsulated
// within the HTLC. // within the HTLC.
// //
// TODO(roasbeef): should return a slice of routes in reality // TODO(roasbeef): should return a slice of routes in reality
@ -2581,7 +2581,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
func (r *rpcServer) QueryRoutes(ctx context.Context, func (r *rpcServer) QueryRoutes(ctx context.Context,
in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) { in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) {
// First parse the hex-encdoed public key into a full public key objet // First parse the hex-encoded public key into a full public key object
// we can properly manipulate. // we can properly manipulate.
pubKeyBytes, err := hex.DecodeString(in.PubKey) pubKeyBytes, err := hex.DecodeString(in.PubKey)
if err != nil { if err != nil {
@ -2817,7 +2817,7 @@ func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription,
} }
} }
// marshallTopologyChange performs a mapping from the topology change sturct // marshallTopologyChange performs a mapping from the topology change struct
// returned by the router to the form of notifications expected by the current // returned by the router to the form of notifications expected by the current
// gRPC service. // gRPC service.
func marshallTopologyChange(topChange *routing.TopologyChange) *lnrpc.GraphTopologyUpdate { func marshallTopologyChange(topChange *routing.TopologyChange) *lnrpc.GraphTopologyUpdate {
@ -3066,7 +3066,7 @@ func (r *rpcServer) FeeReport(ctx context.Context,
} }
// minFeeRate is the smallest permitted fee rate within the network. This is // minFeeRate is the smallest permitted fee rate within the network. This is
// dervied by the fact that fee rates are computed using a fixed point of // derived by the fact that fee rates are computed using a fixed point of
// 1,000,000. As a result, the smallest representable fee rate is 1e-6, or // 1,000,000. As a result, the smallest representable fee rate is 1e-6, or
// 0.000001, or 0.0001%. // 0.000001, or 0.0001%.
const minFeeRate = 1e-6 const minFeeRate = 1e-6
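In other words, fee rates travel as parts-per-million, so the smallest non-zero value works out to exactly minFeeRate:

feeRateParts := uint32(1) // 1 millionth, the smallest non-zero rate
feeRate := float64(feeRateParts) / 1000000
// feeRate == 1e-6 == minFeeRate, i.e. 0.0001%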

@ -175,7 +175,7 @@ func newServer(listenAddrs []string, chanDB *channeldb.DB, cc *chainControl,
s.witnessBeacon = &preimageBeacon{ s.witnessBeacon = &preimageBeacon{
invoices: s.invoices, invoices: s.invoices,
wCache: chanDB.NewWitnessCache(), wCache: chanDB.NewWitnessCache(),
subscribers: make(map[uint64]*preimageSubcriber), subscribers: make(map[uint64]*preimageSubscriber),
} }
// If the debug HTLC flag is on, then we invoice a "master debug" // If the debug HTLC flag is on, then we invoice a "master debug"
@ -579,7 +579,7 @@ func (s *server) WaitForShutdown() {
// based on the server, and currently active bootstrap mechanisms as defined // based on the server, and currently active bootstrap mechanisms as defined
// within the current configuration. // within the current configuration.
func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, error) { func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, error) {
srvrLog.Infof("Initializing peer network boostrappers!") srvrLog.Infof("Initializing peer network bootstrappers!")
var bootStrappers []discovery.NetworkPeerBootstrapper var bootStrappers []discovery.NetworkPeerBootstrapper
@ -599,9 +599,9 @@ func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, e
dnsSeeds, ok := chainDNSSeeds[*activeNetParams.GenesisHash] dnsSeeds, ok := chainDNSSeeds[*activeNetParams.GenesisHash]
// If we have a set of DNS seeds for this chain, then we'll add // If we have a set of DNS seeds for this chain, then we'll add
// it as an additional boostrapping source. // it as an additional bootstrapping source.
if ok { if ok {
srvrLog.Infof("Creating DNS peer boostrapper with "+ srvrLog.Infof("Creating DNS peer bootstrapper with "+
"seeds: %v", dnsSeeds) "seeds: %v", dnsSeeds)
dnsBootStrapper, err := discovery.NewDNSSeedBootstrapper( dnsBootStrapper, err := discovery.NewDNSSeedBootstrapper(
@ -670,7 +670,7 @@ func (s *server) peerBootstrapper(numTargetPeers uint32,
// We'll start with a 15 second backoff, and double the time every time // We'll start with a 15 second backoff, and double the time every time
// an epoch fails up to a ceiling. // an epoch fails up to a ceiling.
const backOffCeliing = time.Minute * 5 const backOffCeiling = time.Minute * 5
backOff := time.Second * 15 backOff := time.Second * 15
// We'll create a new ticker to wake us up every 15 seconds so we can // We'll create a new ticker to wake us up every 15 seconds so we can
@ -712,8 +712,8 @@ func (s *server) peerBootstrapper(numTargetPeers uint32,
sampleTicker.Stop() sampleTicker.Stop()
backOff *= 2 backOff *= 2
if backOff > backOffCeliing { if backOff > backOffCeiling {
backOff = backOffCeliing backOff = backOffCeiling
} }
srvrLog.Debugf("Backing off peer bootstrapper to "+ srvrLog.Debugf("Backing off peer bootstrapper to "+

@ -30,7 +30,7 @@ func newElementFromStr(s string, index index) (*element, error) {
} }
// derive computes one shachain element from another by applying a series of // derive computes one shachain element from another by applying a series of
// bit flips and hasing operations based on the starting and ending index. // bit flips and hashing operations based on the starting and ending index.
func (e *element) derive(toIndex index) (*element, error) { func (e *element) derive(toIndex index) (*element, error) {
fromIndex := e.index fromIndex := e.index
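Conceptually, the derivation flips each bit position where the two indexes differ, most significant first, hashing after every flip. A rough sketch only, not lnd's exact code: the real implementation verifies derivability via the indexes' trailing-zero counts and maps index bits onto hash bits differently.

import "crypto/sha256"

// deriveSketch assumes toIndex is actually derivable from fromIndex.
func deriveSketch(from [32]byte, fromIndex, toIndex uint64) [32]byte {
	buf := from
	for b := int(maxHeight) - 1; b >= 0; b-- {
		if (fromIndex^toIndex)&(1<<uint(b)) != 0 {
			// Flip bit b (MSB-first within its byte), then hash.
			buf[b/8] ^= 1 << uint(7-b%8)
			buf = sha256.Sum256(buf[:])
		}
	}
	return buf
}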
@ -71,7 +71,7 @@ func (e *element) isEqual(e2 *element) bool {
const ( const (
// maxHeight is used to determine the maximum allowable index and the // maxHeight is used to determine the maximum allowable index and the
// length of the array required to order to derive all previous hashes // length of the array required in order to derive all previous hashes
// by index. The entries of this array as also knowns as buckets. // by index. The entries of this array are also known as buckets.
maxHeight uint8 = 48 maxHeight uint8 = 48
// rootIndex is an index which corresponds to the root hash. // rootIndex is an index which corresponds to the root hash.

@ -257,7 +257,7 @@ func TestSpecificationDeriveElement(t *testing.T) {
"but it's not", test.name) "but it's not", test.name)
} }
// Generate element which we should get after deriviation. // Generate element which we should get after derivation.
output, err := newElementFromStr(test.output, test.index) output, err := newElementFromStr(test.output, test.index)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

@ -35,7 +35,7 @@ type Store interface {
} }
// RevocationStore is a concrete implementation of the Store interface. The // RevocationStore is a concrete implementation of the Store interface. The
// revocation store is able to efficiently store N derived shahain elements in // revocation store is able to efficiently store N derived shachain elements in
// a space efficient manner with a space complexity of O(log N). The original // a space efficient manner with a space complexity of O(log N). The original
// description of the storage methodology can be found here: // description of the storage methodology can be found here:
// https://github.com/lightningnetwork/lightning-rfc/blob/master/03-transactions.md#efficient-per-commitment-secret-storage // https://github.com/lightningnetwork/lightning-rfc/blob/master/03-transactions.md#efficient-per-commitment-secret-storage
@ -142,7 +142,7 @@ func (store *RevocationStore) AddNextEntry(hash *chainhash.Hash) error {
} }
if !e.isEqual(&store.buckets[i]) { if !e.isEqual(&store.buckets[i]) {
return errors.New("hash isn't deriavable from " + return errors.New("hash isn't derivable from " +
"previous ones") "previous ones")
} }
} }

@ -7,7 +7,7 @@ import (
) )
// changeBit is a functio that function that flips a bit of the hash at a // changeBit is a function that flips a bit of the hash at a
// particluar bit-index. You should be aware that the bit flipping in this // particular bit-index. You should be aware that the bit flipping in this
// function a bit strange, example: // function is a bit strange, for example:
// hash: [0b00000000, 0b00000000, ... 0b00000000] // hash: [0b00000000, 0b00000000, ... 0b00000000]
// 0 1 ... 31 // 0 1 ... 31
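Concretely, the layout above corresponds to this sketch, where bit 0 of each byte is its most significant bit, which is the "strange" part the comment warns about:

func changeBitSketch(hash []byte, position uint8) []byte {
	byteNumber := position / 8
	bitNumber := position % 8
	// Bit 0 within a byte is the most significant bit here, hence 7 - n.
	hash[byteNumber] ^= 1 << (7 - bitNumber)
	return hash
}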
@ -64,7 +64,7 @@ func countTrailingZeros(index index) uint8 {
return zeros return zeros
} }
// hashFromString takes a hex-encoded string as input and creates an instane of // hashFromString takes a hex-encoded string as input and creates an instance of
// chainhash.Hash. The chainhash.NewHashFromStr function not suitable because // chainhash.Hash. The chainhash.NewHashFromStr function is not suitable because
// it reverse the given hash. // it reverses the given hash.
func hashFromString(s string) (*chainhash.Hash, error) { func hashFromString(s string) (*chainhash.Hash, error) {
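The distinction matters because chainhash.NewHashFromStr expects a display-order (reversed) txid string. A sketch of the straight decode; the import paths shown are the upstream btcsuite ones:

import (
	"encoding/hex"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
)

func hashFromStringSketch(s string) (*chainhash.Hash, error) {
	b, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	// NewHash preserves the byte order exactly as given.
	return chainhash.NewHash(b)
}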

@ -14,7 +14,7 @@ import (
var interruptChannel chan os.Signal var interruptChannel chan os.Signal
// shutdownRequestChannel is used to request the daemon to shutdown gracefully, // shutdownRequestChannel is used to request the daemon to shutdown gracefully,
// similar to when receiveing SIGINT. // similar to when receiving SIGINT.
var shutdownRequestChannel = make(chan struct{}) var shutdownRequestChannel = make(chan struct{})
// addHandlerChannel is used to add an interrupt handler to the list of handlers // addHandlerChannel is used to add an interrupt handler to the list of handlers
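The two shutdown triggers funnel into a single select. A minimal sketch of the wiring, with the handler bookkeeping elided and the usual fmt/os/signal imports assumed:

interruptChannel = make(chan os.Signal, 1)
signal.Notify(interruptChannel, os.Interrupt) // SIGINT

go func() {
	select {
	case sig := <-interruptChannel:
		fmt.Printf("received %v, initiating shutdown\n", sig)
	case <-shutdownRequestChannel:
		fmt.Println("shutdown requested, initiating shutdown")
	}
	// Registered interrupt handlers would run here, in order.
}()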

@ -269,8 +269,8 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
alicePeer := &peer{ alicePeer := &peer{
server: s, server: s,
sendQueue: make(chan outgoinMsg, 1), sendQueue: make(chan outgoingMsg, 1),
outgoingQueue: make(chan outgoinMsg, outgoingQueueLen), outgoingQueue: make(chan outgoingMsg, outgoingQueueLen),
activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel), activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
newChannels: make(chan *newChannelMsg, 1), newChannels: make(chan *newChannelMsg, 1),

@ -374,7 +374,7 @@ func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint,
0, 0,
) )
// We'll skip any zero value'd outputs as this indicates we // We'll skip any zero valued outputs as this indicates we
// don't have a settled balance within the commitment // don't have a settled balance within the commitment
// transaction. // transaction.
if selfOutput.Amount() > 0 { if selfOutput.Amount() > 0 {
@ -1189,7 +1189,7 @@ func (u *utxoNursery) waitForSweepConf(classHeight uint32,
// Mark the confirmed kindergarten outputs as graduated. // Mark the confirmed kindergarten outputs as graduated.
if err := u.cfg.Store.GraduateKinder(classHeight); err != nil { if err := u.cfg.Store.GraduateKinder(classHeight); err != nil {
utxnLog.Errorf("Unable to graduate %v kingdergarten outputs: "+ utxnLog.Errorf("Unable to graduate %v kindergarten outputs: "+
"%v", len(kgtnOutputs), err) "%v", len(kgtnOutputs), err)
return return
} }
@ -1940,7 +1940,7 @@ func readTxOut(r io.Reader, txo *wire.TxOut) error {
return nil return nil
} }
// Compile-time constraint to ensure kidOutput and babyOutpt implement the // Compile-time constraint to ensure kidOutput and babyOutput implement the
// CsvSpendableOutput interface. // CsvSpendableOutput interface.
var _ CsvSpendableOutput = (*kidOutput)(nil) var _ CsvSpendableOutput = (*kidOutput)(nil)
var _ CsvSpendableOutput = (*babyOutput)(nil) var _ CsvSpendableOutput = (*babyOutput)(nil)
