
multi: comprehensive typo fixes across all packages

Branch: master
Author: practicalswift (committed by Olaoluwa Osuntokun, 6 years ago)
Commit: a93736d21e
Changed files (number of changed lines in parentheses):

  1. autopilot/prefattach_test.go (42)
  2. breacharbiter_test.go (12)
  3. brontide/conn.go (2)
  4. brontide/noise_test.go (8)
  5. chainntnfs/interface.go (6)
  6. chainntnfs/interface_test.go (16)
  7. chancloser.go (4)
  8. channeldb/channel.go (12)
  9. channeldb/channel_test.go (12)
  10. channeldb/db_test.go (2)
  11. channeldb/graph.go (10)
  12. channeldb/graph_test.go (24)
  13. channeldb/invoice_test.go (10)
  14. channeldb/invoices.go (4)
  15. channeldb/meta_test.go (14)
  16. channeldb/nodes.go (2)
  17. channeldb/nodes_test.go (8)
  18. cmd/lncli/commands.go (6)
  19. cmd/lncli/main.go (2)
  20. config.go (2)
  21. contractcourt/briefcase.go (2)
  22. contractcourt/chain_arbitrator.go (2)
  23. contractcourt/chain_watcher.go (4)
  24. contractcourt/channel_arbitrator.go (18)
  25. contractcourt/contract_resolvers.go (8)
  26. discovery/bootstrapper.go (8)
  27. discovery/gossiper.go (26)
  28. discovery/gossiper_test.go (26)
  29. discovery/utils.go (4)
  30. docker/README.md (2)
  31. docker/btcd/Dockerfile (2)
  32. docker/docker-compose.yml (4)
  33. docker/lnd/Dockerfile (2)
  34. docker/lnd/start-lnd.sh (2)
  35. docker/ltcd/Dockerfile (2)
  36. docs/code_contribution_guidelines.md (4)
  37. docs/grpc/java.md (2)
  38. docs/grpc/python.md (2)
  39. fundingmanager.go (4)
  40. fundingmanager_test.go (4)
  41. htlcswitch/circuit.go (2)
  42. htlcswitch/iterator.go (2)
  43. htlcswitch/link.go (18)
  44. htlcswitch/link_test.go (22)
  45. htlcswitch/mock.go (4)
  46. htlcswitch/switch.go (6)
  47. htlcswitch/switch_test.go (2)
  48. htlcswitch/test_utils.go (2)
  49. lnd.go (2)
  50. lnd_test.go (28)
  51. lnrpc/gen_protos.sh (2)
  52. lnrpc/rpc.proto (8)
  53. lnrpc/rpc.swagger.json (6)
  54. lntest/harness.go (6)
  55. lntest/node.go (4)
  56. lnwallet/btcwallet/btcwallet.go (2)
  57. lnwallet/btcwallet/signer.go (4)
  58. lnwallet/channel.go (28)
  59. lnwallet/channel_test.go (14)
  60. lnwallet/fee_estimator.go (2)
  61. lnwallet/interface_test.go (14)
  62. lnwallet/script_utils.go (10)
  63. lnwallet/script_utils_test.go (14)
  64. lnwallet/sigpool.go (2)
  65. lnwallet/wallet.go (8)
  66. lnwallet/witnessgen.go (4)
  67. lnwire/commit_sig.go (2)
  68. lnwire/features_test.go (2)
  69. lnwire/lnwire_test.go (4)
  70. lnwire/message.go (12)
  71. lnwire/netaddress.go (2)
  72. lnwire/onion_error.go (4)
  73. lnwire/signature.go (2)
  74. lnwire/update_fulfill_htlc.go (32)
  75. log.go (2)
  76. nursery_store.go (6)
  77. nursery_store_test.go (2)
  78. peer.go (24)
  79. peer_test.go (2)
  80. routing/chainview/bitcoind.go (4)
  81. routing/chainview/btcd.go (4)
  82. routing/chainview/interface.go (2)
  83. routing/chainview/interface_test.go (6)
  84. routing/chainview/neutrino.go (2)
  85. routing/chainview/queue.go (2)
  86. routing/missioncontrol.go (2)
  87. routing/notifications.go (2)
  88. routing/notifications_test.go (4)
  89. routing/pathfind_test.go (18)
  90. routing/router.go (18)
  91. routing/validation_barrier.go (60)
  92. rpcserver.go (44)
  93. server.go (14)
  94. shachain/element.go (4)
  95. shachain/element_test.go (2)
  96. shachain/store.go (4)
  97. shachain/utils.go (4)
  98. signal.go (2)
  99. test_utils.go (4)
  100. utxonursery.go (6)

Some files were not shown because too many files have changed in this diff.

autopilot/prefattach_test.go (42)

@ -137,11 +137,11 @@ func TestConstrainedPrefAttachmentNeedMoreChan(t *testing.T) {
},
}
prefAttatch := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
prefAttach := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
chanLimit, threshold)
for i, testCase := range testCases {
amtToAllocate, needMore := prefAttatch.NeedMoreChans(testCase.channels,
amtToAllocate, needMore := prefAttach.NeedMoreChans(testCase.channels,
testCase.walletAmt)
if amtToAllocate != testCase.amtAvailable {
@ -228,7 +228,7 @@ func TestConstrainedPrefAttachmentSelectEmptyGraph(t *testing.T) {
if err != nil {
t.Fatalf("unable to generate self key: %v", err)
}
prefAttatch := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
prefAttach := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
chanLimit, threshold)
skipNodes := make(map[NodeID]struct{})
@ -246,7 +246,7 @@ func TestConstrainedPrefAttachmentSelectEmptyGraph(t *testing.T) {
// attempt to select a set of candidates channel for
// creation given the current state of the graph.
const walletFunds = btcutil.SatoshiPerBitcoin
directives, err := prefAttatch.Select(self, graph,
directives, err := prefAttach.Select(self, graph,
walletFunds, skipNodes)
if err != nil {
t1.Fatalf("unable to select attachment "+
@ -257,7 +257,7 @@ func TestConstrainedPrefAttachmentSelectEmptyGraph(t *testing.T) {
// started with an empty graph.
if len(directives) != 0 {
t1.Fatalf("zero attachment directives "+
"should've been returned instead %v were",
"should have been returned instead %v were",
len(directives))
}
})
@ -300,7 +300,7 @@ func TestConstrainedPrefAttachmentSelectTwoVertexes(t *testing.T) {
if err != nil {
t1.Fatalf("unable to generate self key: %v", err)
}
prefAttatch := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
prefAttach := NewConstrainedPrefAttachment(minChanSize, maxChanSize,
chanLimit, threshold)
// For this set, we'll load the memory graph with two
@ -315,7 +315,7 @@ func TestConstrainedPrefAttachmentSelectTwoVertexes(t *testing.T) {
// attempt to select a set of candidates channel for
// creation given the current state of the graph.
const walletFunds = btcutil.SatoshiPerBitcoin * 10
directives, err := prefAttatch.Select(self, graph,
directives, err := prefAttach.Select(self, graph,
walletFunds, skipNodes)
if err != nil {
t1.Fatalf("unable to select attachment directives: %v", err)
@ -324,7 +324,7 @@ func TestConstrainedPrefAttachmentSelectTwoVertexes(t *testing.T) {
// Two new directives should have been selected, one
// for each node already present within the graph.
if len(directives) != 2 {
t1.Fatalf("two attachment directives should've been "+
t1.Fatalf("two attachment directives should have been "+
"returned instead %v were", len(directives))
}
@ -387,14 +387,14 @@ func TestConstrainedPrefAttachmentSelectInsufficientFunds(t *testing.T) {
if err != nil {
t1.Fatalf("unable to generate self key: %v", err)
}
prefAttatch := NewConstrainedPrefAttachment(
prefAttach := NewConstrainedPrefAttachment(
minChanSize, maxChanSize, chanLimit, threshold,
)
// Next, we'll attempt to select a set of candidates,
// passing zero for the amount of wallet funds. This
// should return an empty slice of directives.
directives, err := prefAttatch.Select(self, graph, 0,
directives, err := prefAttach.Select(self, graph, 0,
skipNodes)
if err != nil {
t1.Fatalf("unable to select attachment "+
@ -402,7 +402,7 @@ func TestConstrainedPrefAttachmentSelectInsufficientFunds(t *testing.T) {
}
if len(directives) != 0 {
t1.Fatalf("zero attachment directives "+
"should've been returned instead %v were",
"should have been returned instead %v were",
len(directives))
}
})
@ -446,21 +446,21 @@ func TestConstrainedPrefAttachmentSelectGreedyAllocation(t *testing.T) {
if err != nil {
t1.Fatalf("unable to generate self key: %v", err)
}
prefAttatch := NewConstrainedPrefAttachment(
prefAttach := NewConstrainedPrefAttachment(
minChanSize, maxChanSize, chanLimit, threshold,
)
const chanCapcity = btcutil.SatoshiPerBitcoin
const chanCapacity = btcutil.SatoshiPerBitcoin
// Next, we'll add 3 nodes to the graph, creating an
// "open triangle topology".
edge1, _, err := graph.addRandChannel(nil, nil,
chanCapcity)
chanCapacity)
if err != nil {
t1.Fatalf("unable to create channel: %v", err)
}
_, _, err = graph.addRandChannel(
edge1.Peer.PubKey(), nil, chanCapcity,
edge1.Peer.PubKey(), nil, chanCapacity,
)
if err != nil {
t1.Fatalf("unable to create channel: %v", err)
@ -502,7 +502,7 @@ func TestConstrainedPrefAttachmentSelectGreedyAllocation(t *testing.T) {
// result, the heuristic should try to greedily
// allocate funds to channels.
const availableBalance = btcutil.SatoshiPerBitcoin * 2.5
directives, err := prefAttatch.Select(self, graph,
directives, err := prefAttach.Select(self, graph,
availableBalance, skipNodes)
if err != nil {
t1.Fatalf("unable to select attachment "+
@ -576,15 +576,15 @@ func TestConstrainedPrefAttachmentSelectSkipNodes(t *testing.T) {
if err != nil {
t1.Fatalf("unable to generate self key: %v", err)
}
prefAttatch := NewConstrainedPrefAttachment(
prefAttach := NewConstrainedPrefAttachment(
minChanSize, maxChanSize, chanLimit, threshold,
)
// Next, we'll create a simple topology of two nodes,
// with a single channel connecting them.
const chanCapcity = btcutil.SatoshiPerBitcoin
const chanCapacity = btcutil.SatoshiPerBitcoin
_, _, err = graph.addRandChannel(nil, nil,
chanCapcity)
chanCapacity)
if err != nil {
t1.Fatalf("unable to create channel: %v", err)
}
@ -593,7 +593,7 @@ func TestConstrainedPrefAttachmentSelectSkipNodes(t *testing.T) {
// function to recommend potential attachment
// candidates.
const availableBalance = btcutil.SatoshiPerBitcoin * 2.5
directives, err := prefAttatch.Select(self, graph,
directives, err := prefAttach.Select(self, graph,
availableBalance, skipNodes)
if err != nil {
t1.Fatalf("unable to select attachment "+
@ -617,7 +617,7 @@ func TestConstrainedPrefAttachmentSelectSkipNodes(t *testing.T) {
// without providing any new information, then we
// should get no new directives as both nodes has
// already been attached to.
directives, err = prefAttatch.Select(self, graph,
directives, err = prefAttach.Select(self, graph,
availableBalance, skipNodes)
if err != nil {
t1.Fatalf("unable to select attachment "+

breacharbiter_test.go (12)

@ -639,7 +639,7 @@ func TestChannelDBRetributionStore(t *testing.T) {
restartDb := func() RetributionStore {
// Close and reopen channeldb
if err = db.Close(); err != nil {
t.Fatalf("unalbe to close channeldb during restart: %v",
t.Fatalf("unable to close channeldb during restart: %v",
err)
}
db, err = channeldb.Open(db.Path())
@ -688,7 +688,7 @@ func countRetributions(t *testing.T, rs RetributionStore) int {
// removes each one individually. Between each addition or removal, the number
// of elements in the store is checked to ensure that it only changes by one.
func testRetributionStoreAddRemove(frs FailingRetributionStore, t *testing.T) {
// Make sure that a new retribution store is actually emtpy.
// Make sure that a new retribution store is actually empty.
if count := countRetributions(t, frs); count != 0 {
t.Fatalf("expected 0 retributions, found %v", count)
}
@ -704,7 +704,7 @@ func testRetributionStoreAddRemove(frs FailingRetributionStore, t *testing.T) {
// testRetributionStoreAddRemove, except that it also restarts the store between
// each operation to ensure that the results are properly persisted.
func testRetributionStorePersistence(frs FailingRetributionStore, t *testing.T) {
// Make sure that a new retribution store is still emtpy after failing
// Make sure that a new retribution store is still empty after failing
// right off the bat.
frs.Restart()
if count := countRetributions(t, frs); count != 0 {
@ -870,9 +870,9 @@ func testRetributionStoreForAll(
var isRestart bool
restartCheck:
// Construct a set of all channel points presented by the store. Entires
// Construct a set of all channel points presented by the store. Entries
// are only be added to the set if their corresponding retribution
// infromation matches the test vector.
// information matches the test vector.
var foundSet = make(map[wire.OutPoint]struct{})
// Iterate through the stored retributions, checking to see if we have
@ -897,7 +897,7 @@ restartCheck:
foundSet[ret.chanPoint] = struct{}{}
} else {
return fmt.Errorf("unkwown retribution retrieved "+
return fmt.Errorf("unknown retribution retrieved "+
"from db: %v", ret)
}

brontide/conn.go (2)

@ -112,7 +112,7 @@ func (c *Conn) Read(b []byte) (n int, err error) {
// In order to reconcile the differences between the record abstraction
// of our AEAD connection, and the stream abstraction of TCP, we
// maintain an intermediate read buffer. If this buffer becomes
// depleated, then we read the next record, and feed it into the
// depleted, then we read the next record, and feed it into the
// buffer. Otherwise, we read directly from the buffer.
if c.readBuf.Len() == 0 {
plaintext, err := c.noise.ReadMessage(c.conn)

brontide/noise_test.go (8)

@ -140,7 +140,7 @@ func TestMaxPayloadLength(t *testing.T) {
b := Machine{}
b.split()
// Create a payload that's juust over the maximum allotted payload
// Create a payload that's only *slightly* above the maximum allotted payload
// length.
payloadToReject := make([]byte, math.MaxUint16+1)
@ -162,7 +162,7 @@ func TestMaxPayloadLength(t *testing.T) {
"accepted")
}
// Generate a final payload which is juuust over the max payload length
// Generate a final payload which is only *slightly* above the max payload length
// when the MAC is accounted for.
payloadToReject = make([]byte, math.MaxUint16+1)
@ -190,7 +190,7 @@ func TestWriteMessageChunking(t *testing.T) {
// Launch a new goroutine to write the large message generated above in
// chunks. We spawn a new goroutine because otherwise, we may block as
// the kernal waits for the buffer to flush.
// the kernel waits for the buffer to flush.
var wg sync.WaitGroup
wg.Add(1)
go func() {
@ -364,7 +364,7 @@ func TestBolt0008TestVectors(t *testing.T) {
recvKey, err := hex.DecodeString("bb9020b8965f4df047e07f955f3c4b884" +
"18984aadc5cdb35096b9ea8fa5c3442")
if err != nil {
t.Fatalf("unable to parse recv'ing key: %v", err)
t.Fatalf("unable to parse receiving key: %v", err)
}
chainKey, err := hex.DecodeString("919219dbb2920afa8db80f9a51787a840" +

chainntnfs/interface.go (6)

@ -36,12 +36,12 @@ type ChainNotifier interface {
heightHint uint32) (*ConfirmationEvent, error)
// RegisterSpendNtfn registers an intent to be notified once the target
// outpoint is succesfully spent within a confirmed transaction. The
// outpoint is successfully spent within a confirmed transaction. The
// returned SpendEvent will receive a send on the 'Spend' transaction
// once a transaction spending the input is detected on the blockchain.
// The heightHint parameter is provided as a convenience to light
// clients. The heightHint denotes the earliest height in the blockchain
// in which the target output could've been created.
// in which the target output could have been created.
//
// NOTE: This notifications should be triggered once the transaction is
// *seen* on the network, not when it has received a single confirmation.
@ -177,7 +177,7 @@ type NotifierDriver struct {
// New creates a new instance of a concrete ChainNotifier
// implementation given a variadic set up arguments. The function takes
// a varidaic number of interface parameters in order to provide
// a variadic number of interface parameters in order to provide
// initialization flexibility, thereby accommodating several potential
// ChainNotifier implementations.
New func(args ...interface{}) (ChainNotifier, error)

chainntnfs/interface_test.go (16)

@ -112,7 +112,7 @@ func testSingleConfirmationNotification(miner *rpctest.Harness,
// We'd like to test the case of being notified once a txid reaches
// a *single* confirmation.
//
// So first, let's send some coins to "ourself", obtainig a txid.
// So first, let's send some coins to "ourself", obtaining a txid.
// We're spending from a coinbase output here, so we use the dedicated
// function.
@ -226,7 +226,7 @@ func testMultiConfirmationNotification(miner *rpctest.Harness,
func testBatchConfirmationNotification(miner *rpctest.Harness,
notifier chainntnfs.ChainNotifier, t *testing.T) {
// We'd like to test a case of serving notifiations to multiple
// We'd like to test a case of serving notifications to multiple
// clients, each requesting to be notified once a txid receives
// various numbers of confirmations.
confSpread := [6]uint32{1, 2, 3, 6, 20, 22}
@ -887,7 +887,7 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
// Broadcast our spending transaction.
spenderSha, err := miner.Node.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to brodacst tx: %v", err)
t.Fatalf("unable to broadcast tx: %v", err)
}
err = waitForMempoolTx(miner, spenderSha)
@ -928,7 +928,7 @@ func testSpendBeforeNtfnRegistration(miner *rpctest.Harness,
ntfn.SpentOutPoint, outpoint)
}
if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
t.Fatalf("ntfn includes wrong spender tx sha, reports %v intead of %v",
t.Fatalf("ntfn includes wrong spender tx sha, reports %v instead of %v",
ntfn.SpenderTxHash[:], spenderSha[:])
}
if ntfn.SpenderInputIndex != 0 {
@ -980,7 +980,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
// Broadcast our spending transaction.
spenderSha, err := node.Node.SendRawTransaction(spendingTx, true)
if err != nil {
t.Fatalf("unable to brodacst tx: %v", err)
t.Fatalf("unable to broadcast tx: %v", err)
}
err = waitForMempoolTx(node, spenderSha)
@ -1007,7 +1007,7 @@ func testCancelSpendNtfn(node *rpctest.Harness,
}
if !bytes.Equal(ntfn.SpenderTxHash[:], spenderSha[:]) {
t.Fatalf("ntfn includes wrong spender tx sha, "+
"reports %v intead of %v",
"reports %v instead of %v",
ntfn.SpenderTxHash[:], spenderSha[:])
}
if ntfn.SpenderInputIndex != 0 {
@ -1063,7 +1063,7 @@ func testCancelEpochNtfn(node *rpctest.Harness, notifier chainntnfs.ChainNotifie
select {
case _, ok := <-epochClients[0].Epochs:
if ok {
t.Fatalf("epoch notification should've been cancelled")
t.Fatalf("epoch notification should have been cancelled")
}
case <-time.After(2 * time.Second):
t.Fatalf("epoch notification not sent")
@ -1303,7 +1303,7 @@ var ntfnTests = []testCase{
func TestInterfaces(t *testing.T) {
// Initialize the harness around a btcd node which will serve as our
// dedicated miner to generate blocks, cause re-orgs, etc. We'll set up
// this node with a chain length of 125, so we have plentyyy of BTC to
// this node with a chain length of 125, so we have plenty of BTC to
// play around with.
miner, err := rpctest.New(netParams, nil, nil)
if err != nil {

chancloser.go (4)

@ -83,7 +83,7 @@ type chanCloseCfg struct {
broadcastTx func(*wire.MsgTx) error
// quit is a channel that should be sent upon in the occasion the state
// machine shouldk cease all progress and shutdown.
// machine should cease all progress and shutdown.
quit chan struct{}
}
@ -247,7 +247,7 @@ func (c *channelCloser) ShutdownChan() (*lnwire.Shutdown, error) {
// ClosingTx returns the fully signed, final closing transaction.
//
// NOTE: THis transaction is only available if the state machine is in the
// NOTE: This transaction is only available if the state machine is in the
// closeFinished state.
func (c *channelCloser) ClosingTx() (*wire.MsgTx, error) {
// If the state machine hasn't finished closing the channel then we'll

channeldb/channel.go (12)

@ -265,7 +265,7 @@ type ChannelCommitment struct {
Htlcs []HTLC
// TODO(roasbeef): pending commit pointer?
// * lets just walk thru
// * lets just walk through
}
// OpenChannel encapsulates the persistent and dynamic state of an open channel
@ -399,7 +399,7 @@ func (c *OpenChannel) FullSync() error {
return c.Db.Update(c.fullSync)
}
// updateChanBucket is a helper function that returns a writeable bucket that a
// updateChanBucket is a helper function that returns a writable bucket that a
// channel's data resides in given: the public key for the node, the outpoint,
// and the chainhash that the channel resides on.
func updateChanBucket(tx *bolt.Tx, nodeKey *btcec.PublicKey,
@ -474,7 +474,7 @@ func readChanBucket(tx *bolt.Tx, nodeKey *btcec.PublicKey,
}
// With the bucket for the node fetched, we can now go down another
// level, for this channel iteslf.
// level, for this channel itself.
var chanPointBuf bytes.Buffer
chanPointBuf.Grow(outPointSize)
if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
@ -924,7 +924,7 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
defer c.Unlock()
return c.Db.Update(func(tx *bolt.Tx) error {
// First, we'll grab the writeable bucket where this channel's
// First, we'll grab the writable bucket where this channel's
// data resides.
chanBucket, err := updateChanBucket(tx, c.IdentityPub,
&c.FundingOutpoint, c.ChainHash)
@ -1099,7 +1099,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
c.RLock()
defer c.RUnlock()
// If we haven't created any state updates yet, then we'll exit erly as
// If we haven't created any state updates yet, then we'll exit early as
// there's nothing to be found on disk in the revocation bucket.
if c.RemoteCommitment.CommitHeight == 0 {
return nil, nil
@ -1121,7 +1121,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
// Once we have the bucket that stores the revocation log from
// this channel, we'll jump to the _last_ key in bucket. As we
// store the update number on disk in a big-endian format,
// this'll retrieve the latest entry.
// this will retrieve the latest entry.
cursor := logBucket.Cursor()
_, tailLogEntry := cursor.Last()
logEntryReader := bytes.NewReader(tailLogEntry)

channeldb/channel_test.go (12)

@ -490,7 +490,7 @@ func TestChannelStateTransition(t *testing.T) {
t.Fatalf("unable to append to revocation log: %v", err)
}
// At this point, the remote commit chain shuold be nil, and the posted
// At this point, the remote commit chain should be nil, and the posted
// remote commitment should match the one we added as a diff above.
if _, err := channel.RemoteCommitChainTip(); err != ErrNoPendingCommit {
t.Fatalf("expected ErrNoPendingCommit, instead got %v", err)
@ -591,7 +591,7 @@ func TestChannelStateTransition(t *testing.T) {
// revocation log has been deleted.
_, err = updatedChannel[0].FindPreviousState(oldRemoteCommit.CommitHeight)
if err == nil {
t.Fatal("revocation log search should've failed")
t.Fatal("revocation log search should have failed")
}
}
@ -600,7 +600,7 @@ func TestFetchPendingChannels(t *testing.T) {
cdb, cleanUp, err := makeTestDB()
if err != nil {
t.Fatalf("uanble to make test database: %v", err)
t.Fatalf("unable to make test database: %v", err)
}
defer cleanUp()
@ -630,7 +630,7 @@ func TestFetchPendingChannels(t *testing.T) {
"got %v", 1, len(pendingChannels))
}
// The broadcast height of the pending channel should've been set
// The broadcast height of the pending channel should have been set
// properly.
if pendingChannels[0].FundingBroadcastHeight != broadcastHeight {
t.Fatalf("broadcast height mismatch: expected %v, got %v",
@ -736,7 +736,7 @@ func TestFetchClosedChannels(t *testing.T) {
// channels only, or not.
pendingClosed, err := cdb.FetchClosedChannels(true)
if err != nil {
t.Fatalf("failed fetcing closed channels: %v", err)
t.Fatalf("failed fetching closed channels: %v", err)
}
if len(pendingClosed) != 1 {
t.Fatalf("incorrect number of pending closed channels: expecting %v,"+
@ -769,7 +769,7 @@ func TestFetchClosedChannels(t *testing.T) {
// be retrieved when fetching all the closed channels.
closed, err = cdb.FetchClosedChannels(false)
if err != nil {
t.Fatalf("failed fetcing closed channels: %v", err)
t.Fatalf("failed fetching closed channels: %v", err)
}
if len(closed) != 1 {
t.Fatalf("incorrect number of closed channels: expecting %v, "+

channeldb/db_test.go (2)

@ -28,7 +28,7 @@ func TestOpenWithCreate(t *testing.T) {
t.Fatalf("unable to close channeldb: %v", err)
}
// The path should have been succesfully created.
// The path should have been successfully created.
if !fileExists(dbPath) {
t.Fatalf("channeldb failed to create data directory")
}

channeldb/graph.go (10)

@ -180,7 +180,7 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli
// The targeted edge may have not been advertised
// within the network, so we ensure it's non-nil before
// deferencing its attributes.
// dereferencing its attributes.
if edge1 != nil {
edge1.db = c.db
if edge1.Node != nil {
@ -199,7 +199,7 @@ func (c *ChannelGraph) ForEachChannel(cb func(*ChannelEdgeInfo, *ChannelEdgePoli
// The targeted edge may have not been advertised
// within the network, so we ensure it's non-nil before
// deferencing its attributes.
// dereferencing its attributes.
if edge2 != nil {
edge2.db = c.db
if edge2.Node != nil {
@ -499,7 +499,7 @@ func (c *ChannelGraph) HasChannelEdge(chanID uint64) (time.Time, time.Time, bool
// If the channel has been found in the graph, then retrieve
// the edges itself so we can return the last updated
// timestmaps.
// timestamps.
nodes := tx.Bucket(nodeBucket)
if nodes == nil {
return ErrGraphNodeNotFound
@ -595,7 +595,7 @@ func (c *ChannelGraph) PruneGraph(spentOutputs []*wire.OutPoint,
return err
}
// For each of the outpoints that've been spent within the
// For each of the outpoints that have been spent within the
// block, we attempt to delete them from the graph as if that
// outpoint was a channel, then it has now been closed.
for _, chanPoint := range spentOutputs {
@ -1271,7 +1271,7 @@ type ChannelEdgeInfo struct {
// ChannelAuthProof is the authentication proof (the signature portion) for a
// channel. Using the four signatures contained in the struct, and some
// axillary knowledge (the funding script, node identities, and outpoint) nodes
// auxillary knowledge (the funding script, node identities, and outpoint) nodes
// on the network are able to validate the authenticity and existence of a
// channel. Each of these signatures signs the following digest: chanID ||
// nodeID1 || nodeID2 || bitcoinKey1|| bitcoinKey2 || 2-byte-feature-len ||

channeldb/graph_test.go (24)

@ -120,7 +120,7 @@ func TestNodeInsertionAndDeletion(t *testing.T) {
}
// Finally, attempt to fetch the node again. This should fail as the
// node should've been deleted from the database.
// node should have been deleted from the database.
_, err = graph.FetchLightningNode(testPub)
if err != ErrGraphNodeNotFound {
t.Fatalf("fetch after delete should fail!")
@ -185,7 +185,7 @@ func TestPartialNode(t *testing.T) {
}
// Finally, attempt to fetch the node again. This should fail as the
// node should've been deleted from the database.
// node should have been deleted from the database.
_, err = graph.FetchLightningNode(testPub)
if err != ErrGraphNodeNotFound {
t.Fatalf("fetch after delete should fail!")
@ -355,9 +355,9 @@ func TestEdgeInsertionDeletion(t *testing.T) {
}
}
// TestDisconnecteBlockAtHeight checks that the pruned state of the channel
// TestDisconnectBlockAtHeight checks that the pruned state of the channel
// database is what we expect after calling DisconnectBlockAtHeight.
func TestDisconnecteBlockAtHeight(t *testing.T) {
func TestDisconnectBlockAtHeight(t *testing.T) {
t.Parallel()
db, cleanUp, err := makeTestDB()
@ -927,7 +927,7 @@ func assertPruneTip(t *testing.T, graph *ChannelGraph, blockHash *chainhash.Hash
}
}
func asserNumChans(t *testing.T, graph *ChannelGraph, n int) {
func assertNumChans(t *testing.T, graph *ChannelGraph, n int) {
numChans := 0
if err := graph.ForEachChannel(func(*ChannelEdgeInfo, *ChannelEdgePolicy,
*ChannelEdgePolicy) error {
@ -948,7 +948,7 @@ func asserNumChans(t *testing.T, graph *ChannelGraph, n int) {
func assertChanViewEqual(t *testing.T, a []wire.OutPoint, b []*wire.OutPoint) {
if len(a) != len(b) {
_, _, line, _ := runtime.Caller(1)
t.Fatalf("line %v: chan views dont match", line)
t.Fatalf("line %v: chan views don't match", line)
}
chanViewSet := make(map[wire.OutPoint]struct{})
@ -1080,7 +1080,7 @@ func TestGraphPruning(t *testing.T) {
// Count up the number of channels known within the graph, only 2
// should be remaining.
asserNumChans(t, graph, 2)
assertNumChans(t, graph, 2)
// Those channels should also be missing from the channel view.
channelView, err = graph.ChannelView()
@ -1104,14 +1104,14 @@ func TestGraphPruning(t *testing.T) {
t.Fatalf("unable to prune graph: %v", err)
}
// No channels should've been detected as pruned.
// No channels should have been detected as pruned.
if len(prunedChans) != 0 {
t.Fatalf("channels were pruned but shouldn't have been")
}
// Once again, the prune tip should've been updated.
// Once again, the prune tip should have been updated.
assertPruneTip(t, graph, &blockHash, blockHeight)
asserNumChans(t, graph, 2)
assertNumChans(t, graph, 2)
// Finally, create a block that prunes the remainder of the channels
// from the graph.
@ -1123,7 +1123,7 @@ func TestGraphPruning(t *testing.T) {
t.Fatalf("unable to prune graph: %v", err)
}
// The remainder of the channels should've been pruned from the graph.
// The remainder of the channels should have been pruned from the graph.
if len(prunedChans) != 2 {
t.Fatalf("incorrect number of channels pruned: expected %v, got %v",
2, len(prunedChans))
@ -1132,7 +1132,7 @@ func TestGraphPruning(t *testing.T) {
// The prune tip should be updated, and no channels should be found
// within the current graph.
assertPruneTip(t, graph, &blockHash, blockHeight)
asserNumChans(t, graph, 0)
assertNumChans(t, graph, 0)
// Finally, the channel view at this point in the graph should now be
// completely empty.

channeldb/invoice_test.go (10)

@ -27,7 +27,7 @@ func randInvoice(value lnwire.MilliSatoshi) (*Invoice, error) {
},
}
i.Memo = []byte("memo")
i.Receipt = []byte("recipt")
i.Receipt = []byte("receipt")
// Create a random byte slice of MaxPaymentRequestSize bytes to be used
// as a dummy paymentrequest, and determine if it should be set based
@ -62,12 +62,12 @@ func TestInvoiceWorkflow(t *testing.T) {
CreationDate: time.Unix(time.Now().Unix(), 0),
}
fakeInvoice.Memo = []byte("memo")
fakeInvoice.Receipt = []byte("recipt")
fakeInvoice.Receipt = []byte("receipt")
fakeInvoice.PaymentRequest = []byte("")
copy(fakeInvoice.Terms.PaymentPreimage[:], rev[:])
fakeInvoice.Terms.Value = lnwire.NewMSatFromSatoshis(10000)
// Add the invoice to the database, this should suceed as there aren't
// Add the invoice to the database, this should succeed as there aren't
// any existing invoices within the database with the same payment
// hash.
if err := db.AddInvoice(fakeInvoice); err != nil {
@ -144,12 +144,12 @@ func TestInvoiceWorkflow(t *testing.T) {
}
// The retrieve list of invoices should be identical as since we're
// using big endian, the invoices should be retrieved in asecending
// using big endian, the invoices should be retrieved in ascending
// order (and the primary key should be incremented with each
// insertion).
for i := 0; i < len(invoices)-1; i++ {
if !reflect.DeepEqual(invoices[i], dbInvoices[i]) {
t.Fatalf("retrived invoices don't match %v vs %v",
t.Fatalf("retrieved invoices don't match %v vs %v",
spew.Sdump(invoices[i]),
spew.Sdump(dbInvoices[i]))
}

channeldb/invoices.go (4)

@ -61,7 +61,7 @@ type ContractTerm struct {
// extended.
PaymentPreimage [32]byte
// Value is the expected amount of milli-satoshis to be payed to an
// Value is the expected amount of milli-satoshis to be paid to an
// HTLC which can be satisfied by the above preimage.
Value lnwire.MilliSatoshi
@ -301,7 +301,7 @@ func putInvoice(invoices *bolt.Bucket, invoiceIndex *bolt.Bucket,
return err
}
// Add the payment hash to the invoice index. This'll let us quickly
// Add the payment hash to the invoice index. This will let us quickly
// identify if we can settle an incoming payment, and also to possibly
// allow a single invoice to have multiple payment installations.
paymentHash := sha256.Sum256(i.Terms.PaymentPreimage[:])

channeldb/meta_test.go (14)

@ -192,7 +192,7 @@ func TestMigrationWithPanic(t *testing.T) {
})
}
// Create migration function which changes the initialy created data and
// Create migration function which changes the initially created data and
// throw the panic, in this case we pretending that something goes.
migrationWithPanic := func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
@ -212,7 +212,7 @@ func TestMigrationWithPanic(t *testing.T) {
}
if meta.DbVersionNumber != 0 {
t.Fatal("migration paniced but version is changed")
t.Fatal("migration panicked but version is changed")
}
err = d.Update(func(tx *bolt.Tx) error {
@ -261,8 +261,8 @@ func TestMigrationWithFatal(t *testing.T) {
})
}
// Create migration function which changes the initialy created data and
// return the error, in this case we pretending that somthing goes
// Create migration function which changes the initially created data and
// return the error, in this case we pretending that something goes
// wrong.
migrationWithFatal := func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
@ -332,7 +332,7 @@ func TestMigrationWithoutErrors(t *testing.T) {
})
}
// Create migration function which changes the initialy created data.
// Create migration function which changes the initially created data.
migrationWithoutErrors := func(tx *bolt.Tx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix)
if err != nil {
@ -352,7 +352,7 @@ func TestMigrationWithoutErrors(t *testing.T) {
if meta.DbVersionNumber != 1 {
t.Fatal("version number isn't changed after " +
"succesfully aplied migration")
"successfully applied migration")
}
err = d.Update(func(tx *bolt.Tx) error {
@ -363,7 +363,7 @@ func TestMigrationWithoutErrors(t *testing.T) {
value := bucket.Get(keyPrefix)
if !bytes.Equal(value, afterMigration) {
return errors.New("migration wasn't applyied " +
return errors.New("migration wasn't applied " +
"properly")
}

channeldb/nodes.go (2)

@ -23,7 +23,7 @@ var (
// channel open with. Information such as the Bitcoin network the node
// advertised, and its identity public key are also stored. Additionally, this
// struct and the bucket its stored within have store data similar to that of
// Bitcion's addrmanager. The TCP address information stored within the struct
// Bitcoin's addrmanager. The TCP address information stored within the struct
// can be used to establish persistent connections will all channel
// counterparties on daemon startup.
//

channeldb/nodes_test.go (8)

@ -15,7 +15,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
cdb, cleanUp, err := makeTestDB()
if err != nil {
t.Fatalf("uanble to make test database: %v", err)
t.Fatalf("unable to make test database: %v", err)
}
defer cleanUp()
@ -72,7 +72,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
}
}
// Next, we'll excercise the methods to append additionall IP
// Next, we'll exercise the methods to append additional IP
// addresses, and also to update the last seen time.
if err := node1.UpdateLastSeen(time.Now()); err != nil {
t.Fatalf("unable to update last seen: %v", err)
@ -81,7 +81,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
t.Fatalf("unable to update addr: %v", err)
}
// Fetch the same node from the databse according to its public key.
// Fetch the same node from the database according to its public key.
node1DB, err := cdb.FetchLinkNode(pub1)
if err != nil {
t.Fatalf("unable to find node: %v", err)
@ -94,7 +94,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) {
node1.LastSeen.Unix(), node1DB.LastSeen.Unix())
}
if len(node1DB.Addresses) != 2 {
t.Fatalf("wrong length for node1 addrsses: expected %v, got %v",
t.Fatalf("wrong length for node1 addresses: expected %v, got %v",
2, len(node1DB.Addresses))
}
if node1DB.Addresses[0].String() != addr1.String() {

cmd/lncli/commands.go (6)

@ -647,7 +647,7 @@ func closeChannel(ctx *cli.Context) error {
err error
)
// Show command help if no arguments provieded
// Show command help if no arguments provided
if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
cli.ShowCommandHelp(ctx, "closechannel")
return nil
@ -1020,7 +1020,7 @@ var sendPaymentCommand = cli.Command{
}
func sendPayment(ctx *cli.Context) error {
// Show command help if no arguments provieded
// Show command help if no arguments provided
if ctx.NArg() == 0 && ctx.NumFlags() == 0 {
cli.ShowCommandHelp(ctx, "sendpayment")
return nil
@ -1836,7 +1836,7 @@ func debugLevel(ctx *cli.Context) error {
return nil
}
var decodePayReqComamnd = cli.Command{
var decodePayReqCommand = cli.Command{
Name: "decodepayreq",
Usage: "Decode a payment request.",
Description: "Decode the passed payment request revealing the destination, payment hash and value of the payment request",

cmd/lncli/main.go (2)

@ -188,7 +188,7 @@ func main() {
queryRoutesCommand,
getNetworkInfoCommand,
debugLevelCommand,
decodePayReqComamnd,
decodePayReqCommand,
listChainTxnsCommand,
stopCommand,
signMessageCommand,

config.go (2)

@ -628,7 +628,7 @@ func parseAndSetDebugLevels(debugLevel string) error {
// Validate subsystem.
if _, exists := subsystemLoggers[subsysID]; !exists {
str := "The specified subsystem [%v] is invalid -- " +
"supported subsytems %v"
"supported subsystems %v"
return fmt.Errorf(str, subsysID, supportedSubsystems())
}

contractcourt/briefcase.go (2)

@ -160,7 +160,7 @@ func (a ArbitratorState) String() string {
}
// resolverType is an enum that enumerates the various types of resolvers. When
// writing resolvers to disk, we prepend this to the raw bytes stroed. This
// writing resolvers to disk, we prepend this to the raw bytes stored. This
// allows us to properly decode the resolver into the proper type.
type resolverType uint8

contractcourt/chain_arbitrator.go (2)

@ -90,7 +90,7 @@ type ChainArbitratorConfig struct {
// both to the utxo nursery. Once this function returns, the nursery
// should have safely persisted the outputs to disk, and should start
// the process of incubation. This is used when a resolver wishes to
// pass off the output to the nursery as we're inly waiting on an
// pass off the output to the nursery as we're only waiting on an
// absolute/relative item block.
IncubateOutputs func(wire.OutPoint, *lnwallet.CommitOutputResolution,
*lnwallet.OutgoingHtlcResolution,

contractcourt/chain_watcher.go (4)

@ -41,7 +41,7 @@ type ChainEventSubscription struct {
// material required to bring the cheating channel peer to justice.
ContractBreach chan *lnwallet.BreachRetribution
// ProcessACK is a channel that'll be used by the chainWatcher to
// ProcessACK is a channel that will be used by the chainWatcher to
// synchronize dispatch and processing of the notification with the act
// of updating the state of the channel on disk. This ensures that the
// event can be reliably handed off.
@ -73,7 +73,7 @@ type chainWatcher struct {
// database to ensure that we act using the most up to date state.
chanState *channeldb.OpenChannel
// stateHintObfuscator is a 48-bit state hint that's used to obfsucate
// stateHintObfuscator is a 48-bit state hint that's used to obfuscate
// the current state number on the commitment transactions.
stateHintObfuscator [lnwallet.StateHintSize]byte

contractcourt/channel_arbitrator.go (18)

@ -24,21 +24,21 @@ const (
broadcastRedeemMultiplier = 2
)
// WitnessSubcription represents an intent to be notified once new witnesses
// WitnessSubscription represents an intent to be notified once new witnesses
// are discovered by various active contract resolvers. A contract resolver may
// use this to be notified of when it can satisfy an incoming contract after we
// discover the witness for an outgoing contract.
type WitnessSubcription struct {
type WitnessSubscription struct {
// WitnessUpdates is a channel that newly discovered witnesses will be
// sent over.
//
// TODO(roasbef): couple with WitnessType?
// TODO(roasbeef): couple with WitnessType?
WitnessUpdates <-chan []byte
// CancelSubcription is a function closure that should be used by a
// CancelSubscription is a function closure that should be used by a
// client to cancel the subscription once they are no longer interested
// in receiving new updates.
CancelSubcription func()
CancelSubscription func()
}
// WitnessBeacon is a global beacon of witnesses. Contract resolvers will use
@ -49,9 +49,9 @@ type WitnessSubcription struct {
// TODO(roasbeef): need to delete the pre-images once we've used them
// and have been sufficiently confirmed?
type WitnessBeacon interface {
// SubcribeUpdates returns a channel that will be sent upon *each* time
// SubscribeUpdates returns a channel that will be sent upon *each* time
// a new preimage is discovered.
SubcribeUpdates() *WitnessSubcription
SubscribeUpdates() *WitnessSubscription
// LookupPreImage attempts to lookup a preimage in the global cache.
// True is returned for the second argument if the preimage is found.
@ -254,7 +254,7 @@ func (c *ChannelArbitrator) Start() error {
return err
}
// If we start and ended at the awiting full resolution state, then
// If we start and ended at the awaiting full resolution state, then
// we'll relaunch our set of unresolved contracts.
if startingState == StateWaitingFullResolution &&
nextState == StateWaitingFullResolution {
@ -1144,7 +1144,7 @@ func (c *ChannelArbitrator) prepContractResolutions(htlcActions ChainActionMap,
return htlcResolvers, msgsToSend, nil
}
// resolveContract is a goroutien tasked with fully resolving an unresolved
// resolveContract is a goroutine tasked with fully resolving an unresolved
// contract. Either the initial contract will be resolved after a single step,
// or the contract will itself create another contract to be resolved. In
// either case, one the contract has been fully resolved, we'll signal back to

contractcourt/contract_resolvers.go (8)

@ -69,7 +69,7 @@ type ContractResolver interface {
// given ContractResolver implementation. It contains all the items that a
// resolver requires to carry out its duties.
type ResolverKit struct {
// ChannelArbiratorConfig contains all the interfaces and closures
// ChannelArbitratorConfig contains all the interfaces and closures
// required for the resolver to interact with outside sub-systems.
ChannelArbitratorConfig
@ -960,7 +960,7 @@ var _ ContractResolver = (*htlcOutgoingContestResolver)(nil)
// it hasn't expired. In this case, we can resolve the HTLC if we learn of the
// preimage, otherwise the remote party will sweep it after it expires.
//
// TODO(roabseef): just embed the other resolver?
// TODO(roasbeef): just embed the other resolver?
type htlcIncomingContestResolver struct {
// htlcExpiry is the absolute expiry of this incoming HTLC. We use this
// value to determine if we can exit early as if the HTLC times out,
@ -1055,13 +1055,13 @@ func (h *htlcIncomingContestResolver) Resolve() (ContractResolver, error) {
// If the HTLC hasn't expired yet, then we may still be able to claim
// it if we learn of the pre-image, so we'll wait and see if it pops
// up, or the HTLC times out.
preimageSubscription := h.PreimageDB.SubcribeUpdates()
preimageSubscription := h.PreimageDB.SubscribeUpdates()
blockEpochs, err := h.Notifier.RegisterBlockEpochNtfn()
if err != nil {
return nil, err
}
defer func() {
preimageSubscription.CancelSubcription()
preimageSubscription.CancelSubscription()
blockEpochs.Cancel()
}()
for {

discovery/bootstrapper.go (8)

@ -18,7 +18,7 @@ import (
)
// NetworkPeerBootstrapper is an interface that represents an initial peer
// boostrap mechanism. This interface is to be used to bootstrap a new peer to
// bootstrap mechanism. This interface is to be used to bootstrap a new peer to
// the connection by providing it with the pubkey+address of a set of existing
// peers on the network. Several bootstrap mechanisms can be implemented such
// as DNS, in channel graph, DHT's, etc.
@ -48,7 +48,7 @@ func MultiSourceBootstrap(ignore map[autopilot.NodeID]struct{}, numAddrs uint32,
var addrs []*lnwire.NetAddress
for _, bootStrapper := range bootStrappers {
// If we already have enough addresses, then we can exit early
// w/o querying the additional boostrappers.
// w/o querying the additional bootstrappers.
if uint32(len(addrs)) >= numAddrs {
break
}
@ -85,7 +85,7 @@ type ChannelGraphBootstrapper struct {
chanGraph autopilot.ChannelGraph
// hashAccumulator is a set of 32 random bytes that are read upon the
// creation of the channel graph boostrapper. We use this value to
// creation of the channel graph bootstrapper. We use this value to
// randomly select nodes within the known graph to connect to. After
// each selection, we rotate the accumulator by hashing it with itself.
hashAccumulator [32]byte
@ -257,7 +257,7 @@ var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil)
// NewDNSSeedBootstrapper returns a new instance of the DNSSeedBootstrapper.
// The set of passed seeds should point to DNS servers that properly implement
// Lighting's DNS peer bootstrapping protocol as defined in BOLT-0010. The set
// Lightning's DNS peer bootstrapping protocol as defined in BOLT-0010. The set
// of passed DNS seeds should come in pairs, with the second host name to be
// used as a fallback for manual TCP resolution in the case of an error
// receiving the UDP response. The second host should return a single A record

discovery/gossiper.go (26)

@ -27,7 +27,7 @@ var (
// messageStoreKey is a key used to create a top level bucket in
// the gossiper database, used for storing messages that are to
// be sent to peers. Currently this is used for reliably sending
// AnnounceSignatures messages, by peristing them until a send
// AnnounceSignatures messages, by persisting them until a send
// operation has succeeded.
messageStoreKey = []byte("message-store")
)
@ -184,7 +184,7 @@ type AuthenticatedGossiper struct {
// as we know it.
bestHeight uint32
// selfKey is the identity public key of the backing Lighting node.
// selfKey is the identity public key of the backing Lightning node.
selfKey *btcec.PublicKey
// channelMtx is used to restrict the database access to one
@ -315,7 +315,7 @@ func (d *AuthenticatedGossiper) SynchronizeNode(pub *btcec.PublicKey) error {
// channel forwarding policies for the specified channels. If no channels are
// specified, then the update will be applied to all outgoing channels from the
// source node. Policy updates are done in two stages: first, the
// AuthenticatedGossiper ensures the update has been committed by dependant
// AuthenticatedGossiper ensures the update has been committed by dependent
// sub-systems, then it signs and broadcasts new updates to the network.
func (d *AuthenticatedGossiper) PropagateChanPolicyUpdate(
newSchema routing.ChannelPolicy, chanPoints ...wire.OutPoint) error {
@ -493,7 +493,7 @@ func (d *deDupedAnnouncements) Reset() {
// reset is the private version of the Reset method. We have this so we can
// call this method within method that are already holding the lock.
func (d *deDupedAnnouncements) reset() {
// Storage of each type of announcement (channel anouncements, channel
// Storage of each type of announcement (channel announcements, channel
// updates, node announcements) is set to an empty map where the
// appropriate key points to the corresponding lnwire.Message.
d.channelAnnouncements = make(map[lnwire.ShortChannelID]msgWithSenders)
@ -502,7 +502,7 @@ func (d *deDupedAnnouncements) reset() {
}
// addMsg adds a new message to the current batch. If the message is already
// persent in the current batch, then this new instance replaces the latter,
// present in the current batch, then this new instance replaces the latter,
// and the set of senders is updated to reflect which node sent us this
// message.
func (d *deDupedAnnouncements) addMsg(message networkMsg) {
@ -590,7 +590,7 @@ func (d *deDupedAnnouncements) addMsg(message networkMsg) {
sender := routing.NewVertex(message.peer)
deDupKey := routing.NewVertex(msg.NodeID)
// We do the same for node annonuncements as we did for channel
// We do the same for node announcements as we did for channel
// updates, as they also carry a timestamp.
oldTimestamp := uint32(0)
mws, ok := d.nodeAnnouncements[deDupKey]
@ -823,7 +823,7 @@ func (d *AuthenticatedGossiper) networkHandler() {
trickleTimer := time.NewTicker(d.cfg.TrickleDelay)
defer trickleTimer.Stop()
// To start, we'll first check to see if there're any stale channels
// To start, we'll first check to see if there are any stale channels
// that we need to re-transmit.
if err := d.retransmitStaleChannels(); err != nil {
log.Errorf("unable to rebroadcast stale channels: %v",
@ -861,7 +861,7 @@ func (d *AuthenticatedGossiper) networkHandler() {
policyUpdate.errResp <- nil
case announcement := <-d.networkMsgs:
// Channel annoucnement signatures are the only message
// Channel announcement signatures are the only message
// that we'll process serially.
if _, ok := announcement.msg.(*lnwire.AnnounceSignatures); ok {
emittedAnnouncements := d.processNetworkAnnouncement(
@ -875,10 +875,10 @@ func (d *AuthenticatedGossiper) networkHandler() {
continue
}
// We'll set up any dependant, and wait until a free
// We'll set up any dependent, and wait until a free
// slot for this job opens up, this allow us to not
// have thousands of goroutines active.
validationBarrier.InitJobDependancies(announcement.msg)
validationBarrier.InitJobDependencies(announcement.msg)
go func() {
defer validationBarrier.CompleteJob()
@ -1147,11 +1147,11 @@ func (d *AuthenticatedGossiper) processChanPolicyUpdate(
return chanUpdates, nil
}
// processRejectedEdge examines a rejected edge to see if we can eexrtact any
// processRejectedEdge examines a rejected edge to see if we can extract any
// new announcements from it. An edge will get rejected if we already added
// the same edge without AuthProof to the graph. If the received announcement
// contains a proof, we can add this proof to our edge. We can end up in this
// situatation in the case where we create a channel, but for some reason fail
// situation in the case where we create a channel, but for some reason fail
// to receive the remote peer's proof, while the remote peer is able to fully
// assemble the proof and craft the ChannelAnnouncement.
func (d *AuthenticatedGossiper) processRejectedEdge(chanAnnMsg *lnwire.ChannelAnnouncement,
@ -1938,7 +1938,7 @@ func (d *AuthenticatedGossiper) sendAnnSigReliably(
// we do not succeed in sending it to the peer, we'll fetch it
// from the DB next time we start, and retry. We use the peer ID
// + shortChannelID as key, as there possibly is more than one
// channel oepning in progress to the same peer.
// channel opening in progress to the same peer.
var key [41]byte
copy(key[:33], remotePeer.SerializeCompressed())
binary.BigEndian.PutUint64(key[33:], msg.ShortChannelID.ToUint64())

discovery/gossiper_test.go (26)

@ -534,7 +534,7 @@ func TestProcessAnnouncement(t *testing.T) {
case msg := <-ctx.broadcastedMessage:
assertSenderExistence(na.NodeID, msg)
case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't proceeded")
t.Fatal("announcement wasn't proceeded")
}
if len(ctx.router.nodes) != 1 {
@ -562,7 +562,7 @@ func TestProcessAnnouncement(t *testing.T) {
case msg := <-ctx.broadcastedMessage:
assertSenderExistence(na.NodeID, msg)
case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't proceeded")
t.Fatal("announcement wasn't proceeded")
}
if len(ctx.router.infos) != 1 {
@ -590,7 +590,7 @@ func TestProcessAnnouncement(t *testing.T) {
case msg := <-ctx.broadcastedMessage:
assertSenderExistence(na.NodeID, msg)
case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't proceeded")
t.Fatal("announcement wasn't proceeded")
}
if len(ctx.router.edges) != 1 {
@ -663,17 +663,17 @@ func TestPrematureAnnouncement(t *testing.T) {
select {
case <-ctx.broadcastedMessage:
case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't broadcasted")
t.Fatal("announcement wasn't broadcasted")
}
if len(ctx.router.infos) != 1 {
t.Fatalf("edge was't added to router: %v", err)
t.Fatalf("edge wasn't added to router: %v", err)
}
select {
case <-ctx.broadcastedMessage:
case <-time.After(2 * trickleDelay):
t.Fatal("announcememt wasn't broadcasted")
t.Fatal("announcement wasn't broadcasted")
}
if len(ctx.router.edges) != 1 {
@ -1132,7 +1132,7 @@ func TestSignatureAnnouncementRetry(t *testing.T) {
}
// When the peer comes online, the gossiper gets notified, and should
// retry sending the AnnnounceSignatures. We make the SendToPeer
// retry sending the AnnounceSignatures. We make the SendToPeer
// method work again.
sentToPeer := make(chan lnwire.Message, 1)
ctx.gossiper.cfg.SendToPeer = func(target *btcec.PublicKey,
@ -1141,7 +1141,7 @@ func TestSignatureAnnouncementRetry(t *testing.T) {
return nil
}
// Notify that peer is now online. THis should trigger a new call
// Notify that peer is now online. This should trigger a new call
// to SendToPeer.
close(conChan)
@ -1369,7 +1369,7 @@ func TestSignatureAnnouncementRetryAtStartup(t *testing.T) {
t.Fatalf("gossiper did not send message when peer came online")
}
// Now exchanging the remote channel proof, the channel annoncement
// Now exchanging the remote channel proof, the channel announcement
// broadcast should continue as normal.
select {
case err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.remoteProofAnn,
@ -1562,7 +1562,7 @@ func TestSignatureAnnouncementFullProofWhenRemoteProof(t *testing.T) {
case msg := <-sentToPeer:
_, ok := msg.(*lnwire.ChannelAnnouncement)
if !ok {
t.Fatalf("expected ChannelAnnouncement, intead got %T", msg)
t.Fatalf("expected ChannelAnnouncement, instead got %T", msg)
}
case <-time.After(2 * time.Second):
t.Fatal("did not send local proof to peer")
@ -1638,7 +1638,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
t.Fatal("channel update not replaced in batch")
}
// Adding an announcment with a later timestamp should replace the
// Adding an announcement with a later timestamp should replace the
// stored one.
ua3, err := createUpdateAnnouncement(0, 0, nodeKeyPriv1, timestamp+1)
if err != nil {
@ -1790,7 +1790,7 @@ func TestDeDuplicatedAnnouncements(t *testing.T) {
}
// TestReceiveRemoteChannelUpdateFirst tests that if we receive a
// CHannelUpdate from the remote before we have processed our
// ChannelUpdate from the remote before we have processed our
// own ChannelAnnouncement, it will be reprocessed later, after
// our ChannelAnnouncement.
func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
@ -1822,7 +1822,7 @@ func TestReceiveRemoteChannelUpdateFirst(t *testing.T) {
localKey := batch.nodeAnn1.NodeID
remoteKey := batch.nodeAnn2.NodeID
// Recreate the case where the remote node is snding us its ChannelUpdate
// Recreate the case where the remote node is sending us its ChannelUpdate
// before we have been able to process our own ChannelAnnouncement and
// ChannelUpdate.
err = <-ctx.gossiper.ProcessRemoteAnnouncement(batch.chanUpdAnn2, remoteKey)

discovery/utils.go (4)

@ -10,8 +10,8 @@ import (
// createChanAnnouncement is a helper function which creates all channel
// announcements given the necessary channel related database items. This
// function is used to transform out databse structs into the coresponding wire
// sturcts for announcing new channels to other peers, or simply syncing up a
// function is used to transform out database structs into the corresponding wire
// structs for announcing new channels to other peers, or simply syncing up a
// peer's initial routing table upon connect.
func createChanAnnouncement(chanProof *channeldb.ChannelAuthProof,
chanInfo *channeldb.ChannelEdgeInfo,

docker/README.md (2)

@ -280,7 +280,7 @@ bitcoins. The schema will be following:
+ --------------- +
(1) You may connect an additinal node "Bob" and make the multihop
(1) You may connect an additional node "Bob" and make the multihop
payment Alice->Faucet->Bob
(2) "Faucet", "Alice" and "Bob" are the lightning network daemons which

docker/btcd/Dockerfile (2)

@ -34,7 +34,7 @@ RUN "/go/bin/gencerts" --host="*" --directory="/rpc" --force
# shared with any lnd, btcctl containers so they can securely query btcd's RPC
# server.
# You should NOT do this before certificate generation!
# Otherwise manually generated certificate will be overriden with shared
# Otherwise manually generated certificate will be overridden with shared
# mounted volume! For more info read dockerfile "VOLUME" documentation.
VOLUME ["/rpc"]

docker/docker-compose.yml (4)

@ -95,12 +95,12 @@ volumes:
shared:
driver: local
# bitcoin volume is needed for maintaining blockchain persistance
# bitcoin volume is needed for maintaining blockchain persistence
# during btcd container recreation.
bitcoin:
driver: local
# litecoin volume is needed for maintaining blockchain persistance
# litecoin volume is needed for maintaining blockchain persistence
# during ltcd container recreation.
litecoin:
driver: local

docker/lnd/Dockerfile (2)

@ -18,7 +18,7 @@ RUN git clone https://github.com/lightningnetwork/lnd $GOPATH/src/github.com/lig
# Make lnd folder default.
WORKDIR $GOPATH/src/github.com/lightningnetwork/lnd
# Instll dependency and install/build lnd.
# Install dependency and install/build lnd.
RUN glide install
RUN go install . ./cmd/...

2
docker/lnd/start-lnd.sh

@ -4,7 +4,7 @@
set -e
# error function is used within a bash function in order to send the error
# mesage directly to the stderr output and exit.
# message directly to the stderr output and exit.
error() {
echo "$1" > /dev/stderr
exit 0

2
docker/ltcd/Dockerfile

@ -32,7 +32,7 @@ RUN "/go/bin/gencerts" --host="*" --directory="/rpc" --force
# shared with any lnd, btcctl containers so they can securely query ltcd's RPC
# server.
# You should NOT do this before certificate generation!
# Otherwise manually generated certificate will be overriden with shared
# Otherwise manually generated certificate will be overridden with shared
# mounted volume! For more info read dockerfile "VOLUME" documentation.
VOLUME ["/rpc"]

4
docs/code_contribution_guidelines.md

@ -271,7 +271,7 @@ Further paragraphs come after blank lines.
Here are some of the reasons why wrapping your commit messages to 72 columns is
a good thing.
- git log doesnt do any special wrapping of the commit messages. With
- git log doesn't do any special wrapping of the commit messages. With
the default pager of less -S, this means your paragraphs flow far off the edge
of the screen, making them difficult to read. On an 80 column terminal, if we
subtract 4 columns for the indent on the left and 4 more for symmetry on the
@ -298,7 +298,7 @@ Blocks of code within `lnd` should be segmented into logical stanzas of
operation. Such spacing makes the code easier to follow at a skim, and reduces
unnecessary line noise. Coupled with the commenting scheme specified above,
proper spacing allows readers to quickly scan code, extracting semantics quickly.
Functions should _not_ just be layed out as a bare contiguous block of code.
Functions should _not_ just be laid out as a bare contiguous block of code.
**WRONG**
```go

2
docs/grpc/java.md

@ -131,7 +131,7 @@ Execute the following command in the directory where the **pom.xml** file is loc
```
mvn compile exec:java -Dexec.mainClass="Main" -Dexec.cleanupDaemonThreads=false
```
##### Sample ouput
##### Sample output
```
[INFO] Scanning for projects...
[INFO] ------------------------------------------------------------------------

2
docs/grpc/python.md

@ -44,7 +44,7 @@ Python gRPC.
#### Imports and Client
Everytime you use Python gRPC, you will have to import the generated rpc modules
Every time you use Python gRPC, you will have to import the generated rpc modules
and set up a channel and stub to your connect to your `lnd` node:
```python

4
fundingmanager.go

@ -186,7 +186,7 @@ type fundingConfig struct {
SignMessage func(pubKey *btcec.PublicKey, msg []byte) (*btcec.Signature, error)
// CurrentNodeAnnouncement should return the latest, fully signed node
// announcement from the backing Lighting Network node.
// announcement from the backing Lightning Network node.
CurrentNodeAnnouncement func() (lnwire.NodeAnnouncement, error)
// SendAnnouncement is used by the FundingManager to send
@ -981,7 +981,7 @@ func (f *fundingManager) processFundingAccept(msg *lnwire.AcceptChannel,
}
}
// handleFundingAceept processes a response to the workflow initiation sent by
// handleFundingAccept processes a response to the workflow initiation sent by
// the remote peer. This message then queues a message with the funding
// outpoint, and a commitment signature to the remote peer.
func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) {
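The `fundingConfig` hunk above shows behaviour being injected as function-typed fields (`SignMessage`, `CurrentNodeAnnouncement`). A minimal sketch of that pattern, with hypothetical names rather than lnd's own types, is:

```go
package main

import "fmt"

// managerConfig is an illustrative config struct in the style shown above:
// function-typed fields let callers (and tests) swap in stub behaviour
// without touching the component that consumes the config.
type managerConfig struct {
	// SignMessage signs msg and returns the signature, or an error.
	SignMessage func(msg []byte) ([]byte, error)

	// CurrentAnnouncement returns the latest announcement payload.
	CurrentAnnouncement func() (string, error)
}

func main() {
	cfg := managerConfig{
		SignMessage: func(msg []byte) ([]byte, error) {
			return append([]byte("sig:"), msg...), nil
		},
		CurrentAnnouncement: func() (string, error) {
			return "node-announcement", nil
		},
	}

	sig, _ := cfg.SignMessage([]byte("hello"))
	ann, _ := cfg.CurrentAnnouncement()
	fmt.Println(string(sig), ann)
}
```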

4
fundingmanager_test.go

@ -891,7 +891,7 @@ func TestFundingManagerRestartBehavior(t *testing.T) {
return fmt.Errorf("intentional error in SendToPeer")
}
alice.fundingMgr.cfg.NotifyWhenOnline = func(peer *btcec.PublicKey, con chan<- struct{}) {
// Intetionally empty.
// Intentionally empty.
}
// Notify that transaction was mined
@ -966,7 +966,7 @@ func TestFundingManagerRestartBehavior(t *testing.T) {
// Check that the state machine is updated accordingly
assertAddedToRouterGraph(t, alice, bob, fundingOutPoint)
// Next, we check that Alice sends the annnouncement signatures
// Next, we check that Alice sends the announcement signatures
// on restart after six confirmations. Bob should as expected send
// them as well.
recreateAliceFundingManager(t, alice)

2
htlcswitch/circuit.go

@ -42,7 +42,7 @@ type PaymentCircuit struct {
}
// circuitKey is a channel ID, HTLC ID tuple used as an identifying key for a
// payment circuit. The circuit map is keyed with the idenitifer for the
// payment circuit. The circuit map is keyed with the identifier for the
// outgoing HTLC
type circuitKey struct {
chanID lnwire.ShortChannelID
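The comment above describes `circuitKey` as a (channel ID, HTLC ID) tuple used to key the circuit map. A rough sketch of that idea, with simplified stand-in types rather than lnd's own, is:

```go
package main

import "fmt"

// circuitKey here is a simplified stand-in: because the struct contains only
// comparable fields, it can be used directly as a Go map key, giving O(1)
// lookup of a payment circuit by its outgoing (channel ID, HTLC ID) pair.
type circuitKey struct {
	chanID uint64 // stand-in for a short channel ID
	htlcID uint64 // index of the outgoing HTLC
}

func main() {
	circuits := map[circuitKey]string{
		{chanID: 42, htlcID: 7}: "pending outgoing HTLC",
	}
	fmt.Println(circuits[circuitKey{chanID: 42, htlcID: 7}])
}
```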

2
htlcswitch/iterator.go

@ -9,7 +9,7 @@ import (
)
// NetworkHop indicates the blockchain network that is intended to be the next
// hop for a forwarded HTLC. The existnce of this field within the
// hop for a forwarded HTLC. The existence of this field within the
// ForwardingInfo struct enables the ability for HTLC to cross chain-boundaries
// at will.
type NetworkHop uint8

18
htlcswitch/link.go

@ -150,8 +150,8 @@ type ChannelLinkConfig struct {
// in thread-safe manner.
Registry InvoiceDatabase
// PreimageCache is a global witness baacon that houses any new
// preimges discovered by other links. We'll use this to add new
// PreimageCache is a global witness beacon that houses any new
// preimages discovered by other links. We'll use this to add new
// witnesses that we discover which will notify any sub-systems
// subscribed to new events.
PreimageCache contractcourt.WitnessBeacon
@ -532,7 +532,7 @@ func (l *channelLink) syncChanStates() error {
// a duplicate settle.
htlcsSettled := make(map[uint64]struct{})
for _, msg := range msgsToReSend {
settleMsg, ok := msg.(*lnwire.UpdateFufillHTLC)
settleMsg, ok := msg.(*lnwire.UpdateFulfillHTLC)
if !ok {
// If this isn't a settle message, then we'll skip it.
continue
@ -588,7 +588,7 @@ func (l *channelLink) syncChanStates() error {
return err
}
l.batchCounter++
l.cfg.Peer.SendMessage(&lnwire.UpdateFufillHTLC{
l.cfg.Peer.SendMessage(&lnwire.UpdateFulfillHTLC{
ChanID: l.ChanID(),
ID: htlc.HtlcIndex,
PaymentPreimage: p,
@ -896,7 +896,7 @@ func (l *channelLink) handleDownStreamPkt(pkt *htlcPacket, isReProcess bool) {
htlc.ID = index
l.cfg.Peer.SendMessage(htlc)
case *lnwire.UpdateFufillHTLC:
case *lnwire.UpdateFulfillHTLC:
// An HTLC we forward to the switch has just settled somewhere
// upstream. Therefore we settle the HTLC within the our local
// state machine.
@ -971,7 +971,7 @@ func (l *channelLink) handleUpstreamMsg(msg lnwire.Message) {
log.Tracef("Receive upstream htlc with payment hash(%x), "+
"assigning index: %v", msg.PaymentHash[:], index)
case *lnwire.UpdateFufillHTLC:
case *lnwire.UpdateFulfillHTLC:
pre := msg.PaymentPreimage
idx := msg.ID
if err := l.channel.ReceiveHTLCSettle(pre, idx); err != nil {
@ -1344,7 +1344,7 @@ func (l *channelLink) updateChannelFee(feePerKw btcutil.Amount) error {
feePerKw)
// We skip sending the UpdateFee message if the channel is not
// currently eligable to forward messages.
// currently eligible to forward messages.
if !l.EligibleToForward() {
log.Debugf("ChannelPoint(%v): skipping fee update for " +
"inactive channel")
@ -1391,7 +1391,7 @@ func (l *channelLink) processLockedInHtlcs(
outgoingChanID: l.ShortChanID(),
outgoingHTLCID: pd.ParentIndex,
amount: pd.Amount,
htlc: &lnwire.UpdateFufillHTLC{
htlc: &lnwire.UpdateFulfillHTLC{
PaymentPreimage: pd.RPreimage,
},
}
@ -1644,7 +1644,7 @@ func (l *channelLink) processLockedInHtlcs(
// HTLC was successfully settled locally send
// notification about it remote peer.
l.cfg.Peer.SendMessage(&lnwire.UpdateFufillHTLC{
l.cfg.Peer.SendMessage(&lnwire.UpdateFulfillHTLC{
ChanID: l.ChanID(),
ID: pd.HtlcIndex,
PaymentPreimage: preimage,
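The `PreimageCache` comment above describes a shared witness beacon: newly discovered preimages are added to it and any subscribed sub-systems are notified. A simplified sketch of that add-and-notify shape (not lnd's `WitnessBeacon` interface) is:

```go
package main

import (
	"fmt"
	"sync"
)

// preimageBeacon is an illustrative shared preimage store that fans each
// newly added preimage out to every subscriber channel.
type preimageBeacon struct {
	mu        sync.Mutex
	preimages map[[32]byte][]byte
	subs      []chan []byte
}

func newPreimageBeacon() *preimageBeacon {
	return &preimageBeacon{preimages: make(map[[32]byte][]byte)}
}

// Subscribe returns a channel on which newly added preimages are delivered.
func (b *preimageBeacon) Subscribe() <-chan []byte {
	b.mu.Lock()
	defer b.mu.Unlock()
	ch := make(chan []byte, 1)
	b.subs = append(b.subs, ch)
	return ch
}

// AddPreimage stores a preimage under its hash and notifies subscribers.
func (b *preimageBeacon) AddPreimage(hash [32]byte, preimage []byte) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.preimages[hash] = preimage
	for _, ch := range b.subs {
		select {
		case ch <- preimage:
		default: // drop rather than block a slow subscriber
		}
	}
}

func main() {
	beacon := newPreimageBeacon()
	updates := beacon.Subscribe()
	beacon.AddPreimage([32]byte{1}, []byte("preimage"))
	fmt.Printf("%s\n", <-updates)
}
```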

22
htlcswitch/link_test.go

@ -225,7 +225,7 @@ func TestChannelLinkSingleHopPayment(t *testing.T) {
// Wait for Bob to receive the revocation.
//
// TODO(roasbef); replace with select over returned err chan
// TODO(roasbeef); replace with select over returned err chan
time.Sleep(100 * time.Millisecond)
// Check that alice invoice was settled and bandwidth of HTLC
@ -1338,7 +1338,7 @@ func TestChannelLinkSingleHopMessageOrdering(t *testing.T) {
{"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"bob", "alice", &lnwire.UpdateFufillHTLC{}, false},
{"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"alice", "bob", &lnwire.CommitSig{}, false},
@ -1429,7 +1429,7 @@ func newSingleLinkTestHarness(chanAmt btcutil.Amount) (ChannelLink,
}
var (
invoiveRegistry = newMockRegistry()
invoiceRegistry = newMockRegistry()
decoder = &mockIteratorDecoder{}
obfuscator = newMockObfuscator()
alicePeer = &mockPeer{
@ -1464,7 +1464,7 @@ func newSingleLinkTestHarness(chanAmt btcutil.Amount) (ChannelLink,
UpdateContractSignals: func(*contractcourt.ContractSignals) error {
return nil
},
Registry: invoiveRegistry,
Registry: invoiceRegistry,
ChainEvents: &contractcourt.ChainEventSubscription{},
BlockEpochs: globalEpoch,
BatchTicker: ticker,
@ -1754,7 +1754,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) {
if err != nil {
t.Fatalf("unable to settle htlc: %v", err)
}
htlcSettle := &lnwire.UpdateFufillHTLC{
htlcSettle := &lnwire.UpdateFulfillHTLC{
ID: bobIndex,
PaymentPreimage: invoice.Terms.PaymentPreimage,
}
@ -1880,7 +1880,7 @@ func TestChannelLinkBandwidthConsistency(t *testing.T) {
// we eventually learn (simulating a multi-hop payment). The bandwidth
// of the channel should now be re-balanced to the starting point.
settlePkt := htlcPacket{
htlc: &lnwire.UpdateFufillHTLC{
htlc: &lnwire.UpdateFulfillHTLC{
ID: bobIndex,
PaymentPreimage: invoice.Terms.PaymentPreimage,
},
@ -2143,7 +2143,7 @@ func TestChannelLinkBandwidthConsistencyOverflow(t *testing.T) {
t.Fatalf("unable to settle htlc: %v", err)
}
htlcSettle := &lnwire.UpdateFufillHTLC{
htlcSettle := &lnwire.UpdateFulfillHTLC{
ID: uint64(i),
PaymentPreimage: preImages[i],
}
@ -2235,13 +2235,13 @@ func TestChannelRetransmission(t *testing.T) {
// Alice should resend the revoke_and_ack
// message to Bob because Bob claimed it in the
// reestbalish message.
// re-establish message.
{"alice", "bob", &lnwire.RevokeAndAck{}, false},
// Proceed the payment farther by sending the
// fulfilment message and trigger the state
// update.
{"bob", "alice", &lnwire.UpdateFufillHTLC{}, false},
{"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"alice", "bob", &lnwire.CommitSig{}, false},
@ -2283,7 +2283,7 @@ func TestChannelRetransmission(t *testing.T) {
// fulfilment message and trigger the state
// update.
{"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"bob", "alice", &lnwire.UpdateFufillHTLC{}, false},
{"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"alice", "bob", &lnwire.CommitSig{}, false},
@ -2325,7 +2325,7 @@ func TestChannelRetransmission(t *testing.T) {
{"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"bob", "alice", &lnwire.UpdateFufillHTLC{}, false},
{"bob", "alice", &lnwire.UpdateFulfillHTLC{}, false},
{"bob", "alice", &lnwire.CommitSig{}, false},
{"alice", "bob", &lnwire.RevokeAndAck{}, false},
{"alice", "bob", &lnwire.CommitSig{}, false},

4
htlcswitch/mock.go

@ -52,7 +52,7 @@ func (m *mockPreimageCache) AddPreimage(preimage []byte) error {
return nil
}
func (m *mockPreimageCache) SubcribeUpdates() *contractcourt.WitnessSubcription {
func (m *mockPreimageCache) SubscribeUpdates() *contractcourt.WitnessSubscription {
return nil
}
@ -356,7 +356,7 @@ func (s *mockServer) readHandler(message lnwire.Message) error {
switch msg := message.(type) {
case *lnwire.UpdateAddHTLC:
targetChan = msg.ChanID
case *lnwire.UpdateFufillHTLC:
case *lnwire.UpdateFulfillHTLC:
targetChan = msg.ChanID
case *lnwire.UpdateFailHTLC:
targetChan = msg.ChanID

6
htlcswitch/switch.go

@ -469,7 +469,7 @@ func (s *Switch) handleLocalDispatch(packet *htlcPacket) error {
// We've just received a settle update which means we can finalize the
// user payment and return successful response.
case *lnwire.UpdateFufillHTLC:
case *lnwire.UpdateFulfillHTLC:
// Notify the user that his payment was successfully proceed.
payment.err <- nil
payment.preimage <- htlc.PaymentPreimage
@ -652,7 +652,7 @@ func (s *Switch) handlePacketForward(packet *htlcPacket) error {
// We've just received a settle packet which means we can finalize the
// payment circuit by forwarding the settle msg to the channel from
// which htlc add packet was initially received.
case *lnwire.UpdateFufillHTLC, *lnwire.UpdateFailHTLC:
case *lnwire.UpdateFulfillHTLC, *lnwire.UpdateFailHTLC:
if !packet.isRouted {
// Use circuit map to find the link to forward settle/fail to.
circuit := s.circuits.LookupByHTLC(packet.outgoingChanID,
@ -835,7 +835,7 @@ func (s *Switch) htlcForwarder() {
if resolutionMsg.Failure != nil {
pkt.htlc = &lnwire.UpdateFailHTLC{}
} else {
pkt.htlc = &lnwire.UpdateFufillHTLC{
pkt.htlc = &lnwire.UpdateFulfillHTLC{
PaymentPreimage: *resolutionMsg.PreImage,
}
}

2
htlcswitch/switch_test.go

@ -89,7 +89,7 @@ func TestSwitchForward(t *testing.T) {
outgoingChanID: bobChannelLink.ShortChanID(),
outgoingHTLCID: 0,
amount: 1,
htlc: &lnwire.UpdateFufillHTLC{
htlc: &lnwire.UpdateFulfillHTLC{
PaymentPreimage: preimage,
},
}

2
htlcswitch/test_utils.go

@ -361,7 +361,7 @@ func getChanID(msg lnwire.Message) (lnwire.ChannelID, error) {
switch msg := msg.(type) {
case *lnwire.UpdateAddHTLC:
chanID = msg.ChanID
case *lnwire.UpdateFufillHTLC:
case *lnwire.UpdateFulfillHTLC:
chanID = msg.ChanID
case *lnwire.UpdateFailHTLC:
chanID = msg.ChanID
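The `getChanID` hunk above extracts the channel ID by type-switching over the concrete wire message. A small self-contained sketch of that dispatch pattern, using hypothetical message types rather than lnwire's, is:

```go
package main

import (
	"errors"
	"fmt"
)

// Illustrative message types: each carries a ChanID field, and the switch
// picks it out based on the concrete type, mirroring helpers like getChanID.
type updateAddHTLC struct{ ChanID uint64 }
type updateFulfillHTLC struct{ ChanID uint64 }

func getChanID(msg interface{}) (uint64, error) {
	switch m := msg.(type) {
	case *updateAddHTLC:
		return m.ChanID, nil
	case *updateFulfillHTLC:
		return m.ChanID, nil
	default:
		return 0, errors.New("unknown message type")
	}
}

func main() {
	id, err := getChanID(&updateFulfillHTLC{ChanID: 99})
	fmt.Println(id, err)
}
```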

2
lnd.go

@ -304,7 +304,7 @@ func lndMain() error {
for _, channel := range dbChannels {
if chanID.IsChanPoint(&channel.FundingOutpoint) {
// TODO(rosbeef): populate baecon
// TODO(roasbeef): populate beacon
return lnwallet.NewLightningChannel(
activeChainControl.signer,
server.witnessBeacon,

28
lnd_test.go

@ -86,7 +86,7 @@ func (h *harnessTest) RunTestCase(testCase *testCase,
defer func() {
if err := recover(); err != nil {
description := errors.Wrap(err, 2).ErrorStack()
h.t.Fatalf("Failed: (%v) paniced with: \n%v",
h.t.Fatalf("Failed: (%v) panicked with: \n%v",
h.testCase.name, description)
}
}()
@ -192,7 +192,7 @@ func openChannelAndAssert(ctx context.Context, t *harnessTest,
}
// closeChannelAndAssert attempts to close a channel identified by the passed
// channel point owned by the passed lighting node. A fully blocking channel
// channel point owned by the passed Lightning node. A fully blocking channel
// closure is attempted, therefore the passed context should be a child derived
// via timeout from a base parent. Additionally, once the channel has been
// detected as closed, an assertion checks that the transaction is found within
@ -1346,7 +1346,7 @@ func assertNumForceClosedChannels(t *harnessTest,
}
// assertPendingHtlcStageAndMaturity uniformly tests all pending htlc's
// belonging to a force closed channel, testing for the expeced stage number,
// belonging to a force closed channel, testing for the expected stage number,
// blocks till maturity, and the maturity height.
func assertPendingHtlcStageAndMaturity(t *harnessTest,
forceClose *lnrpc.PendingChannelsResponse_ForceClosedChannel,
@ -2783,7 +2783,7 @@ func testPrivateChannels(net *lntest.NetworkHarness, t *harnessTest) {
// Finally, we make sure Dave and Bob does not know about the
// private channel between Carol and Alice. We first mine
// plenty of blocks, such that the channel would have been
// announceed in case it was public.
// announced in case it was public.
mineBlocks(t, net, 10)
// We create a helper method to check how many edges each of the
@ -2890,7 +2890,7 @@ func testInvoiceSubscriptions(net *lntest.NetworkHarness, t *harnessTest) {
// The invoice update should exactly match the invoice created
// above, but should now be settled and have SettleDate
if !invoiceUpdate.Settled {
t.Fatalf("invoice not settled but shoudl be")
t.Fatalf("invoice not settled but should be")
}
if invoiceUpdate.SettleDate == 0 {
t.Fatalf("invoice should have non zero settle date, but doesn't")
@ -3029,7 +3029,7 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) {
// Mine 6 blocks, then wait for node's to notify us that the channel has
// been opened. The funding transactions should be found within the
// first newly mined block. 6 blocks make sure the funding transaction
// has enouught confirmations to be announced publicly.
// has enough confirmations to be announced publicly.
block := mineBlocks(t, net, 6)[0]
chanPoints := make([]*lnrpc.ChannelPoint, maxPendingChannels)
@ -3166,7 +3166,7 @@ func waitForNTxsInMempool(miner *rpcclient.Client, n int,
}
}
// testRevokedCloseRetributinPostBreachConf tests that Alice is able carry out
// testRevokedCloseRetribution tests that Alice is able carry out
// retribution in the event that she fails immediately after detecting Bob's
// breach txn in the mempool.
func testRevokedCloseRetribution(net *lntest.NetworkHarness, t *harnessTest) {
@ -3700,7 +3700,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
// We'll introduce another closure to validate that Carol's current
// number of updates is at least as large as the provided minimum
// number.
checkCarolNumUpdatesAtleast := func(minimum uint64) {
checkCarolNumUpdatesAtLeast := func(minimum uint64) {
carolChan, err := getCarolChanInfo()
if err != nil {
t.Fatalf("unable to get carol's channel info: %v", err)
@ -3748,7 +3748,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
checkCarolBalance(pushAmt)
// Since Carol has not settled, she should only see at least one update
// to her channel.
checkCarolNumUpdatesAtleast(1)
checkCarolNumUpdatesAtLeast(1)
// Create a temporary file to house Carol's database state at this
// particular point in history.
@ -3778,7 +3778,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
// pushed to her, and that at least one more update has occurred.
time.Sleep(500 * time.Millisecond)
checkCarolBalance(pushAmt)
checkCarolNumUpdatesAtleast(carolStateNumPreCopy + 1)
checkCarolNumUpdatesAtLeast(carolStateNumPreCopy + 1)
// Now we shutdown Carol, copying over the her temporary database state
// which has the *prior* channel state over her current most up to date
@ -3795,7 +3795,7 @@ func testRevokedCloseRetributionRemoteHodl(net *lntest.NetworkHarness,
// Ensure that Carol's view of the channel is consistent with the
// state of the channel just before it was snapshotted.
checkCarolBalance(pushAmt)
checkCarolNumUpdatesAtleast(1)
checkCarolNumUpdatesAtLeast(1)
// Now query for Carol's channel state, it should show that she's at a
// state number in the past, *not* the latest state.
@ -4045,7 +4045,7 @@ out:
t.Fatalf("unable to send payment: %v", err)
}
// The payment should've resulted in an error since we sent it with the
// The payment should have resulted in an error since we sent it with the
// wrong payment hash.
if resp.PaymentError == "" {
t.Fatalf("payment should have been rejected due to invalid " +
@ -4222,7 +4222,7 @@ func subscribeGraphNotifications(t *harnessTest, ctxb context.Context,
t.Fatalf("unable to create topology client: %v", err)
}
// We'll launch a goroutine that'll be responsible for proxying all
// We'll launch a goroutine that will be responsible for proxying all
// notifications recv'd from the client into the channel below.
quit := make(chan struct{})
graphUpdates := make(chan *lnrpc.GraphTopologyUpdate, 20)
@ -4270,7 +4270,7 @@ func testGraphTopologyNotifications(net *lntest.NetworkHarness, t *harnessTest)
chanPoint := openChannelAndAssert(ctxt, t, net, net.Alice, net.Bob,
chanAmt, 0)
// The channel opening above should've triggered a few notifications
// The channel opening above should have triggered a few notifications
// sent to the notification client. We'll expect two channel updates,
// and two node announcements.
const numExpectedUpdates = 4

2
lnrpc/gen_protos.sh

@ -9,7 +9,7 @@ protoc -I/usr/local/include -I. \
# Generate the REST reverse prozxy.
# Generate the REST reverse proxy.
protoc -I/usr/local/include -I. \
-I$GOPATH/src \
-I$GOPATH/src/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis \

8
lnrpc/rpc.proto

@ -299,7 +299,7 @@ service Lightning {
}
/** lncli: `lookupinvoice`
LookupInvoice attemps to look up an invoice according to its payment hash.
LookupInvoice attempts to look up an invoice according to its payment hash.
The passed payment hash *must* be exactly 32 bytes, if not, an error is
returned.
*/
@ -389,7 +389,7 @@ service Lightning {
route to a target destination capable of carrying a specific amount of
satoshis. The retuned route contains the full details required to craft and
send an HTLC, also including the necessary information that should be
present within the Sphinx packet encapsualted within the HTLC.
present within the Sphinx packet encapsulated within the HTLC.
*/
rpc QueryRoutes(QueryRoutesRequest) returns (QueryRoutesResponse) {
option (google.api.http) = {
@ -610,7 +610,7 @@ message VerifyMessageRequest {
/// The message over which the signature is to be verified
bytes msg = 1 [ json_name = "msg" ];
/// The signature to be verifed over the given message
/// The signature to be verified over the given message
string signature = 2 [ json_name = "signature" ];
}
message VerifyMessageResponse {
@ -864,7 +864,7 @@ message OpenChannelRequest {
/// The pubkey of the node to open a channel with
bytes node_pubkey = 2 [json_name = "node_pubkey"];
/// The hex encorded pubkey of the node to open a channel with
/// The hex encoded pubkey of the node to open a channel with
string node_pubkey_string = 3 [json_name = "node_pubkey_string"];
/// The number of satoshis the wallet should commit to the channel

6
lnrpc/rpc.swagger.json

@ -352,7 +352,7 @@
},
"/v1/graph/routes/{pub_key}/{amt}": {
"get": {
"summary": "* lncli: `queryroutes`\nQueryRoutes attempts to query the daemon's Channel Router for a possible\nroute to a target destination capable of carrying a specific amount of\nsatoshis. The retuned route contains the full details required to craft and\nsend an HTLC, also including the necessary information that should be\npresent within the Sphinx packet encapsualted within the HTLC.",
"summary": "* lncli: `queryroutes`\nQueryRoutes attempts to query the daemon's Channel Router for a possible\nroute to a target destination capable of carrying a specific amount of\nsatoshis. The retuned route contains the full details required to craft and\nsend an HTLC, also including the necessary information that should be\npresent within the Sphinx packet encapsulated within the HTLC.",
"operationId": "QueryRoutes",
"responses": {
"200": {
@ -384,7 +384,7 @@
},
"/v1/invoice/{r_hash_str}": {
"get": {
"summary": "* lncli: `lookupinvoice`\nLookupInvoice attemps to look up an invoice according to its payment hash.\nThe passed payment hash *must* be exactly 32 bytes, if not, an error is\nreturned.",
"summary": "* lncli: `lookupinvoice`\nLookupInvoice attempts to look up an invoice according to its payment hash.\nThe passed payment hash *must* be exactly 32 bytes, if not, an error is\nreturned.",
"operationId": "LookupInvoice",
"responses": {
"200": {
@ -1563,7 +1563,7 @@
},
"node_pubkey_string": {
"type": "string",
"title": "/ The hex encorded pubkey of the node to open a channel with"
"title": "/ The hex encoded pubkey of the node to open a channel with"
},
"local_funding_amount": {
"type": "string",

6
lntest/harness.go

@ -53,7 +53,7 @@ type NetworkHarness struct {
// NewNetworkHarness creates a new network test harness.
// TODO(roasbeef): add option to use golang's build library to a binary of the
// current repo. This'll save developers from having to manually `go install`
// current repo. This will save developers from having to manually `go install`
// within the repo each time before changes
func NewNetworkHarness(r *rpctest.Harness) (*NetworkHarness, error) {
n := NetworkHarness{
@ -232,7 +232,7 @@ func (n *NetworkHarness) TearDownAll() error {
return nil
}
// NewNode fully initializes a returns a new HarnessNode binded to the
// NewNode fully initializes a returns a new HarnessNode bound to the
// current instance of the network harness. The created node is running, but
// not yet connected to other nodes within the network.
func (n *NetworkHarness) NewNode(extraArgs []string) (*HarnessNode, error) {
@ -681,7 +681,7 @@ func (n *NetworkHarness) CloseChannel(ctx context.Context,
}
// Next, we'll fetch the target channel in order to get the
// harness node that'll be receiving the channel close request.
// harness node that will be receiving the channel close request.
targetChan, err := filterChannel(lnNode, chanPoint)
if err != nil {
return nil, nil, err

4
lntest/node.go

@ -35,7 +35,7 @@ var (
// defaultNodePort is the initial p2p port which will be used by the
// first created lightning node to listen on for incoming p2p
// connections. Subsequent allocated ports for future lighting nodes
// connections. Subsequent allocated ports for future Lightning nodes
// instances will be monotonically increasing numbers calculated as
// such: defaultP2pPort + (3 * harness.nodeNum).
defaultNodePort = 19555
@ -317,7 +317,7 @@ func (hn *HarnessNode) start(lndError chan<- error) error {
}
copy(hn.PubKey[:], pubkey)
// Launch the watcher that'll hook into graph related topology change
// Launch the watcher that will hook into graph related topology change
// from the PoV of this node.
hn.wg.Add(1)
go hn.lightningNetworkWatcher()

2
lnwallet/btcwallet/btcwallet.go

@ -86,7 +86,7 @@ func New(cfg Config) (*BtcWallet, error) {
}
} else {
// Wallet has been created and been initialized at this point,
// open it along with all the required DB namepsaces, and the
// open it along with all the required DB namespaces, and the
// DB itself.
wallet, err = loader.OpenExistingWallet(pubPass, false)
if err != nil {

4
lnwallet/btcwallet/signer.go

@ -33,7 +33,7 @@ func (b *BtcWallet) FetchInputInfo(prevOut *wire.OutPoint) (*wire.TxOut, error)
}
b.cacheMtx.RUnlock()
// Otherwse, we manually look up the output within the tx store.
// Otherwise, we manually look up the output within the tx store.
txid := &prevOut.Hash
txDetail, err := base.UnstableAPI(b.wallet).TxDetails(txid)
if err != nil {
@ -180,7 +180,7 @@ func (b *BtcWallet) ComputeInputScript(tx *wire.MsgTx,
pubKey := privKey.PubKey()
pubKeyHash := btcutil.Hash160(pubKey.SerializeCompressed())
// Next, we'll generate a valid sigScript that'll allow us to
// Next, we'll generate a valid sigScript that will allow us to
// spend the p2sh output. The sigScript will contain only a
// single push of the p2wkh witness program corresponding to
// the matching public key of this address.

28
lnwallet/channel.go

@ -31,7 +31,7 @@ var (
// that has already been closed or is in the process of being closed.
ErrChanClosing = fmt.Errorf("channel is being closed, operation disallowed")
// ErrNoWindow is returned when revocation window is exausted.
// ErrNoWindow is returned when revocation window is exhausted.
ErrNoWindow = fmt.Errorf("unable to sign new commitment, the current" +
" revocation window is exhausted")
@ -941,7 +941,7 @@ type updateLog struct {
// htlcCounter is a monotonically increasing integer that tracks the
// total number of offered HTLC's by the owner of this update log. We
// use a distinct index for this purpose, as update's that remove
// entires from the log will be indexed using this counter.
// entries from the log will be indexed using this counter.
htlcCounter uint64
// List is the updatelog itself, we embed this value so updateLog has
@ -953,7 +953,7 @@ type updateLog struct {
updateIndex map[uint64]*list.Element
// offerIndex is an index that maps the counter for offered HTLC's to
// their list elemtn within the main list.List.
// their list element within the main list.List.
htlcIndex map[uint64]*list.Element
}
@ -1130,7 +1130,7 @@ type LightningChannel struct {
// Capacity is the total capacity of this channel.
Capacity btcutil.Amount
// stateHintObfuscator is a 48-bit state hint that's used to obfsucate
// stateHintObfuscator is a 48-bit state hint that's used to obfuscate
// the current state number on the commitment transactions.
stateHintObfuscator [StateHintSize]byte
@ -1387,7 +1387,7 @@ func (lc *LightningChannel) logUpdateToPayDesc(logUpdate *channeldb.LogUpdate,
// For HTLC's we we're offered we'll fetch the original offered HTLc
// from the remote party's update log so we can retrieve the same
// PaymentDescriptor that SettleHTLC would produce.
case *lnwire.UpdateFufillHTLC:
case *lnwire.UpdateFulfillHTLC:
ogHTLC := remoteUpdateLog.lookupHtlc(wireMsg.ID)
pd = &PaymentDescriptor{
@ -2620,7 +2620,7 @@ func (lc *LightningChannel) createCommitDiff(
logUpdate.UpdateMsg = htlc
case Settle:
logUpdate.UpdateMsg = &lnwire.UpdateFufillHTLC{
logUpdate.UpdateMsg = &lnwire.UpdateFulfillHTLC{
ChanID: chanID,
ID: pd.ParentIndex,
PaymentPreimage: pd.RPreimage,
@ -3568,9 +3568,9 @@ func (lc *LightningChannel) ReceiveRevocation(revMsg *lnwire.RevokeAndAck) ([]*P
continue
}
uncomitted := (htlc.addCommitHeightRemote == 0 ||
uncommitted := (htlc.addCommitHeightRemote == 0 ||
htlc.addCommitHeightLocal == 0)
if htlc.EntryType == Add && uncomitted {
if htlc.EntryType == Add && uncommitted {
continue
}
@ -4290,14 +4290,14 @@ func newOutgoingHtlcResolution(signer Signer, localChanCfg *channeldb.ChannelCon
if !localCommit {
// First, we'll re-generate the script used to send the HTLC to
// the remote party within their commitment transaction.
htlcReciverScript, err := receiverHTLCScript(htlc.RefundTimeout,
htlcReceiverScript, err := receiverHTLCScript(htlc.RefundTimeout,
keyRing.LocalHtlcKey, keyRing.RemoteHtlcKey,
keyRing.RevocationKey, htlc.RHash[:],
)
if err != nil {
return nil, err
}
htlcScriptHash, err := witnessScriptHash(htlcReciverScript)
htlcScriptHash, err := witnessScriptHash(htlcReceiverScript)
if err != nil {
return nil, err
}
@ -4310,7 +4310,7 @@ func newOutgoingHtlcResolution(signer Signer, localChanCfg *channeldb.ChannelCon
SweepSignDesc: SignDescriptor{
PubKey: localChanCfg.HtlcBasePoint,
SingleTweak: keyRing.LocalHtlcKeyTweak,
WitnessScript: htlcReciverScript,
WitnessScript: htlcReceiverScript,
Output: &wire.TxOut{
PkScript: htlcScriptHash,
Value: int64(htlc.Amt.ToSatoshis()),
@ -4621,8 +4621,8 @@ type ForceCloseSummary struct {
ChanPoint wire.OutPoint
// CloseTx is the transaction which closed the channel on-chain. If we
// initiate the force close, then this'll be our latest commitment
// state. Otherwise, this'll be the state that the remote peer
// initiate the force close, then this will be our latest commitment
// state. Otherwise, this will be the state that the remote peer
// broadcasted on-chain.
CloseTx *wire.MsgTx
@ -5327,7 +5327,7 @@ func (lc *LightningChannel) ActiveHtlcs() []channeldb.HTLC {
// We'll only return HTLC's that are locked into *both* commitment
// transactions. So we'll iterate through their set of HTLC's to note
// which ones are present on thir commitment.
// which ones are present on their commitment.
remoteHtlcs := make(map[[32]byte]struct{})
for _, htlc := range lc.channelState.RemoteCommitment.Htlcs {
onionHash := sha256.Sum256(htlc.OnionBlob[:])
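The `updateLog` comments above describe an embedded `list.List` holding updates in order, plus index maps that resolve an update or offered-HTLC counter to its list element. A stripped-down sketch of that structure (not lnd's actual `updateLog`) is:

```go
package main

import (
	"container/list"
	"fmt"
)

// updateLog here is illustrative: entries live in an embedded list.List, and
// two maps allow O(1) lookup of an entry by its update or HTLC counter.
type updateLog struct {
	logIndex    uint64 // counter for all updates
	htlcCounter uint64 // counter for offered HTLCs only

	*list.List

	updateIndex map[uint64]*list.Element
	htlcIndex   map[uint64]*list.Element
}

func newUpdateLog() *updateLog {
	return &updateLog{
		List:        list.New(),
		updateIndex: make(map[uint64]*list.Element),
		htlcIndex:   make(map[uint64]*list.Element),
	}
}

// appendHtlc adds an offered HTLC entry and indexes it under both counters.
func (u *updateLog) appendHtlc(entry string) {
	elem := u.PushBack(entry)
	u.updateIndex[u.logIndex] = elem
	u.htlcIndex[u.htlcCounter] = elem
	u.logIndex++
	u.htlcCounter++
}

func main() {
	log := newUpdateLog()
	log.appendHtlc("add HTLC #0")
	fmt.Println(log.htlcIndex[0].Value)
}
```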

14
lnwallet/channel_test.go

@ -2023,7 +2023,7 @@ func TestUpdateFeeFail(t *testing.T) {
}
// TestUpdateFeeSenderCommits veriefies that the state machine progresses as
// TestUpdateFeeSenderCommits verifies that the state machine progresses as
// expected if we send a fee update, and then the sender of the fee update
// sends a commitment signature.
func TestUpdateFeeSenderCommits(t *testing.T) {
@ -2103,7 +2103,7 @@ func TestUpdateFeeSenderCommits(t *testing.T) {
// that Bob's received everything up to the signature she sent,
// including the HTLC and fee update.
if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
t.Fatalf("alice unable to rocess bob's revocation: %v", err)
t.Fatalf("alice unable to process bob's revocation: %v", err)
}
// Alice receives new signature from Bob, and assumes this covers the
@ -2198,7 +2198,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
// Bob receives the revocation of the old commitment
if _, err := bobChannel.ReceiveRevocation(aliceRevocation); err != nil {
t.Fatalf("alice unable to rocess bob's revocation: %v", err)
t.Fatalf("alice unable to process bob's revocation: %v", err)
}
// Alice will sign next commitment. Since she sent the revocation, she
@ -2239,7 +2239,7 @@ func TestUpdateFeeReceiverCommits(t *testing.T) {
t.Fatalf("alice unable to sign commitment: %v", err)
}
// Alice receives revokation from Bob, and can now be sure that Bob
// Alice receives revocation from Bob, and can now be sure that Bob
// received the two updates, and they are considered locked in.
if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
t.Fatalf("bob unable to process alice's revocation: %v", err)
@ -2383,7 +2383,7 @@ func TestUpdateFeeMultipleUpdates(t *testing.T) {
// Bob's received everything up to the signature she sent, including the
// HTLC and fee update.
if _, err := aliceChannel.ReceiveRevocation(bobRevocation); err != nil {
t.Fatalf("alice unable to rocess bob's revocation: %v", err)
t.Fatalf("alice unable to process bob's revocation: %v", err)
}
// Alice receives new signature from Bob, and assumes this covers the
@ -2731,7 +2731,7 @@ func TestChanSyncOweCommitment(t *testing.T) {
// Each of the settle messages that Alice sent should match her
// original intent.
for i := 0; i < 3; i++ {
settleMsg, ok := aliceMsgsToSend[i].(*lnwire.UpdateFufillHTLC)
settleMsg, ok := aliceMsgsToSend[i].(*lnwire.UpdateFulfillHTLC)
if !ok {
t.Fatalf("expected a htlc settle message, "+
"instead have %v", spew.Sdump(settleMsg))
@ -3489,7 +3489,7 @@ func TestFeeUpdateRejectInsaneFee(t *testing.T) {
// Both Alice and Bob should reject this new fee rate as it it far too
// large.
if err := aliceChannel.UpdateFee(newFeeRate); err == nil {
t.Fatalf("alice should've rejected fee update")
t.Fatalf("alice should have rejected fee update")
}
}

2
lnwallet/fee_estimator.go

@ -205,7 +205,7 @@ var _ FeeEstimator = (*BtcdFeeEstimator)(nil)
// BitcoindFeeEstimator is an implementation of the FeeEstimator interface
// backed by the RPC interface of an active bitcoind node. This implementation
// will proxy any fee estimation requests to bitcoind's RPC interace.
// will proxy any fee estimation requests to bitcoind's RPC interface.
type BitcoindFeeEstimator struct {
// fallBackFeeRate is the fall back fee rate in satoshis per byte that
// is returned if the fee estimator does not yet have enough data to

14
lnwallet/interface_test.go

@ -503,7 +503,7 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness,
lnwire.FFAnnounceChannel,
)
if _, ok := err.(*lnwallet.ErrInsufficientFunds); !ok {
t.Fatalf("coin selection succeded should have insufficient funds: %v",
t.Fatalf("coin selection succeeded should have insufficient funds: %v",
err)
}
@ -537,7 +537,7 @@ func testFundingCancellationNotEnoughFunds(miner *rpctest.Harness,
}
}
func testCancelNonExistantReservation(miner *rpctest.Harness,
func testCancelNonExistentReservation(miner *rpctest.Harness,
alice, _ *lnwallet.LightningWallet, t *testing.T) {
feeRate, err := alice.Cfg.FeeEstimator.EstimateFeePerWeight(1)
@ -574,7 +574,7 @@ func testReservationInitiatorBalanceBelowDustCancel(miner *rpctest.Harness,
)
switch {
case err == nil:
t.Fatalf("initialization should've failed due to " +
t.Fatalf("initialization should have failed due to " +
"insufficient local amount")
case !strings.Contains(err.Error(), "local output is too small"):
@ -984,7 +984,7 @@ func testListTransactionDetails(miner *rpctest.Harness,
}
// We assert that the value is greater than the amount we
// attempted to send, as the wallet should've paid some amount
// attempted to send, as the wallet should have paid some amount
// of network fees.
if txDetail.Value >= -outputAmt {
fmt.Println(spew.Sdump(txDetail))
@ -1477,7 +1477,7 @@ var walletTests = []walletTestCase{
},
{
name: "test cancel non-existent reservation",
test: testCancelNonExistantReservation,
test: testCancelNonExistentReservation,
},
{
name: "reorg wallet balance",
@ -1573,7 +1573,7 @@ func waitForWalletSync(r *rpctest.Harness, w *lnwallet.LightningWallet) error {
}
// TestInterfaces tests all registered interfaces with a unified set of tests
// which excersie each of the required methods found within the WalletController
// which exercise each of the required methods found within the WalletController
// interface.
//
// NOTE: In the future, when additional implementations of the WalletController
@ -1590,7 +1590,7 @@ func TestLightningWallet(t *testing.T) {
// Initialize the harness around a btcd node which will serve as our
// dedicated miner to generate blocks, cause re-orgs, etc. We'll set
// up this node with a chain length of 125, so we have plentyyy of BTC
// up this node with a chain length of 125, so we have plenty of BTC
// to play around with.
miningNode, err := rpctest.New(netParams, nil, nil)
if err != nil {

10
lnwallet/script_utils.go

@ -400,7 +400,7 @@ func senderHtlcSpendTimeout(receiverSig []byte, signer Signer,
// OP_CHECKSIG
// OP_ENDIF
// OP_ENDIF
func receiverHTLCScript(cltvExipiry uint32, senderHtlcKey,
func receiverHTLCScript(cltvExpiry uint32, senderHtlcKey,
receiverHtlcKey, revocationKey *btcec.PublicKey,
paymentHash []byte) ([]byte, error) {
@ -477,7 +477,7 @@ func receiverHTLCScript(cltvExipiry uint32, senderHtlcKey,
// lock-time required to timeout the HTLC. If the time has passed, then
// we'll proceed with a checksig to ensure that this is actually the
// sender of he original HTLC.
builder.AddInt64(int64(cltvExipiry))
builder.AddInt64(int64(cltvExpiry))
builder.AddOp(txscript.OP_CHECKLOCKTIMEVERIFY)
builder.AddOp(txscript.OP_DROP)
builder.AddOp(txscript.OP_CHECKSIG)
@ -574,7 +574,7 @@ func ReceiverHtlcSpendRevoke(signer Signer, signDesc *SignDescriptor,
// an HTLC to recover the pending funds after an absolute timeout in the
// scenario that the receiver of the HTLC broadcasts their version of the
// commitment transaction. If the caller has already set the lock time on the
// spending transaction, than a value of -1 can be passed for the cltvExipiry
// spending transaction, than a value of -1 can be passed for the cltvExpiry
// value.
//
// NOTE: The target input of the passed transaction MUST NOT have a final
@ -665,7 +665,7 @@ func createHtlcTimeoutTx(htlcOutput wire.OutPoint, htlcAmt btcutil.Amount,
return timeoutTx, nil
}
// createHtlcSuccessTx creats a transaction that spends the output on the
// createHtlcSuccessTx creates a transaction that spends the output on the
// commitment transaction of the peer that receives an HTLC. This transaction
// essentially acts as an off-chain covenant as it's only permitted to spend
// the designated HTLC output, and also that spend can _only_ be used as a
@ -1305,7 +1305,7 @@ func GetStateNumHint(commitTx *wire.MsgTx, obfuscator [StateHintSize]byte) uint6
stateNumXor := uint64(commitTx.TxIn[0].Sequence&0xFFFFFF) << 24
stateNumXor |= uint64(commitTx.LockTime & 0xFFFFFF)
// Finally, to obtain the final state number, we XOR by the obfuscater
// Finally, to obtain the final state number, we XOR by the obfuscator
// value to de-obfuscate the state number.
return stateNumXor ^ xorInt
}
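The `GetStateNumHint` hunk above rebuilds a 48-bit state number from 24 bits hidden in the first input's sequence and 24 bits in the locktime, then XORs away the obfuscator. A simplified, self-contained sketch of that arithmetic (names are illustrative, not lnd's) is:

```go
package main

import "fmt"

// hideStateNum XORs a 48-bit state number with a 48-bit obfuscator and splits
// the result into a sequence-like upper 24 bits and a locktime-like lower
// 24 bits; recoverStateNum reverses the operation.
const mask24 = 0xFFFFFF

func hideStateNum(state, obfuscator uint64) (sequence, locktime uint32) {
	x := state ^ obfuscator
	sequence = uint32((x >> 24) & mask24)
	locktime = uint32(x & mask24)
	return sequence, locktime
}

func recoverStateNum(sequence, locktime uint32, obfuscator uint64) uint64 {
	x := uint64(sequence&mask24)<<24 | uint64(locktime&mask24)
	return x ^ obfuscator
}

func main() {
	const state, obfuscator = 123456, 0xA1B2C3D4E5F6
	seq, lock := hideStateNum(state, obfuscator)
	fmt.Println(recoverStateNum(seq, lock, obfuscator) == state) // true
}
```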

14
lnwallet/script_utils_test.go

@ -181,18 +181,18 @@ func TestCommitmentSpendValidation(t *testing.T) {
// Finally, we test bob sweeping his output as normal in the case that
// Alice broadcasts this commitment transaction.
bobScriptp2wkh, err := commitScriptUnencumbered(bobPayKey)
bobScriptP2WKH, err := commitScriptUnencumbered(bobPayKey)
if err != nil {
t.Fatalf("unable to create bob p2wkh script: %v", err)
}
signDesc = &SignDescriptor{
PubKey: bobKeyPub,
SingleTweak: bobCommitTweak,
WitnessScript: bobScriptp2wkh,
WitnessScript: bobScriptP2WKH,
SigHashes: txscript.NewTxSigHashes(sweepTx),
Output: &wire.TxOut{
Value: int64(channelBalance),
PkScript: bobScriptp2wkh,
PkScript: bobScriptP2WKH,
},
HashType: txscript.SigHashAll,
InputIndex: 0,
@ -391,7 +391,7 @@ func TestHTLCSenderSpendValidation(t *testing.T) {
aliceSigner := &mockSigner{privkeys: []*btcec.PrivateKey{aliceKeyPriv}}
// We'll also generate a signature on the sweep transaction above
// that'll act as Bob's signature to Alice for the second level HTLC
// that will act as Bob's signature to Alice for the second level HTLC
// transaction.
bobSignDesc := SignDescriptor{
PubKey: bobKeyPub,
@ -452,7 +452,7 @@ func TestHTLCSenderSpendValidation(t *testing.T) {
},
{
// HTLC with valid preimage size + sig
// TODO(roabeef): invalid preimage
// TODO(roasbeef): invalid preimage
makeWitnessTestCase(t, func() (wire.TxWitness, error) {
signDesc := &SignDescriptor{
PubKey: bobKeyPub,
@ -636,7 +636,7 @@ func TestHTLCReceiverSpendValidation(t *testing.T) {
aliceSigner := &mockSigner{privkeys: []*btcec.PrivateKey{aliceKeyPriv}}
// We'll also generate a signature on the sweep transaction above
// that'll act as Alice's signature to Bob for the second level HTLC
// that will act as Alice's signature to Bob for the second level HTLC
// transaction.
aliceSignDesc := SignDescriptor{
PubKey: aliceKeyPub,
@ -863,7 +863,7 @@ func TestSecondLevelHtlcSpends(t *testing.T) {
Value: int64(htlcAmt),
}
// TODO(roasbeef): make actually use timeout/sucess txns?
// TODO(roasbeef): make actually use timeout/success txns?
// Finally, we'll create mock signers for both of them based on their
// private keys. This test simplifies a bit and uses the same key as

2
lnwallet/sigpool.go

@ -165,7 +165,7 @@ func (s *sigPool) Stop() error {
return nil
}
// poolWorker is the main worker goroutine wtihin the sigPool. Individual
// poolWorker is the main worker goroutine within the sigPool. Individual
// batches are distributed amongst each of the active workers. The workers then
// execute the task based on the type of job, and return the result back to
// caller.

8
lnwallet/wallet.go

@ -184,7 +184,7 @@ type addCounterPartySigsMsg struct {
// https://github.com/bitcoin/bips/blob/master/bip-0069.mediawiki.
theirFundingInputScripts []*InputScript
// This should be 1/2 of the signatures needed to succesfully spend our
// This should be 1/2 of the signatures needed to successfully spend our
// version of the commitment transaction.
theirCommitmentSig []byte
@ -210,7 +210,7 @@ type addSingleFunderSigsMsg struct {
fundingOutpoint *wire.OutPoint
// theirCommitmentSig are the 1/2 of the signatures needed to
// succesfully spend our version of the commitment transaction.
// successfully spend our version of the commitment transaction.
theirCommitmentSig []byte
// This channel is used to return the completed channel after the wallet
@ -1366,7 +1366,7 @@ func (l *LightningWallet) deriveMasterRevocationRoot() (*btcec.PrivateKey, error
}
// DeriveStateHintObfuscator derives the bytes to be used for obfuscating the
// state hints from the root to be used for a new channel. The obsfucsator is
// state hints from the root to be used for a new channel. The obfuscator is
// generated via the following computation:
//
// * sha256(initiatorKey || responderKey)[26:]
@ -1386,7 +1386,7 @@ func DeriveStateHintObfuscator(key1, key2 *btcec.PublicKey) [StateHintSize]byte
return obfuscator
}
// initStateHints properly sets the obsfucated state hints on both commitment
// initStateHints properly sets the obfuscated state hints on both commitment
// transactions using the passed obfuscator.
func initStateHints(commit1, commit2 *wire.MsgTx,
obfuscator [StateHintSize]byte) error {
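The `DeriveStateHintObfuscator` comment above gives the computation as sha256(initiatorKey || responderKey)[26:], i.e. the last 6 bytes of the 32-byte digest. A hedged sketch of that derivation, with placeholder byte slices standing in for the serialized public keys, is:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

const stateHintSize = 6

// deriveObfuscator hashes the concatenation of the two serialized keys and
// keeps only bytes [26:] of the digest, matching the formula quoted above.
// The inputs here are placeholders, not real serialized public keys.
func deriveObfuscator(initiatorKey, responderKey []byte) [stateHintSize]byte {
	h := sha256.New()
	h.Write(initiatorKey)
	h.Write(responderKey)
	digest := h.Sum(nil)

	var obfuscator [stateHintSize]byte
	copy(obfuscator[:], digest[26:])
	return obfuscator
}

func main() {
	obf := deriveObfuscator([]byte{0x02, 0xAA}, []byte{0x03, 0xBB})
	fmt.Printf("%x\n", obf)
}
```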

4
lnwallet/witnessgen.go

@ -29,7 +29,7 @@ const (
// HtlcOfferedRevoke is a witness that allows us to sweep an HTLC which
// we offered to the remote party in the case that they broadcast a
// revoked commitmetn state.
// revoked commitment state.
HtlcOfferedRevoke WitnessType = 3
// HtlcAcceptedRevoke is a witness that allows us to sweep an HTLC
@ -46,7 +46,7 @@ const (
// HtlcAcceptedSuccessSecondLevel is a witness that allows us to sweep
// an HTLC output that was offered to us, and for which we have a
// payment preimage. This HTLC output isn't diretly on our commitment
// payment preimage. This HTLC output isn't directly on our commitment
// transaction, but is the result of confirmed second-level HTLC
// transaction. As a result, we can only spend this after a CSV delay.
HtlcAcceptedSuccessSecondLevel WitnessType = 6

2
lnwire/commit_sig.go

@ -10,7 +10,7 @@ import (
// receiver's pending set into a new commitment state. Implicitly, the new
// commitment transaction constructed which has been signed by CommitSig
// includes all HTLC's in the remote node's pending set. A CommitSig message
// may be sent after a series of UpdateAddHTLC/UpdateFufillHTLC messages in
// may be sent after a series of UpdateAddHTLC/UpdateFulfillHTLC messages in
// order to batch add several HTLC's with a single signature covering all
// implicitly accepted HTLC's.
type CommitSig struct {

2
lnwire/features_test.go

@ -84,7 +84,7 @@ func TestFeatureVectorSetUnset(t *testing.T) {
for j, expectedSet := range test.expectedFeatures {
if fv.HasFeature(FeatureBit(j)) != expectedSet {
t.Errorf("Expection failed in case %d, bit %d", i, j)
t.Errorf("Expectation failed in case %d, bit %d", i, j)
break
}
}

4
lnwire/lnwire_test.go

@ -563,8 +563,8 @@ func TestLightningWireProtocol(t *testing.T) {
},
},
{
msgType: MsgUpdateFufillHTLC,
scenario: func(m UpdateFufillHTLC) bool {
msgType: MsgUpdateFulfillHTLC,
scenario: func(m UpdateFulfillHTLC) bool {
return mainScenario(&m)
},
},

12
lnwire/message.go

@ -19,7 +19,7 @@ const MaxMessagePayload = 65535 // 65KB
// MessageType is the unique 2 byte big-endian integer that indicates the type
// of message on the wire. All messages have a very simple header which
// consists simply of 2-byte message type. We omit a length field, and checksum
// as the Lighting Protocol is intended to be encapsulated within a
// as the Lightning Protocol is intended to be encapsulated within a
// confidential+authenticated cryptographic messaging protocol.
type MessageType uint16
@ -38,7 +38,7 @@ const (
MsgShutdown = 38
MsgClosingSigned = 39
MsgUpdateAddHTLC = 128
MsgUpdateFufillHTLC = 130
MsgUpdateFulfillHTLC = 130
MsgUpdateFailHTLC = 131
MsgCommitSig = 132
MsgRevokeAndAck = 133
@ -74,8 +74,8 @@ func (t MessageType) String() string {
return "UpdateAddHTLC"
case MsgUpdateFailHTLC:
return "UpdateFailHTLC"
case MsgUpdateFufillHTLC:
return "UpdateFufillHTLC"
case MsgUpdateFulfillHTLC:
return "UpdateFulfillHTLC"
case MsgCommitSig:
return "CommitSig"
case MsgRevokeAndAck:
@ -165,8 +165,8 @@ func makeEmptyMessage(msgType MessageType) (Message, error) {
msg = &UpdateAddHTLC{}
case MsgUpdateFailHTLC:
msg = &UpdateFailHTLC{}
case MsgUpdateFufillHTLC:
msg = &UpdateFufillHTLC{}
case MsgUpdateFulfillHTLC:
msg = &UpdateFulfillHTLC{}
case MsgCommitSig:
msg = &CommitSig{}
case MsgRevokeAndAck:
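The `message.go` hunk above notes that the wire header is nothing more than a 2-byte big-endian message type, with no length or checksum. A minimal sketch of that framing (the payload handling is illustrative only; the constant mirrors the `MsgUpdateFulfillHTLC` value of 130 shown above) is:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const msgUpdateFulfillHTLC uint16 = 130

// writeMessage prefixes the payload with a single 2-byte big-endian type
// field; readMessageType reads that prefix back.
func writeMessage(w *bytes.Buffer, msgType uint16, payload []byte) {
	var header [2]byte
	binary.BigEndian.PutUint16(header[:], msgType)
	w.Write(header[:])
	w.Write(payload)
}

func readMessageType(r *bytes.Buffer) uint16 {
	var header [2]byte
	r.Read(header[:])
	return binary.BigEndian.Uint16(header[:])
}

func main() {
	var buf bytes.Buffer
	writeMessage(&buf, msgUpdateFulfillHTLC, []byte("payload"))
	fmt.Println(readMessageType(&buf)) // 130
}
```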

2
lnwire/netaddress.go

@ -46,7 +46,7 @@ func (n *NetAddress) String() string {
return fmt.Sprintf("%x@%v", pubkey, n.Address)
}
// Network returns the name of the network this address is binded to.
// Network returns the name of the network this address is bound to.
//
// This part of the net.Addr interface.
func (n *NetAddress) Network() string {

4
lnwire/onion_error.go

@ -26,11 +26,11 @@ type FailureMessage interface {
}
// failureMessageLength is the size of the failure message plus the size of
// padding. The FailureMessage message should always be EXACLTY this size.
// padding. The FailureMessage message should always be EXACTLY this size.
const failureMessageLength = 256
const (
// FlagBadOnion error flag describes an unparseable, encrypted by
// FlagBadOnion error flag describes an unparsable, encrypted by
// previous node.
FlagBadOnion FailCode = 0x8000

2
lnwire/signature.go

@ -17,7 +17,7 @@ func SerializeSigToWire(b *[64]byte, e *btcec.Signature) error {
// 0x30 <length> 0x02 <length r> r 0x02 <length s> s
// which means the length of R is the 4th byte and the length of S
// is the second byte after R ends. 0x02 signifies a length-prefixed,
// zero-padded, big-endian bigint. 0x30 sigifies a DER signature.
// zero-padded, big-endian bigint. 0x30 signifies a DER signature.
// See the Serialize() method for btcec.Signature for details.
rLen := sig[3]
sLen := sig[5+rLen]
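The `signature.go` hunk above documents the DER layout `0x30 <length> 0x02 <length r> r 0x02 <length s> s`, with rLen at `sig[3]` and sLen at `sig[5+rLen]`. A simplified reader for that layout (for illustration, not lnwire's conversion code) is:

```go
package main

import (
	"errors"
	"fmt"
)

// parseDERSig extracts the raw R and S byte slices from a DER-encoded
// signature following the layout quoted above, with basic bounds checks.
func parseDERSig(sig []byte) (r, s []byte, err error) {
	if len(sig) < 6 || sig[0] != 0x30 || sig[2] != 0x02 {
		return nil, nil, errors.New("not a DER signature")
	}
	rLen := int(sig[3])
	if len(sig) < 6+rLen || sig[4+rLen] != 0x02 {
		return nil, nil, errors.New("malformed R element")
	}
	sLen := int(sig[5+rLen])
	if len(sig) < 6+rLen+sLen {
		return nil, nil, errors.New("malformed S element")
	}
	r = sig[4 : 4+rLen]
	s = sig[6+rLen : 6+rLen+sLen]
	return r, s, nil
}

func main() {
	// 0x30, remaining length 8, 0x02, rLen 2, R bytes, 0x02, sLen 2, S bytes.
	sig := []byte{0x30, 0x08, 0x02, 0x02, 0x01, 0x02, 0x02, 0x02, 0x03, 0x04}
	r, s, err := parseDERSig(sig)
	fmt.Println(r, s, err)
}
```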

32
lnwire/update_fulfill_htlc.go

@ -2,12 +2,12 @@ package lnwire
import "io"
// UpdateFufillHTLC is sent by Alice to Bob when she wishes to settle a
// UpdateFulfillHTLC is sent by Alice to Bob when she wishes to settle a
// particular HTLC referenced by its HTLCKey within a specific active channel
// referenced by ChannelPoint. A subsequent CommitSig message will be sent by
// Alice to "lock-in" the removal of the specified HTLC, possible containing a
// batch signature covering several settled HTLC's.
type UpdateFufillHTLC struct {
type UpdateFulfillHTLC struct {
// ChanID references an active channel which holds the HTLC to be
// settled.
ChanID ChannelID
@ -21,26 +21,26 @@ type UpdateFufillHTLC struct {
PaymentPreimage [32]byte
}
// NewUpdateFufillHTLC returns a new empty UpdateFufillHTLC.
func NewUpdateFufillHTLC(chanID ChannelID, id uint64,
preimage [32]byte) *UpdateFufillHTLC {
// NewUpdateFulfillHTLC returns a new empty UpdateFulfillHTLC.
func NewUpdateFulfillHTLC(chanID ChannelID, id uint64,
preimage [32]byte) *UpdateFulfillHTLC {
return &UpdateFufillHTLC{
return &UpdateFulfillHTLC{
ChanID: chanID,
ID: id,
PaymentPreimage: preimage,
}
}
// A compile time check to ensure UpdateFufillHTLC implements the lnwire.Message
// A compile time check to ensure UpdateFulfillHTLC implements the lnwire.Message
// interface.
var _ Message = (*UpdateFufillHTLC)(nil)
var _ Message = (*UpdateFulfillHTLC)(nil)
// Decode deserializes a serialized UpdateFufillHTLC message stored in the passed
// Decode deserializes a serialized UpdateFulfillHTLC message stored in the passed
// io.Reader observing the specified protocol version.
//
// This is part of the lnwire.Message interface.
func (c *UpdateFufillHTLC) Decode(r io.Reader, pver uint32) error {
func (c *UpdateFulfillHTLC) Decode(r io.Reader, pver uint32) error {
return readElements(r,
&c.ChanID,
&c.ID,
@ -48,11 +48,11 @@ func (c *UpdateFufillHTLC) Decode(r io.Reader, pver uint32) error {
)
}
// Encode serializes the target UpdateFufillHTLC into the passed io.Writer
// Encode serializes the target UpdateFulfillHTLC into the passed io.Writer
// observing the protocol version specified.
//
// This is part of the lnwire.Message interface.
func (c *UpdateFufillHTLC) Encode(w io.Writer, pver uint32) error {
func (c *UpdateFulfillHTLC) Encode(w io.Writer, pver uint32) error {
return writeElements(w,
c.ChanID,
c.ID,
@ -64,15 +64,15 @@ func (c *UpdateFufillHTLC) Encode(w io.Writer, pver uint32) error {
// wire.
//
// This is part of the lnwire.Message interface.
func (c *UpdateFufillHTLC) MsgType() MessageType {
return MsgUpdateFufillHTLC
func (c *UpdateFulfillHTLC) MsgType() MessageType {
return MsgUpdateFulfillHTLC
}
// MaxPayloadLength returns the maximum allowed payload size for a UpdateFufillHTLC
// MaxPayloadLength returns the maximum allowed payload size for a UpdateFulfillHTLC
// complete message observing the specified protocol version.
//
// This is part of the lnwire.Message interface.
func (c *UpdateFufillHTLC) MaxPayloadLength(uint32) uint32 {
func (c *UpdateFulfillHTLC) MaxPayloadLength(uint32) uint32 {
// 32 + 8 + 32
return 72
}
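The `MaxPayloadLength` comment above breaks the 72-byte maximum down as 32 + 8 + 32: a 32-byte channel ID, an 8-byte HTLC ID, and a 32-byte payment preimage. A simplified sketch of that serialization (not lnwire's `writeElements` machinery) shows where the number comes from:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// fulfillHTLC is an illustrative stand-in for the settle message: its encoded
// form is the channel ID, the big-endian HTLC ID, then the preimage.
type fulfillHTLC struct {
	ChanID          [32]byte
	ID              uint64
	PaymentPreimage [32]byte
}

func (m *fulfillHTLC) encode() []byte {
	var buf bytes.Buffer
	buf.Write(m.ChanID[:])

	var id [8]byte
	binary.BigEndian.PutUint64(id[:], m.ID)
	buf.Write(id[:])

	buf.Write(m.PaymentPreimage[:])
	return buf.Bytes()
}

func main() {
	msg := fulfillHTLC{ID: 7}
	fmt.Println(len(msg.encode())) // 72
}
```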

2
log.go

@ -32,7 +32,7 @@ func (logWriter) Write(p []byte) (n int, err error) {
return len(p), nil
}
// Loggers per subsystem. A single backend logger is created and all subsytem
// Loggers per subsystem. A single backend logger is created and all subsystem
// loggers created from it will write to the backend. When adding new
// subsystems, add the subsystem logger variable here and to the
// subsystemLoggers map.

6
nursery_store.go

@ -221,7 +221,7 @@ var (
// kndrPrefix is the state prefix given to all CSV delayed outputs,
// either from the commitment transaction, or a stage-one htlc
// transaction, whose maturity height has solidified. Outputs marked in
// this state are in their final stage of incubation withn the nursery,
// this state are in their final stage of incubation within the nursery,
// and will be swept into the wallet after waiting out the relative
// timelock.
kndrPrefix = []byte("kndr")
@ -1355,7 +1355,7 @@ func (ns *nurseryStore) getLastFinalizedHeight(tx *bolt.Tx) (uint32, error) {
return byteOrder.Uint32(heightBytes), nil
}
// finalizeKinder records a finalized kingergarten sweep txn to the given height
// finalizeKinder records a finalized kindergarten sweep txn to the given height
// bucket. It also updates the nursery store's last finalized height, so that we
// do not finalize the same height twice. If the finalized txn is nil, i.e. if
// the height has no kindergarten outputs, the height will be marked as
@ -1463,7 +1463,7 @@ func (ns *nurseryStore) putLastGraduatedHeight(tx *bolt.Tx, height uint32) error
return err
}
// Serialize the provided last-gradauted height, and store it in the
// Serialize the provided last-graduated height, and store it in the
// top-level chain bucket for this nursery store.
var lastHeightBytes [4]byte
byteOrder.PutUint32(lastHeightBytes[:], height)

2
nursery_store_test.go

@ -341,7 +341,7 @@ func TestNurseryStoreIncubate(t *testing.T) {
}
// TestNurseryStoreFinalize tests that kindergarten sweep transactions are
// properly persistted, and that the last finalized height is being set
// properly persisted, and that the last finalized height is being set
// accordingly.
func TestNurseryStoreFinalize(t *testing.T) {
cdb, cleanUp, err := makeTestDB()

24
peer.go

@ -45,10 +45,10 @@ const (
outgoingQueueLen = 50
)
// outgoinMsg packages an lnwire.Message to be sent out on the wire, along with
// outgoingMsg packages an lnwire.Message to be sent out on the wire, along with
// a buffered channel which will be sent upon once the write is complete. This
// buffered channel acts as a semaphore to be used for synchronization purposes.
type outgoinMsg struct {
type outgoingMsg struct {
msg lnwire.Message
errChan chan error // MUST be buffered.
}
@ -116,11 +116,11 @@ type peer struct {
// sendQueue is the channel which is used to queue outgoing to be
// written onto the wire. Note that this channel is unbuffered.
sendQueue chan outgoinMsg
sendQueue chan outgoingMsg
// outgoingQueue is a buffered channel which allows second/third party
// objects to queue messages to be sent out on the wire.
outgoingQueue chan outgoinMsg
outgoingQueue chan outgoingMsg
// activeChannels is a map which stores the state machines of all
// active channels. Channels are indexed into the map by the txid of
@ -187,8 +187,8 @@ func newPeer(conn net.Conn, connReq *connmgr.ConnReq, server *server,
localFeatures: localFeatures,
sendQueue: make(chan outgoinMsg),
outgoingQueue: make(chan outgoinMsg),
sendQueue: make(chan outgoingMsg),
outgoingQueue: make(chan outgoingMsg),
activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
newChannels: make(chan *newChannelMsg, 1),
@ -748,7 +748,7 @@ out:
case *lnwire.UpdateAddHTLC:
isChanUpdate = true
targetChan = msg.ChanID
case *lnwire.UpdateFufillHTLC:
case *lnwire.UpdateFulfillHTLC:
isChanUpdate = true
targetChan = msg.ChanID
case *lnwire.UpdateFailMalformedHTLC:
@ -864,7 +864,7 @@ func messageSummary(msg lnwire.Message) string {
return fmt.Sprintf("chan_id=%v, id=%v, reason=%x", msg.ChanID,
msg.ID, msg.Reason)
case *lnwire.UpdateFufillHTLC:
case *lnwire.UpdateFulfillHTLC:
return fmt.Sprintf("chan_id=%v, id=%v, pre_image=%x",
msg.ChanID, msg.ID, msg.PaymentPreimage[:])
@ -1092,7 +1092,7 @@ func (p *peer) queueHandler() {
// writeHandler cannot accept messages on the
// sendQueue.
select {
case p.sendQueue <- elem.Value.(outgoinMsg):
case p.sendQueue <- elem.Value.(outgoingMsg):
pendingMsgs.Remove(elem)
case msg := <-p.outgoingQueue:
pendingMsgs.PushBack(msg)
@ -1149,7 +1149,7 @@ func (p *peer) PingTime() int64 {
// nil otherwise.
func (p *peer) queueMsg(msg lnwire.Message, errChan chan error) {
select {
case p.outgoingQueue <- outgoinMsg{msg, errChan}:
case p.outgoingQueue <- outgoingMsg{msg, errChan}:
case <-p.quit:
peerLog.Tracef("Peer shutting down, could not enqueue msg.")
if errChan != nil {
@ -1437,7 +1437,7 @@ func (p *peer) fetchActiveChanCloser(chanID lnwire.ChannelID) (*channelCloser, e
// cooperative channel closure transaction from the chain arb.
// Wtih this context, we'll ensure that we're able to respond
// if *any* of the transactions we sign off on are ever
// braodacast.
// broadcast.
closeCtx, err := p.server.chainArb.BeginCoopChanClose(
*channel.ChannelPoint(),
)
@ -1503,7 +1503,7 @@ func (p *peer) handleLocalCloseReq(req *htlcswitch.ChanClose) {
// cooperative channel closure transaction from the chain arb.
// Wtih this context, we'll ensure that we're able to respond
// if *any* of the transactions we sign off on are ever
// braodacast.
// broadcast.
closeCtx, err := p.server.chainArb.BeginCoopChanClose(
*channel.ChannelPoint(),
)

2
peer_test.go

@ -322,7 +322,7 @@ func TestPeerChannelClosureFeeNegotiationsResponder(t *testing.T) {
}
// The fee sent by the responder should be less than the fee we just
// sent as it should attempt to comrpomise.
// sent as it should attempt to compromise.
peerFee := responderClosingSigned.FeeSatoshis
if peerFee > increasedFee {
t.Fatalf("new fee should be less than our fee: new=%v, "+

4
routing/chainview/bitcoind.go

@ -228,7 +228,7 @@ func (b *BitcoindFilteredChainView) FilterBlock(blockHash *chainhash.Hash) (*Fil
}
// chainFilterer is the primary goroutine which: listens for new blocks coming
// and dispatches the relevent FilteredBlock notifications, updates the filter
// and dispatches the relevant FilteredBlock notifications, updates the filter
// due to requests by callers, and finally is able to preform targeted block
// filtration.
//
@ -236,7 +236,7 @@ func (b *BitcoindFilteredChainView) FilterBlock(blockHash *chainhash.Hash) (*Fil
func (b *BitcoindFilteredChainView) chainFilterer() {
defer b.wg.Done()
// filterBlock is a helper funciton that scans the given block, and
// filterBlock is a helper function that scans the given block, and
// notes which transactions spend outputs which are currently being
// watched. Additionally, the chain filter will also be updated by
// removing any spent outputs.
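
To make the corrected comment concrete, here is a stripped-down, hypothetical filterBlock-style helper: it walks a block's transactions, records those that spend a watched outpoint, and removes each spent outpoint from the filter. The types are simplified stand-ins rather than the package's own.

package main

import "fmt"

type outPoint struct {
	txid  string
	index uint32
}

type tx struct {
	id     string
	inputs []outPoint
}

type block struct{ txs []tx }

// filterBlock returns the IDs of transactions spending a watched
// outpoint, removing each spent outpoint from the filter as it goes.
func filterBlock(filter map[outPoint]struct{}, b block) []string {
	var relevant []string
	for _, t := range b.txs {
		matched := false
		for _, in := range t.inputs {
			if _, ok := filter[in]; ok {
				delete(filter, in) // this output is now spent
				matched = true
			}
		}
		if matched {
			relevant = append(relevant, t.id)
		}
	}
	return relevant
}

func main() {
	filter := map[outPoint]struct{}{{txid: "aa", index: 0}: {}}
	b := block{txs: []tx{{id: "bb", inputs: []outPoint{{txid: "aa", index: 0}}}}}
	fmt.Println(filterBlock(filter, b), len(filter)) // [bb] 0
}
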

4
routing/chainview/btcd.go

@ -241,7 +241,7 @@ func (b *BtcdFilteredChainView) FilterBlock(blockHash *chainhash.Hash) (*Filtere
}
// chainFilterer is the primary goroutine which: listens for new blocks coming
// and dispatches the relevent FilteredBlock notifications, updates the filter
// and dispatches the relevant FilteredBlock notifications, updates the filter
// due to requests by callers, and finally is able to preform targeted block
// filtration.
//
@ -249,7 +249,7 @@ func (b *BtcdFilteredChainView) FilterBlock(blockHash *chainhash.Hash) (*Filtere
func (b *BtcdFilteredChainView) chainFilterer() {
defer b.wg.Done()
// filterBlock is a helper funciton that scans the given block, and
// filterBlock is a helper function that scans the given block, and
// notes which transactions spend outputs which are currently being
// watched. Additionally, the chain filter will also be updated by
// removing any spent outputs.

2
routing/chainview/interface.go

@ -7,7 +7,7 @@ import (
// FilteredChainView represents a subscription to a certain subset of of the
// UTXO set for a particular chain. This interface is useful from the point of
// view of maintaining an up-to-date channel graph for the Lighting Network.
// view of maintaining an up-to-date channel graph for the Lightning Network.
// The subset of the UTXO to be subscribed is that of all the currently opened
// channels. Each time a channel is closed (the output is spent), a
// notification is to be sent allowing the graph to be pruned.
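
A rough sketch of how a consumer of such a view can keep its graph pruned, with illustrative types rather than the interface's actual methods: each filtered block lists the channel outpoints spent in it, and the consumer marks those channels closed.

package main

import "fmt"

type filteredBlock struct {
	height uint32
	spent  []string // channel points whose funding output was spent
}

type graph map[string]bool // channel point -> still open?

// pruneOnSpends consumes filtered blocks and closes every channel whose
// funding outpoint was spent in the block.
func pruneOnSpends(g graph, blocks <-chan filteredBlock) {
	for b := range blocks {
		for _, cp := range b.spent {
			if g[cp] {
				g[cp] = false
				fmt.Printf("height %d: pruned channel %s\n", b.height, cp)
			}
		}
	}
}

func main() {
	g := graph{"aa:0": true, "bb:1": true}
	blocks := make(chan filteredBlock, 1)
	blocks <- filteredBlock{height: 100, spent: []string{"aa:0"}}
	close(blocks)
	pruneOnSpends(g, blocks)
}
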

6
routing/chainview/interface_test.go

@ -371,7 +371,7 @@ func testUpdateFilterBackTrack(node *rpctest.Harness,
t.Fatalf("unable to generate block: %v", err)
}
// We should've received another empty filtered block notification.
// We should have received another empty filtered block notification.
select {
case filteredBlock := <-blockChan:
assertFilteredBlock(t, filteredBlock, currentHeight+1,
@ -606,7 +606,7 @@ func testFilterBlockDisconnected(node *rpctest.Harness,
}
expectedHeight := uint32(oldHeight - i)
if block.Height != expectedHeight {
t.Fatalf("expected to receive disconencted "+
t.Fatalf("expected to receive disconnected "+
"block at height %d, instead got at %d",
expectedHeight, block.Height)
}
@ -878,7 +878,7 @@ var interfaceImpls = []struct {
func TestFilteredChainView(t *testing.T) {
// Initialize the harness around a btcd node which will serve as our
// dedicated miner to generate blocks, cause re-orgs, etc. We'll set up
// this node with a chain length of 125, so we have plentyyy of BTC to
// this node with a chain length of 125, so we have plenty of BTC to
// play around with.
miner, err := rpctest.New(netParams, nil, nil)
if err != nil {

2
routing/chainview/neutrino.go

@ -17,7 +17,7 @@ import (
// CfFilteredChainView is an implementation of the FilteredChainView interface
// which is supported by an underlying Bitcoin light client which supports
// client side filtering of Golomb Coded Sets. Rather than fetching all the
// blocks, the light client is able to query fitlers locally, to test if an
// blocks, the light client is able to query filters locally, to test if an
// item in a block modifies any of our watched set of UTXOs.
type CfFilteredChainView struct {
started int32

2
routing/chainview/queue.go

@ -39,7 +39,7 @@ type blockEventQueue struct {
// will receive connected/new blocks from the FilteredChainView.
newBlocks chan *FilteredBlock
// stleBlocks is the channel where the consumer of the queue will
// staleBlocks is the channel where the consumer of the queue will
// receive disconnected/stale blocks from the FilteredChainView.
staleBlocks chan *FilteredBlock

2
routing/missioncontrol.go

@ -92,7 +92,7 @@ type graphPruneView struct {
// consulted during path finding. If a vertex/edge is found within the returned
// prune view, it is to be ignored as a goroutine has had issues routing
// through it successfully. Within this method the main view of the
// missionControl is garbage collected as entires are detected to be "stale".
// missionControl is garbage collected as entries are detected to be "stale".
func (m *missionControl) GraphPruneView() graphPruneView {
// First, we'll grab the current time, this value will be used to
// determine if an entry is stale or not.
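
The garbage collection the corrected comment refers to is time based: while the prune view is assembled, any failure entry older than some decay window is dropped. A self-contained sketch of that idea, with an assumed one-hour window:

package main

import (
	"fmt"
	"time"
)

// pruneView returns the set of vertices that failed recently enough to
// still be ignored, deleting stale entries from failures as it goes.
func pruneView(failures map[string]time.Time, decay time.Duration) map[string]struct{} {
	now := time.Now()
	view := make(map[string]struct{})
	for vertex, lastFail := range failures {
		if now.Sub(lastFail) >= decay {
			delete(failures, vertex) // stale: forget the failure
			continue
		}
		view[vertex] = struct{}{}
	}
	return view
}

func main() {
	failures := map[string]time.Time{
		"nodeA": time.Now().Add(-2 * time.Hour),   // stale
		"nodeB": time.Now().Add(-5 * time.Minute), // still fresh
	}
	view := pruneView(failures, time.Hour)
	fmt.Println(len(view), len(failures)) // 1 1
}
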

2
routing/notifications.go

@ -135,7 +135,7 @@ func (r *ChannelRouter) notifyTopologyChange(topologyDiff *TopologyChange) {
// directly to the upstream client consumer.
case c.ntfnChan <- topologyDiff:
// If the client cancel's the notifications, then we'll
// If the client cancels the notifications, then we'll
// exit early.
case <-c.exit:

4
routing/notifications_test.go

@ -347,7 +347,7 @@ func TestEdgeUpdateNotification(t *testing.T) {
bitcoinKey1.SerializeCompressed(), bitcoinKey2.SerializeCompressed(),
chanValue, 0)
if err != nil {
t.Fatalf("unbale create channel edge: %v", err)
t.Fatalf("unable create channel edge: %v", err)
}
// We'll also add a record for the block that included our funding
@ -871,7 +871,7 @@ func TestChannelCloseNotification(t *testing.T) {
if len(closedChans) == 0 {
t.Fatal("close channel ntfn not populated")
} else if len(closedChans) != 1 {
t.Fatalf("only one should've been detected as closed, "+
t.Fatalf("only one should have been detected as closed, "+
"instead %v were", len(closedChans))
}

18
routing/pathfind_test.go

@ -120,7 +120,7 @@ func makeTestGraph() (*channeldb.ChannelGraph, func(), error) {
}
// aliasMap is a map from a node's alias to its public key. This type is
// provided in order to allow easily look up from the human rememberable alias
// provided in order to allow easily look up from the human memorable alias
// to an exact node's public key.
type aliasMap map[string]*btcec.PublicKey
@ -498,9 +498,9 @@ func TestKShortestPathFinding(t *testing.T) {
t.Fatalf("unable to fetch source node: %v", err)
}
// In this test we'd like to ensure that our algoirthm to find the
// In this test we'd like to ensure that our algorithm to find the
// k-shortest paths from a given source node to any destination node
// works as exepcted.
// works as expected.
// In our basic_graph.json, there exist two paths from roasbeef to luo
// ji. Our algorithm should properly find both paths, and also rank
@ -514,13 +514,13 @@ func TestKShortestPathFinding(t *testing.T) {
"luo ji: %v", err)
}
// The algorithm should've found two paths from roasbeef to luo ji.
// The algorithm should have found two paths from roasbeef to luo ji.
if len(paths) != 2 {
t.Fatalf("two path shouldn't been found, instead %v were",
len(paths))
}
// Additinoally, the total hop length of the first path returned should
// Additionally, the total hop length of the first path returned should
// be _less_ than that of the second path returned.
if len(paths[0]) > len(paths[1]) {
t.Fatalf("paths found not ordered properly")
@ -566,7 +566,7 @@ func TestNewRoutePathTooLong(t *testing.T) {
paymentAmt := lnwire.NewMSatFromSatoshis(100)
// We start by confirminig that routing a payment 20 hops away is possible.
// We start by confirming that routing a payment 20 hops away is possible.
// Alice should be able to find a valid route to ursula.
target := aliases["ursula"]
_, err = findPath(nil, graph, sourceNode, target, ignoredVertexes,
@ -705,7 +705,7 @@ func TestRouteFailDisabledEdge(t *testing.T) {
ignoredVertexes := make(map[Vertex]struct{})
// First, we'll try to route from roasbeef -> songoku. This should
// suceed without issue, and return a single path.
// succeed without issue, and return a single path.
target := aliases["songoku"]
payAmt := lnwire.NewMSatFromSatoshis(10000)
_, err = findPath(nil, graph, sourceNode, target, ignoredVertexes,
@ -726,7 +726,7 @@ func TestRouteFailDisabledEdge(t *testing.T) {
}
// Now, if we attempt to route through that edge, we should get a
// failure as it is no longer elligble.
// failure as it is no longer eligible.
_, err = findPath(nil, graph, sourceNode, target, ignoredVertexes,
ignoredEdges, payAmt)
if !IsError(err, ErrNoPathFound) {
@ -792,7 +792,7 @@ func TestPathFindSpecExample(t *testing.T) {
// Now we'll examine the first route returned for correctness.
//
// It should be sending the exact payment amount as there're no
// It should be sending the exact payment amount as there are no
// additional hops.
firstRoute := routes[0]
if firstRoute.TotalAmount != amt {

18
routing/router.go

@ -79,7 +79,7 @@ type ChannelGraphSource interface {
e1, e2 *channeldb.ChannelEdgePolicy) error) error
}
// FeeSchema is the set fee configuration for a Lighting Node on the network.
// FeeSchema is the set fee configuration for a Lightning Node on the network.
// Using the coefficients described within the schema, the required fee to
// forward outgoing payments can be derived.
type FeeSchema struct {
@ -96,7 +96,7 @@ type FeeSchema struct {
}
// ChannelPolicy holds the parameters that determine the policy we enforce
// when fowarding payments on a channel. These parameters are communicated
// when forwarding payments on a channel. These parameters are communicated
// to the rest of the network in ChannelUpdate messages.
type ChannelPolicy struct {
// FeeSchema holds the fee configuration for a channel.
@ -471,7 +471,7 @@ func (r *ChannelRouter) syncGraphWithChain() error {
return err
}
// We're only interested in all prior outputs that've been
// We're only interested in all prior outputs that have been
// spent in the block, so collate all the referenced previous
// outpoints within each tx and input.
var spentOutputs []*wire.OutPoint
@ -529,7 +529,7 @@ func (r *ChannelRouter) networkHandler() {
// We'll set up any dependants, and wait until a free
// slot for this job opens up, this allow us to not
// have thousands of goroutines active.
validationBarrier.InitJobDependancies(updateMsg.msg)
validationBarrier.InitJobDependencies(updateMsg.msg)
go func() {
defer validationBarrier.CompleteJob()
@ -632,7 +632,7 @@ func (r *ChannelRouter) networkHandler() {
log.Infof("Pruning channel graph using block %v (height=%v)",
chainUpdate.Hash, blockHeight)
// We're only interested in all prior outputs that've
// We're only interested in all prior outputs that have
// been spent in the block, so collate all the
// referenced previous outpoints within each tx and
// input.
@ -730,7 +730,7 @@ func (r *ChannelRouter) networkHandler() {
// We'll ensure that we don't attempt to prune
// our *own* channels from the graph, as in any
// case this shuold be re-advertised by the
// case this should be re-advertised by the
// sub-system above us.
if info.NodeKey1.IsEqual(r.selfNode.PubKey) ||
info.NodeKey2.IsEqual(r.selfNode.PubKey) {
@ -1572,7 +1572,7 @@ func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *Route
// As this error indicates that the target
// channel was unable to carry this HTLC (for
// w/e reason), we'll query the index to find
// the _outgoign_ channel the source of the
// the _outgoing_ channel the source of the
// error was meant to pass the HTLC along to.
badChan, ok := route.nextHopChannel(errSource)
if !ok {
@ -1648,7 +1648,7 @@ func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *Route
// As this error indicates that the target
// channel was unable to carry this HTLC (for
// w/e reason), we'll query the index to find
// the _outgoign_ channel the source of the
// the _outgoing_ channel the source of the
// error was meant to pass the HTLC along to.
badChan, ok := route.nextHopChannel(errSource)
if !ok {
@ -1671,7 +1671,7 @@ func (r *ChannelRouter) SendPayment(payment *LightningPayment) ([32]byte, *Route
continue
case *lnwire.FailPermanentNodeFailure:
// TODO(rosabeef): remove node from path
// TODO(roasbeef): remove node from path
continue
default:

60
routing/validation_barrier.go

@ -26,17 +26,17 @@ type ValidationBarrier struct {
// dependants.
chanAnnFinSignal map[lnwire.ShortChannelID]chan struct{}
// chanEdgeDependancies tracks any channel edge updates which should
// chanEdgeDependencies tracks any channel edge updates which should
// wait until the completion of the ChannelAnnouncement before
// proceeding. This is a dependency, as we can't validate the update
// before we validate the announcement which creates the channel
// itself.
chanEdgeDependancies map[lnwire.ShortChannelID]chan struct{}
chanEdgeDependencies map[lnwire.ShortChannelID]chan struct{}
// nodeAnnDependancies tracks any pending NodeAnnouncement validation
// nodeAnnDependencies tracks any pending NodeAnnouncement validation
// jobs which should wait until the completion of the
// ChannelAnnouncement before proceeding.
nodeAnnDependancies map[Vertex]chan struct{}
nodeAnnDependencies map[Vertex]chan struct{}
quit chan struct{}
sync.Mutex
@ -50,8 +50,8 @@ func NewValidationBarrier(numActiveReqs int,
v := &ValidationBarrier{
chanAnnFinSignal: make(map[lnwire.ShortChannelID]chan struct{}),
chanEdgeDependancies: make(map[lnwire.ShortChannelID]chan struct{}),
nodeAnnDependancies: make(map[Vertex]chan struct{}),
chanEdgeDependencies: make(map[lnwire.ShortChannelID]chan struct{}),
nodeAnnDependencies: make(map[Vertex]chan struct{}),
quit: quitChan,
}
@ -65,9 +65,9 @@ func NewValidationBarrier(numActiveReqs int,
return v
}
// InitJobDependancies will wait for a new job slot to become open, and then
// sets up any dependant signals/trigger for the new job
func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
// InitJobDependencies will wait for a new job slot to become open, and then
// sets up any dependent signals/trigger for the new job
func (v *ValidationBarrier) InitJobDependencies(job interface{}) {
// We'll wait for either a new slot to become open, or for the quit
// channel to be closed.
select {
@ -79,7 +79,7 @@ func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
defer v.Unlock()
// Once a slot is open, we'll examine the message of the job, to see if
// there need to be any dependant barriers set up.
// there need to be any dependent barriers set up.
switch msg := job.(type) {
// If this is a channel announcement, then we'll need to set up den
@ -93,7 +93,7 @@ func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
// one doesn't already exist, as there may be duplicate
// announcements. We'll close this signal once the
// ChannelAnnouncement has been validated. This will result in
// all the dependant jobs being unlocked so they can finish
// all the dependent jobs being unlocked so they can finish
// execution themselves.
if _, ok := v.chanAnnFinSignal[msg.ShortChannelID]; !ok {
// We'll create the channel that we close after we
@ -102,10 +102,10 @@ func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
// at the same time.
annFinCond := make(chan struct{})
v.chanAnnFinSignal[msg.ShortChannelID] = annFinCond
v.chanEdgeDependancies[msg.ShortChannelID] = annFinCond
v.chanEdgeDependencies[msg.ShortChannelID] = annFinCond
v.nodeAnnDependancies[NewVertex(msg.NodeID1)] = annFinCond
v.nodeAnnDependancies[NewVertex(msg.NodeID2)] = annFinCond
v.nodeAnnDependencies[NewVertex(msg.NodeID1)] = annFinCond
v.nodeAnnDependencies[NewVertex(msg.NodeID2)] = annFinCond
}
case *channeldb.ChannelEdgeInfo:
@ -114,10 +114,10 @@ func (v *ValidationBarrier) InitJobDependancies(job interface{}) {
annFinCond := make(chan struct{})
v.chanAnnFinSignal[shortID] = annFinCond
v.chanEdgeDependancies[shortID] = annFinCond
v.chanEdgeDependencies[shortID] = annFinCond
v.nodeAnnDependancies[NewVertex(msg.NodeKey1)] = annFinCond
v.nodeAnnDependancies[NewVertex(msg.NodeKey2)] = annFinCond
v.nodeAnnDependencies[NewVertex(msg.NodeKey1)] = annFinCond
v.nodeAnnDependencies[NewVertex(msg.NodeKey2)] = annFinCond
}
// These other types don't have any dependants, so no further
@ -149,8 +149,8 @@ func (v *ValidationBarrier) CompleteJob() {
// WaitForDependants will block until any jobs that this job dependants on have
// finished executing. This allows us a graceful way to schedule goroutines
// based on any pending uncompleted dependant jobs. If this job doesn't have an
// active dependant, then this function will return immediately.
// based on any pending uncompleted dependent jobs. If this job doesn't have an
// active dependent, then this function will return immediately.
func (v *ValidationBarrier) WaitForDependants(job interface{}) {
var (
@ -165,15 +165,15 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) {
// completion of any active ChannelAnnouncement jobs related to them.
case *channeldb.ChannelEdgePolicy:
shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
signal, ok = v.chanEdgeDependancies[shortID]
signal, ok = v.chanEdgeDependencies[shortID]
case *channeldb.LightningNode:
vertex := NewVertex(msg.PubKey)
signal, ok = v.nodeAnnDependancies[vertex]
signal, ok = v.nodeAnnDependencies[vertex]
case *lnwire.ChannelUpdate:
signal, ok = v.chanEdgeDependancies[msg.ShortChannelID]
signal, ok = v.chanEdgeDependencies[msg.ShortChannelID]
case *lnwire.NodeAnnouncement:
vertex := NewVertex(msg.NodeID)
signal, ok = v.nodeAnnDependancies[vertex]
signal, ok = v.nodeAnnDependencies[vertex]
// Other types of jobs can be executed immediately, so we'll just
// return directly.
@ -201,7 +201,7 @@ func (v *ValidationBarrier) WaitForDependants(job interface{}) {
}
}
// SignalDependants will signal any jobs that are dependant on this job that
// SignalDependants will signal any jobs that are dependent on this job that
// they can continue execution. If the job doesn't have any dependants, then
// this function sill exit immediately.
func (v *ValidationBarrier) SignalDependants(job interface{}) {
@ -212,7 +212,7 @@ func (v *ValidationBarrier) SignalDependants(job interface{}) {
// If we've just finished executing a ChannelAnnouncement, then we'll
// close out the signal, and remove the signal from the map of active
// ones. This will allow any dependant jobs to continue execution.
// ones. This will allow any dependent jobs to continue execution.
case *channeldb.ChannelEdgeInfo:
shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
finSignal, ok := v.chanAnnFinSignal[shortID]
@ -227,20 +227,20 @@ func (v *ValidationBarrier) SignalDependants(job interface{}) {
delete(v.chanAnnFinSignal, msg.ShortChannelID)
}
delete(v.chanEdgeDependancies, msg.ShortChannelID)
delete(v.chanEdgeDependencies, msg.ShortChannelID)
// For all other job types, we'll delete the tracking entries from the
// map, as if we reach this point, then all dependants have already
// finished executing and we can proceed.
case *channeldb.LightningNode:
delete(v.nodeAnnDependancies, NewVertex(msg.PubKey))
delete(v.nodeAnnDependencies, NewVertex(msg.PubKey))
case *lnwire.NodeAnnouncement:
delete(v.nodeAnnDependancies, NewVertex(msg.NodeID))
delete(v.nodeAnnDependencies, NewVertex(msg.NodeID))
case *lnwire.ChannelUpdate:
delete(v.chanEdgeDependancies, msg.ShortChannelID)
delete(v.chanEdgeDependencies, msg.ShortChannelID)
case *channeldb.ChannelEdgePolicy:
shortID := lnwire.NewShortChanIDFromInt(msg.ChannelID)
delete(v.chanEdgeDependancies, shortID)
delete(v.chanEdgeDependencies, shortID)
case *lnwire.AnnounceSignatures:
return
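
Setting the renames aside, the mechanism is one close-once channel per pending ChannelAnnouncement, shared by every edge and node job that depends on it. A toy version of that signalling pattern, independent of the real types:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// One signal per parent job; dependent jobs block on it until it closes.
	annDone := make(chan struct{})

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-annDone // park until the announcement has been validated
			fmt.Printf("dependent job %d running\n", id)
		}(i)
	}

	// The parent job validates the announcement, then signals its
	// dependants by closing the channel, releasing every waiter at once.
	fmt.Println("channel announcement validated")
	close(annDone)
	wg.Wait()
}

Closing the channel wakes all waiters simultaneously, which is why duplicate announcements must reuse the existing signal rather than create a new one.
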

44
rpcserver.go

@ -356,14 +356,14 @@ func (r *rpcServer) sendCoinsOnChain(paymentMap map[string]int64,
// determineFeePerByte will determine the fee in sat/byte that should be paid
// given an estimator, a confirmation target, and a manual value for sat/byte.
// A value is chosen based on the two free paramters as one, or both of them
// A value is chosen based on the two free parameters as one, or both of them
// can be zero.
func determineFeePerByte(feeEstimator lnwallet.FeeEstimator, targetConf int32,
satPerByte int64) (btcutil.Amount, error) {
switch {
// If the target number of confirmations is set, then we'll use that to
// consult our fee estimator for an adquate fee.
// consult our fee estimator for an adequate fee.
case targetConf != 0:
satPerByte, err := feeEstimator.EstimateFeePerByte(
uint32(targetConf),
@ -375,7 +375,7 @@ func determineFeePerByte(feeEstimator lnwallet.FeeEstimator, targetConf int32,
return btcutil.Amount(satPerByte), nil
// If a manual sat/byte fee rate is set, then we'll use that diretly.
// If a manual sat/byte fee rate is set, then we'll use that directly.
case satPerByte != 0:
return btcutil.Amount(satPerByte), nil
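
The function above is a three-way selection: prefer the confirmation-target estimate, fall back to the manual sat/byte value, and otherwise use a default. A simplified stand-alone sketch; the estimator signature, the error-free return, and the default value are assumptions, not the real API:

package main

import "fmt"

// pickFeePerByte chooses a sat/byte fee rate from two optional inputs,
// mirroring the "two free parameters, one or both may be zero" contract.
func pickFeePerByte(estimate func(conf uint32) int64, targetConf int32, satPerByte int64) int64 {
	switch {
	case targetConf != 0:
		// A confirmation target was supplied: ask the estimator.
		return estimate(uint32(targetConf))
	case satPerByte != 0:
		// A manual rate was supplied: use it directly.
		return satPerByte
	default:
		// Neither was set: fall back to a hypothetical default.
		return 25
	}
}

func main() {
	est := func(conf uint32) int64 { return 200 / int64(conf) } // toy estimator
	fmt.Println(pickFeePerByte(est, 4, 0)) // 50, from the estimator
	fmt.Println(pickFeePerByte(est, 0, 7)) // 7, manual override
	fmt.Println(pickFeePerByte(est, 0, 0)) // 25, default
}
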
@ -397,8 +397,8 @@ func determineFeePerByte(feeEstimator lnwallet.FeeEstimator, targetConf int32,
func (r *rpcServer) SendCoins(ctx context.Context,
in *lnrpc.SendCoinsRequest) (*lnrpc.SendCoinsResponse, error) {
// Based on the passed fee related paramters, we'll determine an
// approriate fee rate for this transaction.
// Based on the passed fee related parameters, we'll determine an
// appropriate fee rate for this transaction.
feePerByte, err := determineFeePerByte(
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
)
@ -425,7 +425,7 @@ func (r *rpcServer) SendCoins(ctx context.Context,
func (r *rpcServer) SendMany(ctx context.Context,
in *lnrpc.SendManyRequest) (*lnrpc.SendManyResponse, error) {
// Based on the passed fee related paramters, we'll determine an
// Based on the passed fee related parameters, we'll determine an
// approriate fee rate for this transaction.
feePerByte, err := determineFeePerByte(
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
@ -699,9 +699,9 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
const minChannelSize = btcutil.Amount(6000)
// Restrict the size of the channel we'll actually open. Atm, we
// require the amount to be above 6k satoahis s we currently hard-coded
// require the amount to be above 6k satoshis we currently hard-coded
// a 5k satoshi fee in several areas. As a result 6k sat is the min
// channnel size that allows us to safely sit above the dust threshold
// channel size that allows us to safely sit above the dust threshold
// after fees are applied
// TODO(roasbeef): remove after dynamic fees are in
if localFundingAmt < minChannelSize {
@ -735,8 +735,8 @@ func (r *rpcServer) OpenChannel(in *lnrpc.OpenChannelRequest,
nodePubKeyBytes = nodePubKey.SerializeCompressed()
}
// Based on the passed fee related paramters, we'll determine an
// approriate fee rate for the funding transaction.
// Based on the passed fee related parameters, we'll determine an
// appropriate fee rate for the funding transaction.
feePerByte, err := determineFeePerByte(
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
)
@ -858,7 +858,7 @@ func (r *rpcServer) OpenChannelSync(ctx context.Context,
"initial state must be below the local funding amount")
}
// Based on the passed fee related paramters, we'll determine an
// Based on the passed fee related parameters, we'll determine an
// appropriate fee rate for the funding transaction.
feePerByte, err := determineFeePerByte(
r.server.cc.feeEstimator, in.TargetConf, in.SatPerByte,
@ -1272,7 +1272,7 @@ func (r *rpcServer) WalletBalance(ctx context.Context,
return nil, err
}
// Get uncomfirmed balance, from txs with 0 confirmations.
// Get unconfirmed balance, from txs with 0 confirmations.
unconfirmedBal := totalBal - confirmedBal
rpcsLog.Debugf("[walletbalance] Total balance=%v", totalBal)
@ -1524,7 +1524,7 @@ func (r *rpcServer) ListChannels(ctx context.Context,
localBalance := localCommit.LocalBalance
remoteBalance := localCommit.RemoteBalance
// As an artefact of our usage of mSAT internally, either party
// As an artifact of our usage of mSAT internally, either party
// may end up in a state where they're holding a fractional
// amount of satoshis which can't be expressed within the
// actual commitment output. Since we round down when going
@ -2195,7 +2195,7 @@ func createRPCInvoice(invoice *channeldb.Invoice) (*lnrpc.Invoice, error) {
}, nil
}
// LookupInvoice attemps to look up an invoice according to its payment hash.
// LookupInvoice attempts to look up an invoice according to its payment hash.
// The passed payment hash *must* be exactly 32 bytes, if not an error is
// returned.
func (r *rpcServer) LookupInvoice(ctx context.Context,
@ -2532,14 +2532,14 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
// With the node obtained, we'll now iterate through all its out going
// edges to gather some basic statistics about its out going channels.
var (
numChannels uint32
totalCapcity btcutil.Amount
numChannels uint32
totalCapacity btcutil.Amount
)
if err := node.ForEachChannel(nil, func(_ *bolt.Tx, edge *channeldb.ChannelEdgeInfo,
_, _ *channeldb.ChannelEdgePolicy) error {
numChannels++
totalCapcity += edge.Capacity
totalCapacity += edge.Capacity
return nil
}); err != nil {
return nil, err
@ -2565,7 +2565,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
Color: nodeColor,
},
NumChannels: numChannels,
TotalCapacity: int64(totalCapcity),
TotalCapacity: int64(totalCapacity),
}, nil
}
@ -2573,7 +2573,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
// route to a target destination capable of carrying a specific amount of
// satoshis within the route's flow. The retuned route contains the full
// details required to craft and send an HTLC, also including the necessary
// information that should be present within the Sphinx packet encapsualted
// information that should be present within the Sphinx packet encapsulated
// within the HTLC.
//
// TODO(roasbeef): should return a slice of routes in reality
@ -2581,7 +2581,7 @@ func (r *rpcServer) GetNodeInfo(ctx context.Context,
func (r *rpcServer) QueryRoutes(ctx context.Context,
in *lnrpc.QueryRoutesRequest) (*lnrpc.QueryRoutesResponse, error) {
// First parse the hex-encdoed public key into a full public key objet
// First parse the hex-encoded public key into a full public key object
// we can properly manipulate.
pubKeyBytes, err := hex.DecodeString(in.PubKey)
if err != nil {
@ -2817,7 +2817,7 @@ func (r *rpcServer) SubscribeChannelGraph(req *lnrpc.GraphTopologySubscription,
}
}
// marshallTopologyChange performs a mapping from the topology change sturct
// marshallTopologyChange performs a mapping from the topology change struct
// returned by the router to the form of notifications expected by the current
// gRPC service.
func marshallTopologyChange(topChange *routing.TopologyChange) *lnrpc.GraphTopologyUpdate {
@ -3066,7 +3066,7 @@ func (r *rpcServer) FeeReport(ctx context.Context,
}
// minFeeRate is the smallest permitted fee rate within the network. This is
// dervied by the fact that fee rates are computed using a fixed point of
// derived by the fact that fee rates are computed using a fixed point of
// 1,000,000. As a result, the smallest representable fee rate is 1e-6, or
// 0.000001, or 0.0001%.
const minFeeRate = 1e-6
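
The fixed point means a rate is stored as an integer number of millionths, so the usual Lightning forwarding fee works out to a base fee plus amount * rateParts / 1,000,000. A small worked example (field names are illustrative):

package main

import "fmt"

const feeRateParts = 1_000_000 // fixed point: 1 part == 1e-6 == 0.0001%

// forwardingFee computes base + amt*rate/1e6, all in millisatoshis.
func forwardingFee(baseMsat, amtMsat, rateParts int64) int64 {
	return baseMsat + amtMsat*rateParts/feeRateParts
}

func main() {
	// Forwarding 50,000,000 msat at a 1,000 msat base fee and a
	// proportional rate of 200 parts per million (0.02%):
	fmt.Println(forwardingFee(1_000, 50_000_000, 200)) // 11000 msat
}
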

14
server.go

@ -175,7 +175,7 @@ func newServer(listenAddrs []string, chanDB *channeldb.DB, cc *chainControl,
s.witnessBeacon = &preimageBeacon{
invoices: s.invoices,
wCache: chanDB.NewWitnessCache(),
subscribers: make(map[uint64]*preimageSubcriber),
subscribers: make(map[uint64]*preimageSubscriber),
}
// If the debug HTLC flag is on, then we invoice a "master debug"
@ -579,7 +579,7 @@ func (s *server) WaitForShutdown() {
// based on the server, and currently active bootstrap mechanisms as defined
// within the current configuration.
func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, error) {
srvrLog.Infof("Initializing peer network boostrappers!")
srvrLog.Infof("Initializing peer network bootstrappers!")
var bootStrappers []discovery.NetworkPeerBootstrapper
@ -599,9 +599,9 @@ func initNetworkBootstrappers(s *server) ([]discovery.NetworkPeerBootstrapper, e
dnsSeeds, ok := chainDNSSeeds[*activeNetParams.GenesisHash]
// If we have a set of DNS seeds for this chain, then we'll add
// it as an additional boostrapping source.
// it as an additional bootstrapping source.
if ok {
srvrLog.Infof("Creating DNS peer boostrapper with "+
srvrLog.Infof("Creating DNS peer bootstrapper with "+
"seeds: %v", dnsSeeds)
dnsBootStrapper, err := discovery.NewDNSSeedBootstrapper(
@ -670,7 +670,7 @@ func (s *server) peerBootstrapper(numTargetPeers uint32,
// We'll start with a 15 second backoff, and double the time every time
// an epoch fails up to a ceiling.
const backOffCeliing = time.Minute * 5
const backOffCeiling = time.Minute * 5
backOff := time.Second * 15
// We'll create a new ticker to wake us up every 15 seconds so we can
@ -712,8 +712,8 @@ func (s *server) peerBootstrapper(numTargetPeers uint32,
sampleTicker.Stop()
backOff *= 2
if backOff > backOffCeliing {
backOff = backOffCeliing
if backOff > backOffCeiling {
backOff = backOffCeiling
}
srvrLog.Debugf("Backing off peer bootstrapper to "+

4
shachain/element.go

@ -30,7 +30,7 @@ func newElementFromStr(s string, index index) (*element, error) {
}
// derive computes one shachain element from another by applying a series of
// bit flips and hasing operations based on the starting and ending index.
// bit flips and hashing operations based on the starting and ending index.
func (e *element) derive(toIndex index) (*element, error) {
fromIndex := e.index
@ -71,7 +71,7 @@ func (e *element) isEqual(e2 *element) bool {
const (
// maxHeight is used to determine the maximum allowable index and the
// length of the array required to order to derive all previous hashes
// by index. The entries of this array as also knowns as buckets.
// by index. The entries of this array as also known as buckets.
maxHeight uint8 = 48
// rootIndex is an index which corresponds to the root hash.

2
shachain/element_test.go

@ -257,7 +257,7 @@ func TestSpecificationDeriveElement(t *testing.T) {
"but it's not", test.name)
}
// Generate element which we should get after deriviation.
// Generate element which we should get after derivation.
output, err := newElementFromStr(test.output, test.index)
if err != nil {
t.Fatal(err)

4
shachain/store.go

@ -35,7 +35,7 @@ type Store interface {
}
// RevocationStore is a concrete implementation of the Store interface. The
// revocation store is able to efficiently store N derived shahain elements in
// revocation store is able to efficiently store N derived shachain elements in
// a space efficient manner with a space complexity of O(log N). The original
// description of the storage methodology can be found here:
// https://github.com/lightningnetwork/lightning-rfc/blob/master/03-transactions.md#efficient-per-commitment-secret-storage
@ -142,7 +142,7 @@ func (store *RevocationStore) AddNextEntry(hash *chainhash.Hash) error {
}
if !e.isEqual(&store.buckets[i]) {
return errors.New("hash isn't deriavable from " +
return errors.New("hash isn't derivable from " +
"previous ones")
}
}

4
shachain/utils.go

@ -7,7 +7,7 @@ import (
)
// changeBit is a functio that function that flips a bit of the hash at a
// particluar bit-index. You should be aware that the bit flipping in this
// particular bit-index. You should be aware that the bit flipping in this
// function a bit strange, example:
// hash: [0b00000000, 0b00000000, ... 0b00000000]
// 0 1 ... 31
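
Concretely, flipping "bit b" of the hash means indexing byte b/8 and toggling one bit inside it; the strangeness the comment warns about is only which bit within the byte gets chosen. The sketch below uses a least-significant-first convention, which is an assumption and may not match the exact ordering used here:

package main

import "fmt"

// flipBit toggles one bit of a 32-byte hash. The byte is index/8; the
// choice of bit within that byte (least significant first below) is an
// assumption for illustration only.
func flipBit(hash *[32]byte, index uint8) {
	byteNumber := index / 8
	bitNumber := index % 8
	hash[byteNumber] ^= byte(1) << bitNumber
}

func main() {
	var hash [32]byte
	flipBit(&hash, 0) // toggles bit 0 of byte 0
	flipBit(&hash, 9) // toggles bit 1 of byte 1
	fmt.Printf("%x %x\n", hash[0], hash[1]) // 1 2
}
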
@ -64,7 +64,7 @@ func countTrailingZeros(index index) uint8 {
return zeros
}
// hashFromString takes a hex-encoded string as input and creates an instane of
// hashFromString takes a hex-encoded string as input and creates an instance of
// chainhash.Hash. The chainhash.NewHashFromStr function not suitable because
// it reverse the given hash.
func hashFromString(s string) (*chainhash.Hash, error) {

2
signal.go

@ -14,7 +14,7 @@ import (
var interruptChannel chan os.Signal
// shutdownRequestChannel is used to request the daemon to shutdown gracefully,
// similar to when receiveing SIGINT.
// similar to when receiving SIGINT.
var shutdownRequestChannel = make(chan struct{})
// addHandlerChannel is used to add an interrupt handler to the list of handlers

4
test_utils.go

@ -269,8 +269,8 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
alicePeer := &peer{
server: s,
sendQueue: make(chan outgoinMsg, 1),
outgoingQueue: make(chan outgoinMsg, outgoingQueueLen),
sendQueue: make(chan outgoingMsg, 1),
outgoingQueue: make(chan outgoingMsg, outgoingQueueLen),
activeChannels: make(map[lnwire.ChannelID]*lnwallet.LightningChannel),
newChannels: make(chan *newChannelMsg, 1),

6
utxonursery.go

@ -374,7 +374,7 @@ func (u *utxoNursery) IncubateOutputs(chanPoint wire.OutPoint,
0,
)
// We'll skip any zero value'd outputs as this indicates we
// We'll skip any zero valued outputs as this indicates we
// don't have a settled balance within the commitment
// transaction.
if selfOutput.Amount() > 0 {
@ -1189,7 +1189,7 @@ func (u *utxoNursery) waitForSweepConf(classHeight uint32,
// Mark the confirmed kindergarten outputs as graduated.
if err := u.cfg.Store.GraduateKinder(classHeight); err != nil {
utxnLog.Errorf("Unable to graduate %v kingdergarten outputs: "+
utxnLog.Errorf("Unable to graduate %v kindergarten outputs: "+
"%v", len(kgtnOutputs), err)
return
}
@ -1940,7 +1940,7 @@ func readTxOut(r io.Reader, txo *wire.TxOut) error {
return nil
}
// Compile-time constraint to ensure kidOutput and babyOutpt implement the
// Compile-time constraint to ensure kidOutput and babyOutput implement the
// CsvSpendableOutput interface.
var _ CsvSpendableOutput = (*kidOutput)(nil)
var _ CsvSpendableOutput = (*babyOutput)(nil)

Some files were not shown because too many files have changed in this diff.
