multi: fix spelling mistakes
parent 517cdd66bf
commit 17b2140cb5
@@ -8,7 +8,7 @@ import (
 // limits it will need to stay inside when opening channels.
 type AgentConstraints interface {
 // ChannelBudget should, given the passed parameters, return whether
-// more channels can be be opened while still staying withing the set
+// more channels can be be opened while still staying within the set
 // constraints. If the constraints allow us to open more channels, then
 // the first return value will represent the amount of additional funds
 // available towards creating channels. The second return value is the
@@ -75,7 +75,7 @@ func NewConstraints(minChanSize, maxChanSize btcutil.Amount, chanLimit,
 }

 // ChannelBudget should, given the passed parameters, return whether more
-// channels can be be opened while still staying withing the set constraints.
+// channels can be be opened while still staying within the set constraints.
 // If the constraints allow us to open more channels, then the first return
 // value will represent the amount of additional funds available towards
 // creating channels. The second return value is the exact *number* of
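To make the documented contract concrete, here is a minimal sketch of a constraints implementation, assuming a simplified signature and hypothetical fields (maxAllocation, chanLimit); it is not lnd's actual AgentConstraints code:

package constraintsexample

import "github.com/btcsuite/btcutil"

// fixedConstraints is a hypothetical implementation used only to illustrate
// the contract described above: the first return value is the additional
// funds available for opening channels, the second is how many more
// channels may still be opened.
type fixedConstraints struct {
	maxAllocation btcutil.Amount
	chanLimit     int
}

// ChannelBudget uses a simplified signature: the caller passes the amounts
// already committed to open channels and the spendable wallet balance.
func (c *fixedConstraints) ChannelBudget(openChans []btcutil.Amount,
	walletFunds btcutil.Amount) (btcutil.Amount, uint32) {

	// No budget left once the channel count limit is reached.
	if len(openChans) >= c.chanLimit {
		return 0, 0
	}

	var allocated btcutil.Amount
	for _, amt := range openChans {
		allocated += amt
	}

	// Available funds are whatever remains under the allocation cap,
	// bounded by what the wallet actually holds.
	available := c.maxAllocation - allocated
	if available > walletFunds {
		available = walletFunds
	}
	if available < 0 {
		available = 0
	}

	return available, uint32(c.chanLimit - len(openChans))
}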
@@ -909,7 +909,7 @@ func (b *BtcdNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint,
 // In order to ensure that we don't block the caller on what may be a
 // long rescan, we'll launch a new goroutine to handle the async result
 // of the rescan. We purposefully prevent from adding this goroutine to
-// the WaitGroup as we cannnot wait for a quit signal due to the
+// the WaitGroup as we cannot wait for a quit signal due to the
 // asyncResult channel not being exposed.
 //
 // TODO(wilmer): add retry logic if rescan fails?
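As a rough illustration of the pattern that comment describes (hypothetical names, not the notifier's actual code): the rescan result is consumed in a goroutine that is deliberately left out of the WaitGroup, because there is no quit signal to select on while waiting.

package notifierexample

// handleAsyncRescan launches a goroutine to wait for the rescan result. It is
// intentionally not tracked by a sync.WaitGroup: asyncResult exposes no
// quit/cancel path, so a tracked goroutine could block shutdown forever.
func handleAsyncRescan(asyncResult <-chan error, onErr func(error)) {
	go func() {
		if err := <-asyncResult; err != nil {
			onErr(err)
		}
	}()
}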
@@ -207,7 +207,7 @@ func (r ConfRequest) MatchesTx(tx *wire.MsgTx) bool {
 }

 // ConfNtfn represents a notifier client's request to receive a notification
-// once the target transaction/ouput script gets sufficient confirmations. The
+// once the target transaction/output script gets sufficient confirmations. The
 // client is asynchronously notified via the ConfirmationEvent channels.
 type ConfNtfn struct {
 // ConfID uniquely identifies the confirmation notification request for
@@ -1076,7 +1076,7 @@ func TestTxNotifierCancelConf(t *testing.T) {
 t.Fatalf("expected to receive confirmation notification")
 }

-// The second one, however, should not have. The event's Confrimed
+// The second one, however, should not have. The event's Confirmed
 // channel must have also been closed to indicate the caller that the
 // TxNotifier can no longer fulfill their canceled request.
 select {
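The close-to-signal-cancellation behavior the comment relies on can be sketched as follows (illustrative types only; lnd's Confirmed channel carries a confirmation detail rather than struct{}):

package txnotifierexample

import "time"

// confirmationState reports whether a notification was delivered, canceled
// (channel closed), or is still pending. A receive from a closed channel
// returns immediately with ok == false, which is the signal the test above
// checks for.
func confirmationState(confirmed <-chan struct{}) string {
	select {
	case _, ok := <-confirmed:
		if !ok {
			return "canceled"
		}
		return "confirmed"
	case <-time.After(time.Second):
		return "pending"
	}
}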
@@ -333,7 +333,7 @@ var listUnspentCommand = cli.Command{
 "to be MaxInt32, otherwise max_confs remains " +
 "zero. An error is returned if the value is " +
 "true and both min_confs and max_confs are " +
-"non-zero. (defualt: false)",
+"non-zero. (default: false)",
 },
 },
 Action: actionDecorator(listUnspent),
@@ -225,7 +225,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) {
 }
 }

-// dlpTestCase is a speical struct that we'll use to generate randomized test
+// dlpTestCase is a special struct that we'll use to generate randomized test
 // cases for the main TestChainWatcherDataLossProtect test. This struct has a
 // special Generate method that will generate a random state number, and a
 // broadcast state number which is greater than that state number.
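The Generate method mentioned here follows Go's testing/quick generator convention; a sketch under assumed field names (not the struct from the lnd test) might look like:

package chainwatcherexample

import (
	"math/rand"
	"reflect"
)

// dlpSketch mirrors the idea described above: Generate produces a random
// state number plus a broadcast state number strictly greater than it, so
// testing/quick can feed only valid randomized cases into the test.
type dlpSketch struct {
	NumUpdates        uint8
	BroadcastStateNum uint8
}

// Generate satisfies testing/quick's Generator interface.
func (d dlpSketch) Generate(r *rand.Rand, size int) reflect.Value {
	c := dlpSketch{NumUpdates: uint8(r.Int31n(100))}

	// The broadcast state is always ahead of the current state number.
	c.BroadcastStateNum = c.NumUpdates + uint8(r.Int31n(50)) + 1

	return reflect.ValueOf(c)
}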
@@ -258,7 +258,7 @@ type GossipSyncer struct {
 syncTransitionReqs chan *syncTransitionReq

 // historicalSyncReqs is a channel that serves as a signal for the
-// gossip syncer to perform a historical sync. Theese can only be done
+// gossip syncer to perform a historical sync. These can only be done
 // once the gossip syncer is in a chansSynced state to ensure its state
 // machine behaves as expected.
 historicalSyncReqs chan *historicalSyncReq
@@ -4664,7 +4664,7 @@ func testSendToRouteErrorPropagation(net *lntest.NetworkHarness, t *harnessTest)
 }

 // testUnannouncedChannels checks unannounced channels are not returned by
-// describeGraph RPC request unless explicity asked for.
+// describeGraph RPC request unless explicitly asked for.
 func testUnannouncedChannels(net *lntest.NetworkHarness, t *harnessTest) {
 ctxb := context.Background()

@@ -7970,7 +7970,7 @@ func testDataLossProtection(net *lntest.NetworkHarness, t *harnessTest) {
 // the channel, but it will already be closed. Carol should resend the
 // information Dave needs to sweep his funds.
 if err := restartDave(); err != nil {
-t.Fatalf("unabel to restart Eve: %v", err)
+t.Fatalf("unable to restart Eve: %v", err)
 }

 // Dave should sweep his funds.
@@ -35,7 +35,7 @@ func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
 }

 // Before we try to make the new service instance, we'll perform
-// some sanity checks on the arguments to ensure taht they're useable.
+// some sanity checks on the arguments to ensure that they're useable.
 switch {
 case config.Manager == nil:
 return nil, nil, fmt.Errorf("Manager must be set to create " +
peer.go
@@ -1964,7 +1964,7 @@ out:
 // Since this channel will never fire again during the
 // lifecycle of the peer, we nil the channel to mark it
 // eligible for garbage collection, and make this
-// explicity ineligible to receive in future calls to
+// explicitly ineligible to receive in future calls to
 // select. This also shaves a few CPU cycles since the
 // select will ignore this case entirely.
 reenableTimeout = nil
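The trick referenced in this comment (nil-ing a received channel so its select case can never fire again) looks roughly like this in isolation; names are illustrative:

package peerexample

import "time"

// runLoop fires a one-shot timer case, then disables it by setting the
// channel to nil: a receive from a nil channel blocks forever, so the select
// will simply never choose that case again.
func runLoop(quit <-chan struct{}, reenable func()) {
	reenableTimeout := time.After(time.Minute)

	for {
		select {
		case <-reenableTimeout:
			reenable()
			// Mark the case ineligible for all future iterations.
			reenableTimeout = nil

		case <-quit:
			return
		}
	}
}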
@@ -17,7 +17,7 @@ type Write struct {
 }

 // NewWrite creates a Write pool, using an underlying Writebuffer pool to
-// recycle buffer.Write objects accross the lifetime of the Write pool's
+// recycle buffer.Write objects across the lifetime of the Write pool's
 // workers.
 func NewWrite(writeBufferPool *WriteBuffer, numWorkers int,
 workerTimeout time.Duration) *Write {
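The recycling idea described here can be sketched with the standard library's sync.Pool (lnd's pool package has its own worker-based implementation, so this is only an approximation of the concept):

package poolexample

import (
	"bytes"
	"sync"
)

// writeBufPool hands out reusable buffers so each write does not allocate a
// fresh one; buffers are reset and returned for reuse across the pool's
// lifetime.
var writeBufPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

// withWriteBuffer runs work with a pooled buffer and recycles it afterwards.
func withWriteBuffer(work func(*bytes.Buffer)) {
	buf := writeBufPool.Get().(*bytes.Buffer)
	defer func() {
		buf.Reset()
		writeBufPool.Put(buf)
	}()
	work(buf)
}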
@@ -91,7 +91,7 @@ func (p *paymentSession) ReportEdgePolicyFailure(
 // this channel. If so, then we'll prune out the vertex.
 _, ok := p.errFailedPolicyChans[*failedEdge]
 if ok {
-// TODO(joostjager): is this aggresive pruning still necessary?
+// TODO(joostjager): is this aggressive pruning still necessary?
 // Just pruning edges may also work unless there is a huge
 // number of failing channels from that node?
 p.ReportVertexFailure(errSource)
@@ -915,7 +915,7 @@ func (r *rpcServer) SendCoins(ctx context.Context,
 }

 // If the destination address parses to a valid pubkey, we assume the user
-// accidently tried to send funds to a bare pubkey address. This check is
+// accidentally tried to send funds to a bare pubkey address. This check is
 // here to prevent unintended transfers.
 decodedAddr, _ := hex.DecodeString(in.Addr)
 _, err = btcec.ParsePubKey(decodedAddr, btcec.S256())
@@ -414,7 +414,7 @@ const (
 V3
 )

-// AddOnionConfig houses all of the required paramaters in order to succesfully
+// AddOnionConfig houses all of the required parameters in order to successfully
 // create a new onion service or restore an existing one.
 type AddOnionConfig struct {
 // Type denotes the type of the onion service that should be created.
@@ -804,7 +804,7 @@ func (u *utxoNursery) sweepMatureOutputs(classHeight uint32,

 for _, output := range kgtnOutputs {
 // Create local copy to prevent pointer to loop variable to be
-// passed in with disastruous consequences.
+// passed in with disastrous consequences.
 local := output

 resultChan, err := u.cfg.SweepInput(&local)
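The pitfall this comment guards against is the classic pre-Go-1.22 range-variable aliasing problem; a stripped-down sketch of the same defensive copy:

package nurseryexample

// sweepAll passes each output to sweep by pointer. Copying into a local
// first gives every call its own stable value; taking &output directly
// would (before Go 1.22) hand every callee a pointer to the same variable
// that keeps being overwritten by the loop.
func sweepAll(outputs []int64, sweep func(*int64) error) error {
	for _, output := range outputs {
		// Create local copy to prevent pointer to loop variable from
		// being passed in.
		local := output
		if err := sweep(&local); err != nil {
			return err
		}
	}
	return nil
}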
@@ -125,7 +125,7 @@ type Config struct {
 // until MaxBackoff.
 MinBackoff time.Duration

-// MaxBackoff defines the maximum backoff applied to conenctions with
+// MaxBackoff defines the maximum backoff applied to connections with
 // watchtowers. If the exponential backoff produces a timeout greater
 // than this value, the backoff will be clamped to MaxBackoff.
 MaxBackoff time.Duration
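The clamping behavior described by the MinBackoff/MaxBackoff comments amounts to a simple doubling rule; a minimal sketch, not the wtclient implementation:

package backoffexample

import "time"

// nextBackoff doubles the current delay and clamps it between minBackoff and
// maxBackoff, so repeated failures grow the wait exponentially but never
// beyond the configured ceiling.
func nextBackoff(current, minBackoff, maxBackoff time.Duration) time.Duration {
	if current < minBackoff {
		return minBackoff
	}
	next := 2 * current
	if next > maxBackoff {
		next = maxBackoff
	}
	return next
}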
@@ -29,7 +29,7 @@ var (
 ErrFailedNegotiation = errors.New("session negotiation unsuccessful")

 // ErrUnregisteredChannel signals that the client was unable to backup a
-// revoked state becuase the channel had not been previously registered
+// revoked state because the channel had not been previously registered
 // with the client.
 ErrUnregisteredChannel = errors.New("channel is not registered")
 )
@@ -80,7 +80,7 @@ type NegotiatorConfig struct {

 // MaxBackoff defines the maximum backoff applied by the session
 // negotiator after all tower candidates have been exhausted and
-// reattempting negotation with the same set of candidates. If the
+// reattempting negotiation with the same set of candidates. If the
 // exponential backoff produces a timeout greater than this value, the
 // backoff duration will be clamped to MaxBackoff.
 MaxBackoff time.Duration
@@ -218,7 +218,7 @@ func (n *sessionNegotiator) negotiationDispatcher() {
 // and attempting to negotiate a new session until a successful negotiation
 // occurs. If the candidate iterator becomes exhausted because none were
 // successful, this method will back off exponentially up to the configured max
-// backoff. This method will continue trying until a negotiation is succesful
+// backoff. This method will continue trying until a negotiation is successful
 // before returning the negotiated session to the dispatcher via the succeed
 // channel.
 //
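Put together, the retry discipline this doc comment describes (iterate candidates, back off only after a full exhausted pass, keep going until one succeeds) can be sketched as follows, with hypothetical helper names:

package negotiatorexample

import "time"

// negotiateUntilSuccess keeps cycling through the candidate set, backing off
// (exponentially, clamped to maxBackoff) only after every candidate in a
// pass has failed, and returns as soon as one negotiation succeeds.
func negotiateUntilSuccess(candidates []string, try func(string) bool,
	minBackoff, maxBackoff time.Duration) string {

	backoff := minBackoff
	for {
		for _, c := range candidates {
			if try(c) {
				return c
			}
		}

		// The whole set was exhausted without success: wait, then
		// grow the backoff for the next pass.
		time.Sleep(backoff)
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}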
@@ -116,6 +116,6 @@ type CommittedUpdate struct {

 // EncryptedBlob is a ciphertext containing the sweep information for
 // exacting justice if the commitment transaction matching the breach
-// hint is braodcast.
+// hint is broadcast.
 EncryptedBlob []byte
 }
@@ -20,7 +20,7 @@ type SessionStateUpdate struct {

 // EncryptedBlob is a ciphertext containing the sweep information for
 // exacting justice if the commitment transaction matching the breach
-// hint is braodcast.
+// hint is broadcast.
 EncryptedBlob []byte
 }

@@ -122,7 +122,7 @@ func (p *Policy) ComputeAltruistOutput(totalAmt btcutil.Amount,

 // ComputeRewardOutputs splits the total funds in a breaching commitment
 // transaction between the victim and the tower, according to the sweep fee rate
-// and reward rate. The reward to he tower is substracted first, before
+// and reward rate. The reward to he tower is subtracted first, before
 // splitting the remaining balance amongst the victim and fees.
 func (p *Policy) ComputeRewardOutputs(totalAmt btcutil.Amount,
 txWeight int64) (btcutil.Amount, btcutil.Amount, error) {
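As a worked illustration of the split order described above (reward off the top, then fees, remainder to the victim), under assumed units of parts-per-million for the reward rate and sat/kw for the fee rate; this is a hypothetical helper, not lnd's Policy method:

package policyexample

import (
	"errors"

	"github.com/btcsuite/btcutil"
)

// splitRewardOutputs deducts the tower's reward from the total first, then
// the transaction fee, and returns (victimAmt, rewardAmt). All parameters
// and units here are illustrative assumptions.
func splitRewardOutputs(totalAmt btcutil.Amount, rewardRatePPM uint32,
	feeRatePerKW btcutil.Amount, txWeight int64) (btcutil.Amount,
	btcutil.Amount, error) {

	// Reward is a proportion of the total, expressed in millionths.
	reward := totalAmt * btcutil.Amount(rewardRatePPM) / 1_000_000

	// Fee for a transaction of the given weight at the sweep fee rate.
	fee := feeRatePerKW * btcutil.Amount(txWeight) / 1000

	victim := totalAmt - reward - fee
	if victim <= 0 {
		return 0, 0, errors.New("victim output would be dust or negative")
	}

	return victim, reward, nil
}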
@ -4,7 +4,7 @@ import "io"
|
||||
|
||||
// Error is a generic error message that can be sent to a client if a request
|
||||
// fails outside of prescribed protocol errors. Typically this would be followed
|
||||
// by the server disconnecting the client, and so can be useful to transfering
|
||||
// by the server disconnecting the client, and so can be useful to transferring
|
||||
// the exact reason.
|
||||
type Error struct {
|
||||
// Code specifies the error code encountered by the server.
|
||||
|