multi: replace per channel sigPool with global daemon level sigPool
In this commit, we remove the per-channel `sigPool` within the `lnwallet.LightningChannel` struct. With this change, we ensure that as the number of channels grows, the number of goroutines idling in the sigPool stays constant. Currently, most channels on the daemon are likely inactive, with only a handful consistently carrying out channel updates. As a result, this change should reduce the amount of idle CPU usage, as we have fewer active goroutines in select loops.

In order to make this change, the `SigPool` itself has been publicly exported such that outside callers can make a `SigPool` and pass it into newly created channels. Since the sig pool now lives outside the channel, we were also able to do away with the channel's Stop() method altogether. Finally, the server is the sub-system that is currently responsible for managing the `SigPool` within lnd.
parent 0fafd5e2fd
commit fa160f559c
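The sketch below illustrates the wiring this change enables. It is illustrative only, not the actual server-side integration (which this diff excerpt does not show); the startChannels helper and its arguments are hypothetical, while NewSigPool, Start, Stop, and the four-argument NewLightningChannel are taken from the diff that follows.

package sketch

import (
	"runtime"

	"github.com/lightningnetwork/lnd/channeldb"
	"github.com/lightningnetwork/lnd/lnwallet"
)

// startChannels is a hypothetical helper: one daemon-level SigPool is
// created and started once, then shared by every LightningChannel, so the
// number of signing goroutines stays constant as channels are added.
func startChannels(signer lnwallet.Signer, pCache lnwallet.PreimageCache,
	chanStates []*channeldb.OpenChannel) ([]*lnwallet.LightningChannel, error) {

	sigPool := lnwallet.NewSigPool(runtime.NumCPU(), signer)
	if err := sigPool.Start(); err != nil {
		return nil, err
	}

	channels := make([]*lnwallet.LightningChannel, 0, len(chanStates))
	for _, chanState := range chanStates {
		// Every channel receives the same shared pool; channels no
		// longer spin up their own, so there is no Stop() to defer.
		lnChan, err := lnwallet.NewLightningChannel(
			signer, pCache, chanState, sigPool,
		)
		if err != nil {
			return nil, err
		}
		channels = append(channels, lnChan)
	}

	return channels, nil
}

The owning sub-system (the server, per the commit message) would then call sigPool.Stop() once during daemon shutdown.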
@@ -1123,18 +1123,6 @@ func TestBreachHandoffFail(t *testing.T) {
 	}
 	defer cleanUpArb()
 
-	// Instantiate a second lightning channel for alice, using the state of
-	// her last channel.
-	aliceKeyPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(),
-		alicesPrivKey)
-	aliceSigner := &mockSigner{aliceKeyPriv}
-
-	alice2, err := lnwallet.NewLightningChannel(aliceSigner, nil, alice.State())
-	if err != nil {
-		t.Fatalf("unable to create test channels: %v", err)
-	}
-	defer alice2.Stop()
-
 	// Signal a spend of the funding transaction and wait for the close
 	// observer to exit. This time we are allowing the handoff to succeed.
 	breach = &ContractBreachEvent{
@@ -1567,18 +1555,23 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa
 	aliceSigner := &mockSigner{aliceKeyPriv}
 	bobSigner := &mockSigner{bobKeyPriv}
 
+	alicePool := lnwallet.NewSigPool(1, aliceSigner)
 	channelAlice, err := lnwallet.NewLightningChannel(
-		aliceSigner, pCache, aliceChannelState,
+		aliceSigner, pCache, aliceChannelState, alicePool,
 	)
 	if err != nil {
 		return nil, nil, nil, err
 	}
+	alicePool.Start()
+
+	bobPool := lnwallet.NewSigPool(1, bobSigner)
 	channelBob, err := lnwallet.NewLightningChannel(
-		bobSigner, pCache, bobChannelState,
+		bobSigner, pCache, bobChannelState, bobPool,
 	)
 	if err != nil {
 		return nil, nil, nil, err
 	}
+	bobPool.Start()
 
 	addr := &net.TCPAddr{
 		IP: net.ParseIP("127.0.0.1"),
@@ -3,10 +3,11 @@ package contractcourt
 import (
 	"errors"
 	"fmt"
-	"github.com/lightningnetwork/lnd/sweep"
 	"sync"
 	"sync/atomic"
 
+	"github.com/lightningnetwork/lnd/sweep"
+
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
 	"github.com/btcsuite/btcd/wire"
 	"github.com/btcsuite/btcutil"
@@ -238,11 +239,11 @@ func newActiveChannelArbitrator(channel *channeldb.OpenChannel,
 	}
 
 	chanMachine, err := lnwallet.NewLightningChannel(
-		c.cfg.Signer, c.cfg.PreimageDB, channel)
+		c.cfg.Signer, c.cfg.PreimageDB, channel, nil,
+	)
 	if err != nil {
 		return nil, err
 	}
-	chanMachine.Stop()
 
 	if err := c.cfg.MarkLinkInactive(chanPoint); err != nil {
 		log.Errorf("unable to mark link inactive: %v", err)
@@ -1681,13 +1681,12 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) {
 	// Go on adding the channel to the channel graph, and crafting
 	// channel announcements.
 	lnChannel, err := lnwallet.NewLightningChannel(
-		nil, nil, completeChan,
+		nil, nil, completeChan, nil,
 	)
 	if err != nil {
 		fndgLog.Errorf("failed creating lnChannel: %v", err)
 		return
 	}
-	defer lnChannel.Stop()
 
 	err = f.sendFundingLocked(
 		fmsg.peer, completeChan, lnChannel, shortChanID,
@@ -1971,12 +1970,11 @@ func (f *fundingManager) handleFundingConfirmation(peer lnpeer.Peer,
 
 	// We create the state-machine object which wraps the database state.
 	lnChannel, err := lnwallet.NewLightningChannel(
-		nil, nil, completeChan,
+		nil, nil, completeChan, nil,
 	)
 	if err != nil {
 		return err
 	}
-	defer lnChannel.Stop()
 
 	chanID := lnwire.NewChanIDFromOutPoint(&completeChan.FundingOutpoint)
 
@@ -443,7 +443,6 @@ func (l *channelLink) Stop() {
 	}
 
 	l.updateFeeTimer.Stop()
-	l.channel.Stop()
 	l.overflowQueue.Stop()
 
 	close(l.quit)
@@ -1592,8 +1592,6 @@ func newSingleLinkTestHarness(chanAmt, chanReserve btcutil.Amount) (
 	cleanUp := func() {
 		close(alicePeer.quit)
 		defer fCleanUp()
-		defer aliceLink.Stop()
-		defer bobChannel.Stop()
 	}
 
 	return aliceLink, bobChannel, bticker.Force, start, cleanUp, restore, nil
@@ -10,6 +10,7 @@ import (
 	"math/big"
 	"net"
 	"os"
+	"runtime"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -369,18 +370,23 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
 		preimageMap: make(map[[32]byte][]byte),
 	}
 
+	alicePool := lnwallet.NewSigPool(runtime.NumCPU(), aliceSigner)
 	channelAlice, err := lnwallet.NewLightningChannel(
-		aliceSigner, pCache, aliceChannelState,
+		aliceSigner, pCache, aliceChannelState, alicePool,
 	)
 	if err != nil {
 		return nil, nil, nil, nil, err
 	}
+	alicePool.Start()
+
+	bobPool := lnwallet.NewSigPool(runtime.NumCPU(), bobSigner)
 	channelBob, err := lnwallet.NewLightningChannel(
-		bobSigner, pCache, bobChannelState,
+		bobSigner, pCache, bobChannelState, bobPool,
 	)
 	if err != nil {
 		return nil, nil, nil, nil, err
 	}
+	bobPool.Start()
 
 	// Now that the channel are open, simulate the start of a session by
 	// having Alice and Bob extend their revocation windows to each other.
@@ -402,6 +408,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
 
 	restore := func() (*lnwallet.LightningChannel, *lnwallet.LightningChannel,
 		error) {
+
 		aliceStoredChannels, err := dbAlice.FetchOpenChannels(aliceKeyPub)
 		switch err {
 		case nil:
@@ -434,8 +441,9 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
 			return nil, nil, errors.New("unable to find stored alice channel")
 		}
 
-		newAliceChannel, err := lnwallet.NewLightningChannel(aliceSigner,
-			nil, aliceStoredChannel)
+		newAliceChannel, err := lnwallet.NewLightningChannel(
+			aliceSigner, nil, aliceStoredChannel, alicePool,
+		)
 		if err != nil {
 			return nil, nil, errors.Errorf("unable to create new channel: %v",
 				err)
@@ -473,8 +481,9 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
 			return nil, nil, errors.New("unable to find stored bob channel")
 		}
 
-		newBobChannel, err := lnwallet.NewLightningChannel(bobSigner,
-			nil, bobStoredChannel)
+		newBobChannel, err := lnwallet.NewLightningChannel(
+			bobSigner, nil, bobStoredChannel, bobPool,
+		)
 		if err != nil {
 			return nil, nil, errors.Errorf("unable to create new channel: %v",
 				err)
@@ -485,7 +494,7 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte,
 	return channelAlice, channelBob, cleanUpFunc, restore, nil
 }
 
-// getChanID retrieves the channel point from nwire message.
+// getChanID retrieves the channel point from an lnnwire message.
 func getChanID(msg lnwire.Message) (lnwire.ChannelID, error) {
 	var chanID lnwire.ChannelID
 	switch msg := msg.(type) {
@@ -5,10 +5,8 @@ import (
 	"container/list"
 	"crypto/sha256"
 	"fmt"
-	"runtime"
 	"sort"
 	"sync"
-	"sync/atomic"
 
 	"github.com/btcsuite/btcd/blockchain"
 	"github.com/btcsuite/btcd/chaincfg/chainhash"
@@ -1260,8 +1258,6 @@ func compactLogs(ourLog, theirLog *updateLog,
 //
 // See the individual comments within the above methods for further details.
 type LightningChannel struct {
-	shutdown int32 // To be used atomically.
-
 	// Signer is the main signer instances that will be responsible for
 	// signing any HTLC and commitment transaction generated by the state
 	// machine.
@@ -1282,7 +1278,7 @@ type LightningChannel struct {
 	// validating signatures in parallel. This is utilized as an
 	// optimization to void serially signing or validating the HTLC
 	// signatures, of which there may be hundreds.
-	sigPool *sigPool
+	sigPool *SigPool
 
 	// pCache is the global preimage cache shared across all other
 	// LightningChannel instance. We'll use this cache either when we force
@@ -1347,11 +1343,6 @@ type LightningChannel struct {
 	RemoteFundingKey *btcec.PublicKey
 
 	sync.RWMutex
-
-	cowg sync.WaitGroup
-	wg   sync.WaitGroup
-
-	quit chan struct{}
 }
 
 // NewLightningChannel creates a new, active payment channel given an
@@ -1360,7 +1351,8 @@ type LightningChannel struct {
 // automatically persist pertinent state to the database in an efficient
 // manner.
 func NewLightningChannel(signer Signer, pCache PreimageCache,
-	state *channeldb.OpenChannel) (*LightningChannel, error) {
+	state *channeldb.OpenChannel,
+	sigPool *SigPool) (*LightningChannel, error) {
 
 	localCommit := state.LocalCommitment
 	remoteCommit := state.RemoteCommitment
@@ -1375,9 +1367,8 @@ func NewLightningChannel(signer Signer, pCache PreimageCache,
 	)
 
 	lc := &LightningChannel{
-		// TODO(roasbeef): tune num sig workers?
-		sigPool:           newSigPool(runtime.NumCPU(), signer),
 		Signer:            signer,
+		sigPool:           sigPool,
 		pCache:            pCache,
 		currentHeight:     localCommit.CommitHeight,
 		remoteCommitChain: newCommitmentChain(),
@@ -1391,7 +1382,6 @@ func NewLightningChannel(signer Signer, pCache PreimageCache,
 		Capacity:          state.Capacity,
 		LocalFundingKey:   state.LocalChanCfg.MultiSigKey.PubKey,
 		RemoteFundingKey:  state.RemoteChanCfg.MultiSigKey.PubKey,
-		quit:              make(chan struct{}),
 	}
 
 	// With the main channel struct reconstructed, we'll now restore the
@@ -1411,12 +1401,6 @@ func NewLightningChannel(signer Signer, pCache PreimageCache,
 
 	lc.createStateHintObfuscator()
 
-	// Finally, we'll kick of the signature job pool to handle any upcoming
-	// commitment state generation and validation.
-	if err := lc.sigPool.Start(); err != nil {
-		return nil, err
-	}
-
 	return lc, nil
 }
 
@@ -1467,23 +1451,6 @@ func (lc *LightningChannel) createStateHintObfuscator() {
 	}
 }
 
-// Stop gracefully shuts down any active goroutines spawned by the
-// LightningChannel during regular duties.
-func (lc *LightningChannel) Stop() {
-	if !atomic.CompareAndSwapInt32(&lc.shutdown, 0, 1) {
-		return
-	}
-
-	lc.sigPool.Stop()
-
-	close(lc.quit)
-}
-
-// WaitForClose blocks until the channel's close observer has terminated.
-func (lc *LightningChannel) WaitForClose() {
-	lc.cowg.Wait()
-}
-
 // ResetState resets the state of the channel back to the default state. This
 // ensures that any active goroutines which need to act based on on-chain
 // events do so properly.
@@ -2673,7 +2640,7 @@ func processRemoveEntry(htlc *PaymentDescriptor, ourBalance,
 // asynchronously and in parallel.
 func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 	localChanCfg, remoteChanCfg *channeldb.ChannelConfig,
-	remoteCommitView *commitment) ([]signJob, chan struct{}, error) {
+	remoteCommitView *commitment) ([]SignJob, chan struct{}, error) {
 
 	txHash := remoteCommitView.txn.TxHash()
 	dustLimit := remoteChanCfg.DustLimit
@@ -2684,7 +2651,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 	// smaller (than its total capacity) and some HTLCs may be dust.
 	numSigs := (len(remoteCommitView.incomingHTLCs) +
 		len(remoteCommitView.outgoingHTLCs))
-	sigBatch := make([]signJob, 0, numSigs)
+	sigBatch := make([]SignJob, 0, numSigs)
 
 	var err error
 	cancelChan := make(chan struct{})
@@ -2700,9 +2667,9 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 
 		// If the HTLC isn't dust, then we'll create an empty sign job
 		// to add to the batch momentarily.
-		sigJob := signJob{}
-		sigJob.cancel = cancelChan
-		sigJob.resp = make(chan signJobResp, 1)
+		sigJob := SignJob{}
+		sigJob.Cancel = cancelChan
+		sigJob.Resp = make(chan SignJobResp, 1)
 
 		// As this is an incoming HTLC and we're sinning the commitment
 		// transaction of the remote node, we'll need to generate an
@@ -2718,7 +2685,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 			Hash:  txHash,
 			Index: uint32(htlc.remoteOutputIndex),
 		}
-		sigJob.tx, err = createHtlcTimeoutTx(
+		sigJob.Tx, err = createHtlcTimeoutTx(
 			op, outputAmt, htlc.Timeout,
 			uint32(remoteChanCfg.CsvDelay),
 			keyRing.RevocationKey, keyRing.DelayKey,
@@ -2730,7 +2697,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 		// Finally, we'll generate a sign descriptor to generate a
 		// signature to give to the remote party for this commitment
 		// transaction. Note we use the raw HTLC amount.
-		sigJob.signDesc = SignDescriptor{
+		sigJob.SignDesc = SignDescriptor{
 			KeyDesc:       localChanCfg.HtlcBasePoint,
 			SingleTweak:   keyRing.LocalHtlcKeyTweak,
 			WitnessScript: htlc.theirWitnessScript,
@@ -2738,10 +2705,10 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 				Value: int64(htlc.Amount.ToSatoshis()),
 			},
 			HashType:   txscript.SigHashAll,
-			SigHashes:  txscript.NewTxSigHashes(sigJob.tx),
+			SigHashes:  txscript.NewTxSigHashes(sigJob.Tx),
 			InputIndex: 0,
 		}
-		sigJob.outputIndex = htlc.remoteOutputIndex
+		sigJob.OutputIndex = htlc.remoteOutputIndex
 
 		sigBatch = append(sigBatch, sigJob)
 	}
@@ -2751,9 +2718,9 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 			continue
 		}
 
-		sigJob := signJob{}
-		sigJob.cancel = cancelChan
-		sigJob.resp = make(chan signJobResp, 1)
+		sigJob := SignJob{}
+		sigJob.Cancel = cancelChan
+		sigJob.Resp = make(chan SignJobResp, 1)
 
 		// As this is an outgoing HTLC and we're signing the commitment
 		// transaction of the remote node, we'll need to generate an
@@ -2770,7 +2737,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 			Hash:  txHash,
 			Index: uint32(htlc.remoteOutputIndex),
 		}
-		sigJob.tx, err = createHtlcSuccessTx(
+		sigJob.Tx, err = createHtlcSuccessTx(
 			op, outputAmt, uint32(remoteChanCfg.CsvDelay),
 			keyRing.RevocationKey, keyRing.DelayKey,
 		)
@@ -2781,7 +2748,7 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 		// Finally, we'll generate a sign descriptor to generate a
 		// signature to give to the remote party for this commitment
 		// transaction. Note we use the raw HTLC amount.
-		sigJob.signDesc = SignDescriptor{
+		sigJob.SignDesc = SignDescriptor{
 			KeyDesc:       localChanCfg.HtlcBasePoint,
 			SingleTweak:   keyRing.LocalHtlcKeyTweak,
 			WitnessScript: htlc.theirWitnessScript,
@@ -2789,10 +2756,10 @@ func genRemoteHtlcSigJobs(keyRing *CommitmentKeyRing,
 				Value: int64(htlc.Amount.ToSatoshis()),
 			},
 			HashType:   txscript.SigHashAll,
-			SigHashes:  txscript.NewTxSigHashes(sigJob.tx),
+			SigHashes:  txscript.NewTxSigHashes(sigJob.Tx),
 			InputIndex: 0,
 		}
-		sigJob.outputIndex = htlc.remoteOutputIndex
+		sigJob.OutputIndex = htlc.remoteOutputIndex
 
 		sigBatch = append(sigBatch, sigJob)
 	}
@@ -3059,26 +3026,23 @@ func (lc *LightningChannel) SignNextCommitment() (lnwire.Sig, []lnwire.Sig, erro
 	// order as they appear on the commitment transaction after BIP 69
 	// sorting.
 	sort.Slice(sigBatch, func(i, j int) bool {
-		return sigBatch[i].outputIndex < sigBatch[j].outputIndex
+		return sigBatch[i].OutputIndex < sigBatch[j].OutputIndex
 	})
 
 	// With the jobs sorted, we'll now iterate through all the responses to
 	// gather each of the signatures in order.
 	htlcSigs = make([]lnwire.Sig, 0, len(sigBatch))
 	for _, htlcSigJob := range sigBatch {
-		select {
-		case jobResp := <-htlcSigJob.resp:
-			// If an error occurred, then we'll cancel any other
-			// active jobs.
-			if jobResp.err != nil {
+		jobResp := <-htlcSigJob.Resp
+
+		// If an error occurred, then we'll cancel any other active
+		// jobs.
+		if jobResp.Err != nil {
 			close(cancelChan)
 			return sig, htlcSigs, err
 		}
 
-			htlcSigs = append(htlcSigs, jobResp.sig)
-		case <-lc.quit:
-			return sig, htlcSigs, fmt.Errorf("channel shutting down")
-		}
+		htlcSigs = append(htlcSigs, jobResp.Sig)
 	}
 
 	// As we're about to proposer a new commitment state for the remote
@@ -3740,7 +3704,7 @@ func (lc *LightningChannel) validateCommitmentSanity(theirLogCounter,
 // directly into the pool of workers.
 func genHtlcSigValidationJobs(localCommitmentView *commitment,
 	keyRing *CommitmentKeyRing, htlcSigs []lnwire.Sig,
-	localChanCfg, remoteChanCfg *channeldb.ChannelConfig) ([]verifyJob, error) {
+	localChanCfg, remoteChanCfg *channeldb.ChannelConfig) ([]VerifyJob, error) {
 
 	txHash := localCommitmentView.txn.TxHash()
 	feePerKw := localCommitmentView.feePerKw
@@ -3751,7 +3715,7 @@ func genHtlcSigValidationJobs(localCommitmentView *commitment,
 	// length will be smaller than the total capacity.
 	numHtlcs := (len(localCommitmentView.incomingHTLCs) +
 		len(localCommitmentView.outgoingHTLCs))
-	verifyJobs := make([]verifyJob, 0, numHtlcs)
+	verifyJobs := make([]VerifyJob, 0, numHtlcs)
 
 	// We'll iterate through each output in the commitment transaction,
 	// populating the sigHash closure function if it's detected to be an
@@ -3879,11 +3843,11 @@ func genHtlcSigValidationJobs(localCommitmentView *commitment,
 			continue
 		}
 
-		verifyJobs = append(verifyJobs, verifyJob{
-			htlcIndex: htlcIndex,
-			pubKey:    keyRing.RemoteHtlcKey,
-			sig:       sig,
-			sigHash:   sigHash,
+		verifyJobs = append(verifyJobs, VerifyJob{
+			HtlcIndex: htlcIndex,
+			PubKey:    keyRing.RemoteHtlcKey,
+			Sig:       sig,
+			SigHash:   sigHash,
 		})
 
 		i++
@@ -4083,18 +4047,17 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig,
 	for i := 0; i < len(verifyJobs); i++ {
 		// In the case that a single signature is invalid, we'll exit
 		// early and cancel all the outstanding verification jobs.
-		select {
-		case htlcErr := <-verifyResps:
+		htlcErr := <-verifyResps
 		if htlcErr != nil {
 			close(cancelChan)
 
 			sig, err := lnwire.NewSigFromSignature(
-				htlcErr.sig,
+				htlcErr.Sig,
 			)
 			if err != nil {
 				return err
 			}
-			sigHash, err := htlcErr.sigHash()
+			sigHash, err := htlcErr.SigHash()
 			if err != nil {
 				return err
 			}
@@ -4104,14 +4067,11 @@ func (lc *LightningChannel) ReceiveNewCommitment(commitSig lnwire.Sig,
 			return &InvalidHtlcSigError{
 				commitHeight: nextHeight,
 				htlcSig:      sig.ToSignatureBytes(),
-				htlcIndex:    htlcErr.htlcIndex,
+				htlcIndex:    htlcErr.HtlcIndex,
 				sigHash:      sigHash,
 				commitTx:     txBytes.Bytes(),
 			}
 		}
-		case <-lc.quit:
-			return fmt.Errorf("channel shutting down")
-		}
 	}
 
 	// The signature checks out, so we can now add the new commitment to
@@ -1444,14 +1444,18 @@ func TestStateUpdatePersistence(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to fetch channel: %v", err)
 	}
+
 	aliceChannelNew, err := NewLightningChannel(
 		aliceChannel.Signer, nil, aliceChannels[0],
+		aliceChannel.sigPool,
 	)
 	if err != nil {
 		t.Fatalf("unable to create new channel: %v", err)
 	}
+
 	bobChannelNew, err := NewLightningChannel(
 		bobChannel.Signer, nil, bobChannels[0],
+		bobChannel.sigPool,
 	)
 	if err != nil {
 		t.Fatalf("unable to create new channel: %v", err)
@@ -2544,20 +2548,19 @@ func TestChanSyncFullySynced(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to fetch channel: %v", err)
 	}
+
 	aliceChannelNew, err := NewLightningChannel(
-		aliceChannel.Signer, nil, aliceChannels[0],
+		aliceChannel.Signer, nil, aliceChannels[0], aliceChannel.sigPool,
 	)
 	if err != nil {
 		t.Fatalf("unable to create new channel: %v", err)
 	}
-	defer aliceChannelNew.Stop()
 	bobChannelNew, err := NewLightningChannel(
-		bobChannel.Signer, nil, bobChannels[0],
+		bobChannel.Signer, nil, bobChannels[0], bobChannel.sigPool,
 	)
 	if err != nil {
 		t.Fatalf("unable to create new channel: %v", err)
 	}
-	defer bobChannelNew.Stop()
 
 	assertNoChanSyncNeeded(t, aliceChannelNew, bobChannelNew)
 }
@@ -2576,6 +2579,7 @@ func restartChannel(channelOld *LightningChannel) (*LightningChannel, error) {
 
 	channelNew, err := NewLightningChannel(
 		channelOld.Signer, channelOld.pCache, nodeChannels[0],
+		channelOld.sigPool,
 	)
 	if err != nil {
 		return nil, err
@@ -2786,7 +2790,6 @@ func TestChanSyncOweCommitment(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart alice: %v", err)
 	}
-	defer aliceChannel.Stop()
 	assertAliceCommitRetransmit()
 
 	// TODO(roasbeef): restart bob as well???
@@ -3053,7 +3056,6 @@ func TestChanSyncOweRevocation(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart alice: %v", err)
 	}
-	defer aliceChannel.Stop()
 	assertAliceOwesRevoke()
 
 	// TODO(roasbeef): restart bob too???
@@ -3233,7 +3235,6 @@ func TestChanSyncOweRevocationAndCommit(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart channel: %v", err)
 	}
-	defer bobChannel.Stop()
 	assertBobSendsRevokeAndCommit()
 
 	// We'll now finish the state transition by having Alice process both
@@ -3428,7 +3429,6 @@ func TestChanSyncOweRevocationAndCommitForceTransition(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart channel: %v", err)
 	}
-	defer bobChannel.Stop()
 	if len(bobMsgsToSend) != 2 {
 		t.Fatalf("expected bob to send %v messages, instead "+
 			"sends: %v", 2, spew.Sdump(bobMsgsToSend))
@@ -3775,12 +3775,10 @@ func TestChannelRetransmissionFeeUpdate(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart alice: %v", err)
 	}
-	defer aliceChannel.Stop()
 	bobChannel, err = restartChannel(bobChannel)
 	if err != nil {
 		t.Fatalf("unable to restart channel: %v", err)
 	}
-	defer bobChannel.Stop()
 
 	// Bob doesn't get this message so upon reconnection, they need to
 	// synchronize. Alice should conclude that she owes Bob a commitment,
@@ -4293,12 +4291,10 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart alice: %v", err)
 	}
-	defer aliceChannel.Stop()
 	bobChannel, err = restartChannel(bobChannel)
 	if err != nil {
 		t.Fatalf("unable to restart bob: %v", err)
 	}
-	defer bobChannel.Stop()
 
 	// With both nodes restarted, Bob will now attempt to cancel one of
 	// Alice's HTLC's.
@@ -4399,12 +4395,10 @@ func TestLockedInHtlcForwardingSkipAfterRestart(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart alice: %v", err)
 	}
-	defer aliceChannel.Stop()
 	bobChannel, err = restartChannel(bobChannel)
 	if err != nil {
 		t.Fatalf("unable to restart bob: %v", err)
 	}
-	defer bobChannel.Stop()
 
 	// Readd the Fail to both Alice and Bob's channels, as the non-committed
 	// update will not have survived the restart.
@@ -5590,19 +5584,19 @@ func TestChannelRestoreUpdateLogs(t *testing.T) {
 	// the update logs up to the correct state set up above.
 	newAliceChannel, err := NewLightningChannel(
 		aliceChannel.Signer, nil, aliceChannel.channelState,
+		aliceChannel.sigPool,
 	)
 	if err != nil {
 		t.Fatalf("unable to create new channel: %v", err)
 	}
-	defer newAliceChannel.Stop()
 
 	newBobChannel, err := NewLightningChannel(
 		bobChannel.Signer, nil, bobChannel.channelState,
+		bobChannel.sigPool,
 	)
 	if err != nil {
 		t.Fatalf("unable to create new channel: %v", err)
 	}
-	defer newBobChannel.Stop()
 
 	// compare all the logs between the old and new channels, to make sure
 	// they all got restored properly.
@@ -5669,13 +5663,14 @@ func assertInLogs(t *testing.T, channel *LightningChannel, numAddsLocal,
 // expected state.
 func restoreAndAssert(t *testing.T, channel *LightningChannel, numAddsLocal,
 	numFailsLocal, numAddsRemote, numFailsRemote int) {
+
 	newChannel, err := NewLightningChannel(
 		channel.Signer, nil, channel.channelState,
+		channel.sigPool,
 	)
 	if err != nil {
 		t.Fatalf("unable to create new channel: %v", err)
 	}
-	defer newChannel.Stop()
 
 	assertInLog(t, newChannel.localUpdateLog, numAddsLocal, numFailsLocal)
 	assertInLog(t, newChannel.remoteUpdateLog, numAddsRemote, numFailsRemote)
@@ -5863,12 +5858,10 @@ func TestDuplicateFailRejection(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart channel: %v", err)
 	}
-	defer bobChannel.Stop()
 	aliceChannel, err = restartChannel(aliceChannel)
 	if err != nil {
 		t.Fatalf("unable to restart channel: %v", err)
 	}
-	defer aliceChannel.Stop()
 
 	// If we try to fail the same HTLC again, then we should get an error.
 	err = bobChannel.FailHTLC(0, []byte("failreason"), nil, nil, nil)
@@ -5945,12 +5938,10 @@ func TestDuplicateSettleRejection(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to restart channel: %v", err)
 	}
-	defer bobChannel.Stop()
 	aliceChannel, err = restartChannel(aliceChannel)
 	if err != nil {
 		t.Fatalf("unable to restart channel: %v", err)
 	}
-	defer aliceChannel.Stop()
 
 	// If we try to fail the same HTLC again, then we should get an error.
 	err = bobChannel.SettleHTLC(alicePreimage, uint64(0), nil, nil, nil)
@@ -5983,9 +5974,9 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
 		channel *LightningChannel, remoteLog bool, htlcIndex uint64,
 		expLocal, expRemote uint64) *LightningChannel {
 
-		channel.Stop()
 		newChannel, err := NewLightningChannel(
 			channel.Signer, nil, channel.channelState,
+			channel.sigPool,
 		)
 		if err != nil {
 			t.Fatalf("unable to create new channel: %v", err)
@@ -6152,7 +6143,4 @@ func TestChannelRestoreCommitHeight(t *testing.T) {
 	// HTLC an add height.
 	bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 0, 2, 1)
 	bobChannel = restoreAndAssertCommitHeights(t, bobChannel, true, 1, 2, 2)
-
-	aliceChannel.Stop()
-	bobChannel.Stop()
 }
@@ -19,119 +19,118 @@ const (
 	// TODO(roasbeef): job buffer pool?
 )
 
-// verifyJob is a job sent to the sigPool to verify a signature on a
-// transaction. The items contained in the struct are necessary and sufficient
-// to verify the full signature. The passed sigHash closure function should be
-// set to a function that generates the relevant sighash.
+// VerifyJob is a job sent to the sigPool sig pool to verify a signature
+// on a transaction. The items contained in the struct are necessary and
+// sufficient to verify the full signature. The passed sigHash closure function
+// should be set to a function that generates the relevant sighash.
 //
 // TODO(roasbeef): when we move to ecschnorr, make into batch signature
 // verification using bos-coster (or pip?).
-type verifyJob struct {
-	// pubKey is the public key that was used to generate the purported
+type VerifyJob struct {
+	// PubKey is the public key that was used to generate the purported
 	// valid signature. Note that with the current channel construction,
 	// this public key will likely have been tweaked using the current per
 	// commitment point for a particular commitment transactions.
-	pubKey *btcec.PublicKey
+	PubKey *btcec.PublicKey
 
-	// sig is the raw signature generated using the above public key. This
+	// Sig is the raw signature generated using the above public key. This
 	// is the signature to be verified.
-	sig *btcec.Signature
+	Sig *btcec.Signature
 
-	// sigHash is a function closure generates the sighashes that the
+	// SigHash is a function closure generates the sighashes that the
 	// passed signature is known to have signed.
-	sigHash func() ([]byte, error)
+	SigHash func() ([]byte, error)
 
-	// htlcIndex is the index of the HTLC from the PoV of the remote
+	// HtlcIndex is the index of the HTLC from the PoV of the remote
 	// party's update log.
-	htlcIndex uint64
+	HtlcIndex uint64
 
-	// cancel is a channel that should be closed if the caller wishes to
+	// Cancel is a channel that should be closed if the caller wishes to
 	// cancel all pending verification jobs part of a single batch. This
 	// channel is to be closed in the case that a single signature in a
 	// batch has been returned as invalid, as there is no need to verify
 	// the remainder of the signatures.
-	cancel chan struct{}
+	Cancel chan struct{}
 
-	// errResp is the channel that the result of the signature verification
+	// ErrResp is the channel that the result of the signature verification
 	// is to be sent over. In the see that the signature is valid, a nil
 	// error will be passed. Otherwise, a concrete error detailing the
 	// issue will be passed.
-	errResp chan *htlcIndexErr
+	ErrResp chan *HtlcIndexErr
 }
 
-// verifyJobErr is a special type of error that also includes a pointer to the
-// original validation job. Ths error message allows us to craft more detailed
+// HtlcIndexErr is a special type of error that also includes a pointer to the
+// original validation job. This error message allows us to craft more detailed
 // errors at upper layers.
-type htlcIndexErr struct {
+type HtlcIndexErr struct {
 	error
 
-	*verifyJob
+	*VerifyJob
}
 
-// signJob is a job sent to the sigPool to generate a valid signature according
-// to the passed SignDescriptor for the passed transaction. Jobs are intended
-// to be sent in batches in order to parallelize the job of generating
-// signatures for a new commitment transaction.
-type signJob struct {
-	// signDesc is intended to be a full populated SignDescriptor which
+// SignJob is a job sent to the sigPool sig pool to generate a valid
+// signature according to the passed SignDescriptor for the passed transaction.
+// Jobs are intended to be sent in batches in order to parallelize the job of
+// generating signatures for a new commitment transaction.
+type SignJob struct {
+	// SignDesc is intended to be a full populated SignDescriptor which
 	// encodes the necessary material (keys, witness script, etc) required
 	// to generate a valid signature for the specified input.
-	signDesc SignDescriptor
+	SignDesc SignDescriptor
 
-	// tx is the transaction to be signed. This is required to generate the
+	// Tx is the transaction to be signed. This is required to generate the
 	// proper sighash for the input to be signed.
-	tx *wire.MsgTx
+	Tx *wire.MsgTx
 
-	// outputIndex is the output index of the HTLC on the commitment
+	// OutputIndex is the output index of the HTLC on the commitment
 	// transaction being signed.
-	outputIndex int32
+	OutputIndex int32
 
-	// cancel is a channel that should be closed if the caller wishes to
+	// Cancel is a channel that should be closed if the caller wishes to
 	// abandon all pending sign jobs part of a single batch.
-	cancel chan struct{}
+	Cancel chan struct{}
 
-	// resp is the channel that the response to this particular signJob
+	// Resp is the channel that the response to this particular SignJob
 	// will be sent over.
 	//
 	// TODO(roasbeef): actually need to allow caller to set, need to retain
 	// order mark commit sig as special
-	resp chan signJobResp
+	Resp chan SignJobResp
 }
 
-// signJobResp is the response to a sign job. Both channels are to be read in
+// SignJobResp is the response to a sign job. Both channels are to be read in
 // order to ensure no unnecessary goroutine blocking occurs. Additionally, both
 // channels should be buffered.
-type signJobResp struct {
-	// sig is the generated signature for a particular signJob In the case
+type SignJobResp struct {
+	// Sig is the generated signature for a particular SignJob In the case
 	// of an error during signature generation, then this value sent will
 	// be nil.
-	sig lnwire.Sig
+	Sig lnwire.Sig
 
-	// err is the error that occurred when executing the specified
+	// Err is the error that occurred when executing the specified
 	// signature job. In the case that no error occurred, this value will
 	// be nil.
-	err error
+	Err error
 }
 
-// sigPool is a struct that is meant to allow the current channel state machine
-// to parallelize all signature generation and verification. This struct is
-// needed as _each_ HTLC when creating a commitment transaction requires a
-// signature, and similarly a receiver of a new commitment must verify all the
-// HTLC signatures included within the CommitSig message. A pool of workers
-// will be maintained by the sigPool. Batches of jobs (either to sign or
-// verify) can be sent to the pool of workers which will asynchronously perform
-// the specified job.
-//
-// TODO(roasbeef): rename?
-// * ecdsaPool?
-type sigPool struct {
+// TODO(roasbeef); fix description
+
+// SigPool is a struct that is meant to allow the current channel state
+// machine to parallelize all signature generation and verification. This
+// struct is needed as _each_ HTLC when creating a commitment transaction
+// requires a signature, and similarly a receiver of a new commitment must
+// verify all the HTLC signatures included within the CommitSig message. A pool
+// of workers will be maintained by the sigPool. Batches of jobs (either
+// to sign or verify) can be sent to the pool of workers which will
+// asynchronously perform the specified job.
+type SigPool struct {
 	started uint32 // To be used atomically.
 	stopped uint32 // To be used atomically.
 
 	signer Signer
 
-	verifyJobs chan verifyJob
-	signJobs   chan signJob
+	verifyJobs chan VerifyJob
+	signJobs   chan SignJob
 
 	wg   sync.WaitGroup
 	quit chan struct{}
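With the job types above exported, callers outside lnwallet can hand work to the pool directly. The following is a hedged sketch of a single sign-job round trip, assuming a running *lnwallet.SigPool plus a prepared *wire.MsgTx and lnwallet.SignDescriptor supplied by the caller; only the exported field names and SubmitSignBatch come from this commit.

// signOne submits one sign job and blocks for its result. The Resp
// channel is buffered so a pool worker never blocks on delivery, per the
// SignJobResp comment above.
func signOne(pool *lnwallet.SigPool, tx *wire.MsgTx,
	signDesc lnwallet.SignDescriptor) (lnwire.Sig, error) {

	cancel := make(chan struct{})
	job := lnwallet.SignJob{
		Tx:          tx,
		SignDesc:    signDesc,
		OutputIndex: 0,
		Cancel:      cancel,
		Resp:        make(chan lnwallet.SignJobResp, 1),
	}

	pool.SubmitSignBatch([]lnwallet.SignJob{job})

	resp := <-job.Resp
	if resp.Err != nil {
		// Closing the shared cancel channel would abandon any sibling
		// jobs still pending in the same batch.
		close(cancel)
		return lnwire.Sig{}, resp.Err
	}

	return resp.Sig, nil
}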
@@ -139,22 +138,22 @@ type sigPool struct {
 	numWorkers int
 }
 
-// newSigPool creates a new signature pool with the specified number of
+// NewSigPool creates a new signature pool with the specified number of
 // workers. The recommended parameter for the number of works is the number of
 // physical CPU cores available on the target machine.
-func newSigPool(numWorkers int, signer Signer) *sigPool {
-	return &sigPool{
+func NewSigPool(numWorkers int, signer Signer) *SigPool {
+	return &SigPool{
 		signer:     signer,
 		numWorkers: numWorkers,
-		verifyJobs: make(chan verifyJob, jobBuffer),
-		signJobs:   make(chan signJob, jobBuffer),
+		verifyJobs: make(chan VerifyJob, jobBuffer),
+		signJobs:   make(chan SignJob, jobBuffer),
 		quit:       make(chan struct{}),
 	}
 }
 
-// Start starts of all goroutines that the sigPool needs to carry out its
-// duties.
-func (s *sigPool) Start() error {
+// Start starts of all goroutines that the sigPool sig pool needs to
+// carry out its duties.
+func (s *SigPool) Start() error {
 	if !atomic.CompareAndSwapUint32(&s.started, 0, 1) {
 		return nil
 	}
@@ -169,7 +168,7 @@ func (s *sigPool) Start() error {
 
 // Stop signals any active workers carrying out jobs to exit so the sigPool can
 // gracefully shutdown.
-func (s *sigPool) Stop() error {
+func (s *SigPool) Stop() error {
 	if !atomic.CompareAndSwapUint32(&s.stopped, 0, 1) {
 		return nil
 	}
@@ -180,11 +179,11 @@ func (s *sigPool) Stop() error {
 	return nil
 }
 
-// poolWorker is the main worker goroutine within the sigPool. Individual
-// batches are distributed amongst each of the active workers. The workers then
-// execute the task based on the type of job, and return the result back to
-// caller.
-func (s *sigPool) poolWorker() {
+// poolWorker is the main worker goroutine within the sigPool sig pool.
+// Individual batches are distributed amongst each of the active workers. The
+// workers then execute the task based on the type of job, and return the
+// result back to caller.
+func (s *SigPool) poolWorker() {
 	defer s.wg.Done()
 
 	for {
@@ -195,16 +194,17 @@ func (s *sigPool) poolWorker() {
 		// send the result along with a possible error back to the
 		// caller.
 		case sigMsg := <-s.signJobs:
-			rawSig, err := s.signer.SignOutputRaw(sigMsg.tx,
-				&sigMsg.signDesc)
+			rawSig, err := s.signer.SignOutputRaw(
+				sigMsg.Tx, &sigMsg.SignDesc,
+			)
 			if err != nil {
 				select {
-				case sigMsg.resp <- signJobResp{
-					sig: lnwire.Sig{},
-					err: err,
+				case sigMsg.Resp <- SignJobResp{
+					Sig: lnwire.Sig{},
+					Err: err,
 				}:
 					continue
-				case <-sigMsg.cancel:
+				case <-sigMsg.Cancel:
 					continue
 				case <-s.quit:
 					return
@ -213,11 +213,11 @@ func (s *sigPool) poolWorker() {

             sig, err := lnwire.NewSigFromRawSignature(rawSig)
             select {
-            case sigMsg.resp <- signJobResp{
-                sig: sig,
-                err: err,
+            case sigMsg.Resp <- SignJobResp{
+                Sig: sig,
+                Err: err,
             }:
-            case <-sigMsg.cancel:
+            case <-sigMsg.Cancel:
                 continue
             case <-s.quit:
                 return

@ -227,43 +227,45 @@ func (s *sigPool) poolWorker() {
         // world. We'll attempt to construct the sighash, parse the
         // signature, and finally verify the signature.
         case verifyMsg := <-s.verifyJobs:
-            sigHash, err := verifyMsg.sigHash()
+            sigHash, err := verifyMsg.SigHash()
             if err != nil {
                 select {
-                case verifyMsg.errResp <- &htlcIndexErr{
+                case verifyMsg.ErrResp <- &HtlcIndexErr{
                     error:     err,
-                    verifyJob: &verifyMsg,
+                    VerifyJob: &verifyMsg,
                 }:
                     continue
-                case <-verifyMsg.cancel:
+                case <-verifyMsg.Cancel:
                     continue
                 }
             }

-            rawSig := verifyMsg.sig
+            rawSig := verifyMsg.Sig

-            if !rawSig.Verify(sigHash, verifyMsg.pubKey) {
+            if !rawSig.Verify(sigHash, verifyMsg.PubKey) {
                 err := fmt.Errorf("invalid signature "+
-                    "sighash: %x, sig: %x", sigHash, rawSig.Serialize())
+                    "sighash: %x, sig: %x", sigHash,
+                    rawSig.Serialize())

                 select {
-                case verifyMsg.errResp <- &htlcIndexErr{
+                case verifyMsg.ErrResp <- &HtlcIndexErr{
                     error:     err,
-                    verifyJob: &verifyMsg,
+                    VerifyJob: &verifyMsg,
                 }:
-                case <-verifyMsg.cancel:
+                case <-verifyMsg.Cancel:
                 case <-s.quit:
                     return
                 }
             } else {
                 select {
-                case verifyMsg.errResp <- nil:
-                case <-verifyMsg.cancel:
+                case verifyMsg.ErrResp <- nil:
+                case <-verifyMsg.Cancel:
                 case <-s.quit:
                     return
                 }
             }

-        // The sigPool is exiting, so we will as well.
+        // The sig pool is exiting, so we will as well.
         case <-s.quit:
             return
         }
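Worth noting in the worker loop above: every result send is raced against the job's Cancel channel and the pool's quit channel, so a worker can never block forever on a caller that has abandoned its job. The same pattern in isolation, with an illustrative stand-in type:

    // resp stands in for the pool's concrete response types (SignJobResp,
    // *HtlcIndexErr).
    type resp struct{ err error }

    // trySend mirrors poolWorker's send pattern: deliver the result
    // unless the caller cancelled or the pool is shutting down.
    func trySend(out chan<- resp, r resp, cancel, quit <-chan struct{}) bool {
        select {
        case out <- r:
            return true
        case <-cancel:
            // The caller bailed early; drop the result.
            return false
        case <-quit:
            // The pool itself is exiting.
            return false
        }
    }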
@ -271,14 +273,14 @@ func (s *sigPool) poolWorker() {
 }

 // SubmitSignBatch submits a batch of signature jobs to the sigPool. The
-// response and cancel channels for each of the signJob's are expected to be
-// fully populated, as the response for each job will be sent over the response
-// channel within the job itself.
-func (s *sigPool) SubmitSignBatch(signJobs []signJob) {
+// response and cancel channels for each of the SignJob's are expected to be
+// fully populated, as the response for each job will be sent over the
+// response channel within the job itself.
+func (s *SigPool) SubmitSignBatch(signJobs []SignJob) {
     for _, job := range signJobs {
         select {
         case s.signJobs <- job:
-        case <-job.cancel:
+        case <-job.Cancel:
             // TODO(roasbeef): return error?
         case <-s.quit:
             return
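As the doc comment says, the caller populates each job's response and cancel channels before submitting; SubmitSignBatch only enqueues. A hedged sketch of a full round trip under the exported field names visible in this diff, assuming each job already carries its Tx and SignDesc (the buffer of one is illustrative, chosen so a worker's send never blocks if the batch is abandoned partway through):

    func signBatch(pool *lnwallet.SigPool,
        jobs []lnwallet.SignJob) ([]lnwire.Sig, error) {

        cancel := make(chan struct{})
        for i := range jobs {
            jobs[i].Resp = make(chan lnwallet.SignJobResp, 1)
            jobs[i].Cancel = cancel
        }
        pool.SubmitSignBatch(jobs)

        sigs := make([]lnwire.Sig, 0, len(jobs))
        for i := range jobs {
            r := <-jobs[i].Resp
            if r.Err != nil {
                // Tell the workers to skip whatever is still queued.
                close(cancel)
                return nil, r.Err
            }
            sigs = append(sigs, r.Sig)
        }
        return sigs, nil
    }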
@ -291,18 +293,18 @@ func (s *sigPool) SubmitSignBatch(signJobs []signJob) {
 // denoting if signature verification was valid or not. The passed cancelChan
 // allows the caller to cancel all pending jobs in the case that they wish to
 // bail early.
-func (s *sigPool) SubmitVerifyBatch(verifyJobs []verifyJob,
-    cancelChan chan struct{}) <-chan *htlcIndexErr {
+func (s *SigPool) SubmitVerifyBatch(verifyJobs []VerifyJob,
+    cancelChan chan struct{}) <-chan *HtlcIndexErr {

-    errChan := make(chan *htlcIndexErr, len(verifyJobs))
+    errChan := make(chan *HtlcIndexErr, len(verifyJobs))

     for _, job := range verifyJobs {
-        job.cancel = cancelChan
-        job.errResp = errChan
+        job.Cancel = cancelChan
+        job.ErrResp = errChan

         select {
         case s.verifyJobs <- job:
-        case <-job.cancel:
+        case <-job.Cancel:
             return errChan
         }
     }
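Unlike the sign path, SubmitVerifyBatch wires Cancel and ErrResp itself and hands back one buffered channel for the whole batch, so the consuming side is simpler: read one response per submitted job, where nil means the signature verified. A sketch, assuming the jobs arrive with their SigHash closure, Sig, and PubKey already set:

    func verifyBatch(pool *lnwallet.SigPool,
        jobs []lnwallet.VerifyJob) error {

        cancel := make(chan struct{})
        errChan := pool.SubmitVerifyBatch(jobs, cancel)

        // One response per job is expected until the batch is cancelled:
        // nil on success, or a *HtlcIndexErr identifying the bad HTLC.
        for range jobs {
            if jobErr := <-errChan; jobErr != nil {
                close(cancel) // no point verifying the remainder
                return jobErr
            }
        }
        return nil
    }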
@ -309,18 +309,24 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error)
     }

     // TODO(roasbeef): make mock version of pre-image store

+    alicePool := NewSigPool(1, aliceSigner)
     channelAlice, err := NewLightningChannel(
-        aliceSigner, pCache, aliceChannelState,
+        aliceSigner, pCache, aliceChannelState, alicePool,
     )
     if err != nil {
         return nil, nil, nil, err
     }
+    alicePool.Start()
+
+    bobPool := NewSigPool(1, bobSigner)
     channelBob, err := NewLightningChannel(
-        bobSigner, pCache, bobChannelState,
+        bobSigner, pCache, bobChannelState, bobPool,
     )
     if err != nil {
         return nil, nil, nil, err
     }
+    bobPool.Start()

     err = SetStateNumHint(
         aliceCommitTx, 0, channelAlice.stateHintObfuscator,

@ -346,8 +352,8 @@ func CreateTestChannels() (*LightningChannel, *LightningChannel, func(), error)
         os.RemoveAll(bobPath)
         os.RemoveAll(alicePath)

-        channelAlice.Stop()
-        channelBob.Stop()
+        alicePool.Stop()
+        bobPool.Stop()
     }

     // Now that the channels are open, simulate the start of a session by
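One nit in the new test helpers, here and in createTestPeer further down: the error returned by Start() is discarded. A stricter variant under the same exported API (startPools is a hypothetical helper, not part of this change):

    // startPools starts each pool in order and unwinds the ones already
    // running if any Start call fails.
    func startPools(pools ...*lnwallet.SigPool) error {
        for i, p := range pools {
            if err := p.Start(); err != nil {
                for _, started := range pools[:i] {
                    started.Stop()
                }
                return err
            }
        }
        return nil
    }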
peer.go
@ -383,6 +383,7 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
     for _, dbChan := range chans {
         lnChan, err := lnwallet.NewLightningChannel(
             p.server.cc.signer, p.server.witnessBeacon, dbChan,
+            p.server.sigPool,
         )
         if err != nil {
             return err

@ -400,7 +401,6 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
         if dbChan.ChanStatus() != channeldb.Default {
             peerLog.Warnf("ChannelPoint(%v) has status %v, won't "+
                 "start.", chanPoint, dbChan.ChanStatus())
-            lnChan.Stop()
             continue
         }

@ -409,13 +409,11 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
         if _, ok := p.failedChannels[chanID]; ok {
             peerLog.Warnf("ChannelPoint(%v) is failed, won't "+
                 "start.", chanPoint)
-            lnChan.Stop()
             continue
         }

         _, currentHeight, err := p.server.cc.chainIO.GetBestBlock()
         if err != nil {
-            lnChan.Stop()
             return err
         }

@ -425,7 +423,6 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
         graph := p.server.chanDB.ChannelGraph()
         info, p1, p2, err := graph.FetchChannelEdgesByOutpoint(chanPoint)
         if err != nil && err != channeldb.ErrEdgeNotFound {
-            lnChan.Stop()
             return err
         }

@ -473,7 +470,6 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
             *chanPoint,
         )
         if err != nil {
-            lnChan.Stop()
             return err
         }

@ -483,7 +479,6 @@ func (p *peer) loadActiveChannels(chans []*channeldb.OpenChannel) error {
             currentHeight, true,
         )
         if err != nil {
-            lnChan.Stop()
             return fmt.Errorf("unable to add link %v to switch: %v",
                 chanPoint, err)
         }
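The repeated deletions above are the payoff of the new ownership model: a LightningChannel no longer runs goroutines of its own, so early-return paths can simply drop the struct and let the garbage collector reclaim it, with teardown concentrated in the one owner of the pool. Schematically, using the same call from this diff:

    // loadOne is a schematic stand-in for the error paths above: on
    // failure we just return, with no lnChan.Stop() to call. The only
    // teardown left is s.sigPool.Stop() in server.Stop().
    func loadOne(p *peer, dbChan *channeldb.OpenChannel) error {
        lnChan, err := lnwallet.NewLightningChannel(
            p.server.cc.signer, p.server.witnessBeacon, dbChan,
            p.server.sigPool,
        )
        if err != nil {
            return err
        }
        _ = lnChan
        return nil
    }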
@ -1596,7 +1591,7 @@ out:
             // easily according to its channel ID.
             lnChan, err := lnwallet.NewLightningChannel(
                 p.server.cc.signer, p.server.witnessBeacon,
-                newChan,
+                newChan, p.server.sigPool,
             )
             if err != nil {
                 p.activeChanMtx.Unlock()

@ -1624,7 +1619,6 @@ out:
                     "block: %v", err)
                 peerLog.Errorf(err.Error())

-                lnChan.Stop()
                 newChanReq.err <- err
                 continue
             }

@ -1636,7 +1630,6 @@ out:
                     "chain events: %v", err)
                 peerLog.Errorf(err.Error())

-                lnChan.Stop()
                 newChanReq.err <- err
                 continue
             }

@ -1666,7 +1659,6 @@ out:
                     p.PubKey())
                 peerLog.Errorf(err.Error())

-                lnChan.Stop()
                 newChanReq.err <- err
                 continue
             }

@ -2019,8 +2011,6 @@ func (p *peer) finalizeChanClosure(chanCloser *channelCloser) {
         }
     }

-    chanCloser.cfg.channel.Stop()
-
     // Next, we'll launch a goroutine which will request to be notified by
     // the ChainNotifier once the closure transaction obtains a single
     // confirmation.
@ -2121,10 +2111,7 @@ func (p *peer) WipeChannel(chanPoint *wire.OutPoint) error {
     chanID := lnwire.NewChanIDFromOutPoint(chanPoint)

     p.activeChanMtx.Lock()
-    if channel, ok := p.activeChannels[chanID]; ok {
-        channel.Stop()
-        delete(p.activeChannels, chanID)
-    }
+    delete(p.activeChannels, chanID)
     p.activeChanMtx.Unlock()

     // Instruct the HtlcSwitch to close this link as the channel is no
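The existence check in WipeChannel could be dropped along with the Stop() call because Go's built-in delete is defined as a no-op when the key is absent; the lookup only ever existed to obtain a channel to stop. In isolation:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1}
        delete(m, "missing") // no-op: deleting an absent key is safe
        delete(m, "a")
        fmt.Println(len(m)) // 0
    }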
@ -1431,7 +1431,6 @@ func (r *rpcServer) CloseChannel(in *lnrpc.CloseChannelRequest,
     if err != nil {
         return err
     }
-    channel.Stop()

     // If a force closure was requested, then we'll handle all the details
     // around the creation and broadcast of the unilateral closure

@ -1667,7 +1666,7 @@ func (r *rpcServer) fetchOpenDbChannel(chanPoint wire.OutPoint) (

 // fetchActiveChannel attempts to locate a channel identified by its channel
 // point from the database's set of all currently opened channels and
-// return it as a fully popuplated state machine
+// return it as a fully populated state machine
 func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) (
     *lnwallet.LightningChannel, error) {

@ -1680,7 +1679,7 @@ func (r *rpcServer) fetchActiveChannel(chanPoint wire.OutPoint) (
     // we create a fully populated channel state machine which
     // uses the db channel as backing storage.
     return lnwallet.NewLightningChannel(
-        r.server.cc.wallet.Cfg.Signer, nil, dbChan,
+        r.server.cc.wallet.Cfg.Signer, nil, dbChan, nil,
     )
 }

@ -11,6 +11,7 @@ import (
     "net"
     "path/filepath"
     "regexp"
+    "runtime"
     "strconv"
     "sync"
     "sync/atomic"

@ -161,6 +162,8 @@ type server struct {

     connMgr *connmgr.ConnManager

+    sigPool *lnwallet.SigPool
+
     // globalFeatures feature vector which affects HTLCs and thus are also
     // advertised to other nodes.
     globalFeatures *lnwire.FeatureVector

@ -260,6 +263,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
     s := &server{
         chanDB: chanDB,
         cc:     cc,
+        sigPool: lnwallet.NewSigPool(runtime.NumCPU()*2, cc.signer),

         invoices: newInvoiceRegistry(chanDB),

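The daemon sizes the single shared pool at runtime.NumCPU()*2 workers, a fixed cost independent of the channel count; the sigpool doc comment earlier suggests the physical core count, so the extra factor is presumably headroom for workers parked on channel sends. A rough, illustrative way to observe the effect, assuming signer and chanStates test fixtures and a *testing.T:

    func TestSharedPoolGoroutines(t *testing.T) {
        before := runtime.NumGoroutine()

        pool := lnwallet.NewSigPool(runtime.NumCPU()*2, signer)
        if err := pool.Start(); err != nil {
            t.Fatal(err)
        }
        defer pool.Stop()

        for _, state := range chanStates {
            // Every channel shares the one pool and adds no goroutines.
            if _, err := lnwallet.NewLightningChannel(
                signer, nil, state, pool,
            ); err != nil {
                t.Fatal(err)
            }
        }

        // The delta stays near runtime.NumCPU()*2 no matter how many
        // channels were created above.
        t.Logf("goroutine growth: %d", runtime.NumGoroutine()-before)
    }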
@ -947,6 +951,9 @@ func (s *server) Start() error {
     // sufficient number of confirmations, or when the input for the
     // funding transaction is spent in an attempt at an uncooperative close
     // by the counterparty.
+    if err := s.sigPool.Start(); err != nil {
+        return err
+    }
     if err := s.cc.chainNotifier.Start(); err != nil {
         return err
     }

@ -1034,6 +1041,7 @@ func (s *server) Stop() error {
     }

     // Shutdown the wallet, funding manager, and the rpc server.
+    s.sigPool.Stop()
     s.cc.chainNotifier.Stop()
     s.chanRouter.Stop()
     s.htlcSwitch.Stop()
@ -298,18 +298,23 @@ func createTestPeer(notifier chainntnfs.ChainNotifier,
     aliceSigner := &mockSigner{aliceKeyPriv}
     bobSigner := &mockSigner{bobKeyPriv}

+    alicePool := lnwallet.NewSigPool(1, aliceSigner)
     channelAlice, err := lnwallet.NewLightningChannel(
-        aliceSigner, nil, aliceChannelState,
+        aliceSigner, nil, aliceChannelState, alicePool,
     )
     if err != nil {
         return nil, nil, nil, nil, err
     }
+    alicePool.Start()
+
+    bobPool := lnwallet.NewSigPool(1, bobSigner)
     channelBob, err := lnwallet.NewLightningChannel(
-        bobSigner, nil, bobChannelState,
+        bobSigner, nil, bobChannelState, bobPool,
     )
     if err != nil {
         return nil, nil, nil, nil, err
     }
+    bobPool.Start()

     chainIO := &mockChainIO{}
     wallet := &lnwallet.LightningWallet{