diff --git a/lnd.go b/lnd.go
index f54820ed..c79ca924 100644
--- a/lnd.go
+++ b/lnd.go
@@ -217,18 +217,21 @@ func lndMain() error {
 	}
 
 	var (
-		privateWalletPw = lnwallet.DefaultPrivatePassphrase
-		publicWalletPw  = lnwallet.DefaultPublicPassphrase
-		birthday        = time.Now()
-		recoveryWindow  uint32
-		unlockedWallet  *wallet.Wallet
+		walletInitParams WalletUnlockParams
+		privateWalletPw  = lnwallet.DefaultPrivatePassphrase
+		publicWalletPw   = lnwallet.DefaultPublicPassphrase
 	)
 
+	// If the user didn't request a seed, then we'll manually assume a
+	// wallet birthday of now, as otherwise the seed would've specified
+	// this information.
+	walletInitParams.Birthday = time.Now()
+
 	// We wait until the user provides a password over RPC. In case lnd is
 	// started with the --noseedbackup flag, we use the default password
 	// for wallet encryption.
 	if !cfg.NoSeedBackup {
-		walletInitParams, err := waitForWalletPassword(
+		params, err := waitForWalletPassword(
 			cfg.RPCListeners, cfg.RESTListeners, serverOpts,
 			proxyOpts, tlsConf,
 		)
@@ -236,16 +239,14 @@ func lndMain() error {
 			return err
 		}
 
+		walletInitParams = *params
 		privateWalletPw = walletInitParams.Password
 		publicWalletPw = walletInitParams.Password
-		birthday = walletInitParams.Birthday
-		recoveryWindow = walletInitParams.RecoveryWindow
-		unlockedWallet = walletInitParams.Wallet
 
-		if recoveryWindow > 0 {
+		if walletInitParams.RecoveryWindow > 0 {
 			ltndLog.Infof("Wallet recovery mode enabled with "+
 				"address lookahead of %d addresses",
-				recoveryWindow)
+				walletInitParams.RecoveryWindow)
 		}
 	}
 
@@ -264,7 +265,7 @@ func lndMain() error {
 	// Try to unlock the macaroon store with the private password.
 	err = macaroonService.CreateUnlock(&privateWalletPw)
 	if err != nil {
-		srvrLog.Error(err)
+		srvrLog.Errorf("unable to unlock macaroons: %v", err)
 		return err
 	}
 
@@ -288,8 +289,9 @@ func lndMain() error {
 	// instances of the pertinent interfaces required to operate the
 	// Lightning Network Daemon.
 	activeChainControl, chainCleanUp, err := newChainControlFromConfig(
-		cfg, chanDB, privateWalletPw, publicWalletPw, birthday,
-		recoveryWindow, unlockedWallet, neutrinoCS,
+		cfg, chanDB, privateWalletPw, publicWalletPw,
+		walletInitParams.Birthday, walletInitParams.RecoveryWindow,
+		walletInitParams.Wallet, neutrinoCS,
 	)
 	if err != nil {
 		fmt.Printf("unable to create chain control: %v\n", err)
@@ -327,6 +329,7 @@ func lndMain() error {
 	// connections.
 	server, err := newServer(
 		cfg.Listeners, chanDB, activeChainControl, idPrivKey,
+		walletInitParams.ChansToRestore,
 	)
 	if err != nil {
 		srvrLog.Errorf("unable to create server: %v\n", err)
@@ -672,6 +675,10 @@ type WalletUnlockParams struct {
 	// later when lnd actually uses it). Because unlocking involves scrypt
 	// which is resource intensive, we want to avoid doing it twice.
 	Wallet *wallet.Wallet
+
+	// ChansToRestore is a set of static channel backups that should be
+	// restored before the main server instance starts up.
+	ChansToRestore walletunlocker.ChannelsToRecover
 }
 
 // waitForWalletPassword will spin up gRPC and REST endpoints for the
@@ -825,24 +832,23 @@ func waitForWalletPassword(grpcEndpoints, restEndpoints []net.Addr,
 			return nil, err
 		}
 
-		walletInitParams := &WalletUnlockParams{
+		return &WalletUnlockParams{
 			Password:       password,
 			Birthday:       birthday,
 			RecoveryWindow: recoveryWindow,
 			Wallet:         newWallet,
-		}
-
-		return walletInitParams, nil
+			ChansToRestore: initMsg.ChanBackups,
+		}, nil
 
 	// The wallet has already been created in the past, and is simply being
 	// unlocked. So we'll just return these passphrases.
 	case unlockMsg := <-pwService.UnlockMsgs:
-		walletInitParams := &WalletUnlockParams{
+		return &WalletUnlockParams{
 			Password:       unlockMsg.Passphrase,
 			RecoveryWindow: unlockMsg.RecoveryWindow,
 			Wallet:         unlockMsg.Wallet,
-		}
-		return walletInitParams, nil
+			ChansToRestore: unlockMsg.ChanBackups,
+		}, nil
 
 	case <-signal.ShutdownChannel():
 		return nil, fmt.Errorf("shutting down")
diff --git a/server.go b/server.go
index 6ffbe071..a4739173 100644
--- a/server.go
+++ b/server.go
@@ -26,6 +26,7 @@ import (
 	sphinx "github.com/lightningnetwork/lightning-onion"
 	"github.com/lightningnetwork/lnd/autopilot"
 	"github.com/lightningnetwork/lnd/brontide"
+	"github.com/lightningnetwork/lnd/chanbackup"
 	"github.com/lightningnetwork/lnd/channeldb"
 	"github.com/lightningnetwork/lnd/channelnotifier"
 	"github.com/lightningnetwork/lnd/contractcourt"
@@ -45,6 +46,7 @@ import (
 	"github.com/lightningnetwork/lnd/sweep"
 	"github.com/lightningnetwork/lnd/ticker"
 	"github.com/lightningnetwork/lnd/tor"
+	"github.com/lightningnetwork/lnd/walletunlocker"
 	"github.com/lightningnetwork/lnd/zpay32"
 )
 
@@ -184,6 +186,10 @@ type server struct {
 	// changed since last start.
 	currentNodeAnn *lnwire.NodeAnnouncement
 
+	// chansToRestore is the set of channels that upon starting, the server
+	// should attempt to restore/recover.
+	chansToRestore walletunlocker.ChannelsToRecover
+
 	quit chan struct{}
 
 	wg sync.WaitGroup
@@ -237,7 +243,8 @@ func noiseDial(idPriv *btcec.PrivateKey) func(net.Addr) (net.Conn, error) {
 // newServer creates a new instance of the server which is to listen using the
 // passed listener address.
 func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
-	privKey *btcec.PrivateKey) (*server, error) {
+	privKey *btcec.PrivateKey,
+	chansToRestore walletunlocker.ChannelsToRecover) (*server, error) {
 
 	var err error
 
@@ -293,11 +300,12 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB, cc *chainControl,
 	}
 
 	s := &server{
-		chanDB:    chanDB,
-		cc:        cc,
-		sigPool:   lnwallet.NewSigPool(cfg.Workers.Sig, cc.signer),
-		writePool: writePool,
-		readPool:  readPool,
+		chanDB:         chanDB,
+		cc:             cc,
+		sigPool:        lnwallet.NewSigPool(cfg.Workers.Sig, cc.signer),
+		writePool:      writePool,
+		readPool:       readPool,
+		chansToRestore: chansToRestore,
 
 		invoices: invoices.NewRegistry(chanDB, decodeFinalCltvExpiry),
 
@@ -1074,8 +1082,6 @@ func (s *server) Start() error {
 	if err := s.fundingMgr.Start(); err != nil {
 		return err
 	}
-	s.connMgr.Start()
-
 	if err := s.invoices.Start(); err != nil {
 		return err
 	}
@@ -1083,6 +1089,37 @@ func (s *server) Start() error {
 		return err
 	}
 
+	// Before we start the connMgr, we'll check to see if we have any
+	// backups to recover. We do this now as we want to ensure that we have
+	// all the information we need to handle channel recovery _before_ we
+	// even accept connections from any peers.
+	chanRestorer := &chanDBRestorer{
+		db:         s.chanDB,
+		secretKeys: s.cc.keyRing,
+	}
+	if len(s.chansToRestore.PackedSingleChanBackups) != 0 {
+		err := chanbackup.UnpackAndRecoverSingles(
+			s.chansToRestore.PackedSingleChanBackups,
+			s.cc.keyRing, chanRestorer, s,
+		)
+		if err != nil {
+			return fmt.Errorf("unable to unpack single "+
+				"backups: %v", err)
+		}
+	}
+	if len(s.chansToRestore.PackedMultiChanBackup) != 0 {
+		err := chanbackup.UnpackAndRecoverMulti(
+			s.chansToRestore.PackedMultiChanBackup,
+			s.cc.keyRing, chanRestorer, s,
+		)
+		if err != nil {
+			return fmt.Errorf("unable to unpack chan "+
+				"backup: %v", err)
+		}
+	}
+
+	s.connMgr.Start()
+
 	// With all the relevant sub-systems started, we'll now attempt to
 	// establish persistent connections to our direct channel collaborators
 	// within the network. Before doing so however, we'll prune our set of
@@ -2761,7 +2798,8 @@ func (s *server) ConnectToPeer(addr *lnwire.NetAddress, perm bool) error {
 		s.persistentPeersBackoff[targetPub] = cfg.MinBackoff
 	}
 	s.persistentConnReqs[targetPub] = append(
-		s.persistentConnReqs[targetPub], connReq)
+		s.persistentConnReqs[targetPub], connReq,
+	)
 	s.mu.Unlock()
 
 	go s.connMgr.Connect(connReq)
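Reviewer note: the core behavioral change above is an ordering constraint in server.Start. Any static channel backups handed over at unlock time must be recovered before s.connMgr.Start() runs, so no peer connection is accepted or dialed until the recovered channels exist in the database. The self-contained Go sketch below mirrors only that control flow; the channelsToRecover, restorer, and fakeRestorer names are simplified stand-ins invented for illustration, not the real walletunlocker.ChannelsToRecover, chanDBRestorer, or chanbackup.UnpackAndRecover* APIs.

package main

import "fmt"

// channelsToRecover is a simplified stand-in for the real
// walletunlocker.ChannelsToRecover: raw single-channel backups plus an
// optional packed multi-channel backup blob.
type channelsToRecover struct {
	packedSingleChanBackups [][]byte
	packedMultiChanBackup   []byte
}

// restorer is a stand-in for the role chanDBRestorer plays in server.Start:
// something that can write recovered channel shells back into the channel DB.
type restorer interface {
	restoreSingles(packed [][]byte) error
	restoreMulti(packed []byte) error
}

// fakeRestorer only logs what it would restore.
type fakeRestorer struct{}

func (fakeRestorer) restoreSingles(packed [][]byte) error {
	fmt.Printf("restoring %d single-channel backups\n", len(packed))
	return nil
}

func (fakeRestorer) restoreMulti(packed []byte) error {
	fmt.Printf("restoring multi-channel backup (%d bytes)\n", len(packed))
	return nil
}

// startServer mirrors the ordering introduced in the diff: recover any channel
// backups first, and only then start the connection manager, so no peer can be
// contacted before the recovered channels are present in the DB.
func startServer(toRestore channelsToRecover, r restorer, startConnMgr func()) error {
	if len(toRestore.packedSingleChanBackups) != 0 {
		if err := r.restoreSingles(toRestore.packedSingleChanBackups); err != nil {
			return fmt.Errorf("unable to unpack single backups: %v", err)
		}
	}
	if len(toRestore.packedMultiChanBackup) != 0 {
		if err := r.restoreMulti(toRestore.packedMultiChanBackup); err != nil {
			return fmt.Errorf("unable to unpack chan backup: %v", err)
		}
	}

	// Only after recovery has completed do we begin listening for and
	// dialing peers, which is why s.connMgr.Start() moved below the
	// recovery block in the diff.
	startConnMgr()
	return nil
}

func main() {
	toRestore := channelsToRecover{
		packedSingleChanBackups: [][]byte{{0x01}, {0x02}},
		packedMultiChanBackup:   []byte{0x03, 0x04},
	}
	err := startServer(toRestore, fakeRestorer{}, func() {
		fmt.Println("connMgr started")
	})
	if err != nil {
		fmt.Println("startup failed:", err)
	}
}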