Merge pull request #3184 from wpaulino/wtclient-subserver
multi: add watchtower client RPC subserver
This commit is contained in:
commit
8c9c4b52e8
@ -306,6 +306,7 @@ func main() {
|
||||
app.Commands = append(app.Commands, routerCommands()...)
|
||||
app.Commands = append(app.Commands, walletCommands()...)
|
||||
app.Commands = append(app.Commands, watchtowerCommands()...)
|
||||
app.Commands = append(app.Commands, wtclientCommands()...)
|
||||
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
fatal(err)
|
||||
|
283
cmd/lncli/wtclient_active.go
Normal file
283
cmd/lncli/wtclient_active.go
Normal file
@ -0,0 +1,283 @@
|
||||
// +build wtclientrpc
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
// wtclientCommands will return nil for non-wtclientrpc builds.
|
||||
func wtclientCommands() []cli.Command {
|
||||
return []cli.Command{
|
||||
{
|
||||
Name: "wtclient",
|
||||
Usage: "Interact with the watchtower client.",
|
||||
Category: "Watchtower",
|
||||
Subcommands: []cli.Command{
|
||||
addTowerCommand,
|
||||
removeTowerCommand,
|
||||
listTowersCommand,
|
||||
getTowerCommand,
|
||||
statsCommand,
|
||||
policyCommand,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// getWtclient initializes a connection to the watchtower client RPC in order to
|
||||
// interact with it.
|
||||
func getWtclient(ctx *cli.Context) (wtclientrpc.WatchtowerClientClient, func()) {
|
||||
conn := getClientConn(ctx, false)
|
||||
cleanUp := func() {
|
||||
conn.Close()
|
||||
}
|
||||
return wtclientrpc.NewWatchtowerClientClient(conn), cleanUp
|
||||
}
|
||||
|
||||
var addTowerCommand = cli.Command{
|
||||
Name: "add",
|
||||
Usage: "Register a watchtower to use for future sessions/backups.",
|
||||
Description: "If the watchtower has already been registered, then " +
|
||||
"this command serves as a way of updating the watchtower " +
|
||||
"with new addresses it is reachable over.",
|
||||
ArgsUsage: "pubkey@address",
|
||||
Action: actionDecorator(addTower),
|
||||
}
|
||||
|
||||
func addTower(ctx *cli.Context) error {
|
||||
// Display the command's help message if the number of arguments/flags
|
||||
// is not what we expect.
|
||||
if ctx.NArg() != 1 || ctx.NumFlags() > 0 {
|
||||
return cli.ShowCommandHelp(ctx, "add")
|
||||
}
|
||||
|
||||
parts := strings.Split(ctx.Args().First(), "@")
|
||||
if len(parts) != 2 {
|
||||
return errors.New("expected tower of format pubkey@address")
|
||||
}
|
||||
pubKey, err := hex.DecodeString(parts[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid public key: %v", err)
|
||||
}
|
||||
address := parts[1]
|
||||
|
||||
client, cleanUp := getWtclient(ctx)
|
||||
defer cleanUp()
|
||||
|
||||
req := &wtclientrpc.AddTowerRequest{
|
||||
Pubkey: pubKey,
|
||||
Address: address,
|
||||
}
|
||||
resp, err := client.AddTower(context.Background(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printRespJSON(resp)
|
||||
return nil
|
||||
}
|
||||
|
||||
var removeTowerCommand = cli.Command{
|
||||
Name: "remove",
|
||||
Usage: "Remove a watchtower to prevent its use for future " +
|
||||
"sessions/backups.",
|
||||
Description: "An optional address can be provided to remove, " +
|
||||
"indicating that the watchtower is no longer reachable at " +
|
||||
"this address. If an address isn't provided, then the " +
|
||||
"watchtower will no longer be used for future sessions/backups.",
|
||||
ArgsUsage: "pubkey | pubkey@address",
|
||||
Action: actionDecorator(removeTower),
|
||||
}
|
||||
|
||||
func removeTower(ctx *cli.Context) error {
|
||||
// Display the command's help message if the number of arguments/flags
|
||||
// is not what we expect.
|
||||
if ctx.NArg() != 1 || ctx.NumFlags() > 0 {
|
||||
return cli.ShowCommandHelp(ctx, "remove")
|
||||
}
|
||||
|
||||
// The command can have only one argument, but it can be interpreted in
|
||||
// either of the following formats:
|
||||
//
|
||||
// pubkey or pubkey@address
|
||||
//
|
||||
// The hex-encoded public key of the watchtower is always required,
|
||||
// while the second is an optional address we'll remove from the
|
||||
// watchtower's database record.
|
||||
parts := strings.Split(ctx.Args().First(), "@")
|
||||
if len(parts) > 2 {
|
||||
return errors.New("expected tower of format pubkey@address")
|
||||
}
|
||||
pubKey, err := hex.DecodeString(parts[0])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid public key: %v", err)
|
||||
}
|
||||
var address string
|
||||
if len(parts) == 2 {
|
||||
address = parts[1]
|
||||
}
|
||||
|
||||
client, cleanUp := getWtclient(ctx)
|
||||
defer cleanUp()
|
||||
|
||||
req := &wtclientrpc.RemoveTowerRequest{
|
||||
Pubkey: pubKey,
|
||||
Address: address,
|
||||
}
|
||||
resp, err := client.RemoveTower(context.Background(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printRespJSON(resp)
|
||||
return nil
|
||||
}
|
||||
|
||||
var listTowersCommand = cli.Command{
|
||||
Name: "towers",
|
||||
Usage: "Display information about all registered watchtowers.",
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "include_sessions",
|
||||
Usage: "include sessions with the watchtower in the " +
|
||||
"response",
|
||||
},
|
||||
},
|
||||
Action: actionDecorator(listTowers),
|
||||
}
|
||||
|
||||
func listTowers(ctx *cli.Context) error {
|
||||
// Display the command's help message if the number of arguments/flags
|
||||
// is not what we expect.
|
||||
if ctx.NArg() > 0 || ctx.NumFlags() > 1 {
|
||||
return cli.ShowCommandHelp(ctx, "towers")
|
||||
}
|
||||
|
||||
client, cleanUp := getWtclient(ctx)
|
||||
defer cleanUp()
|
||||
|
||||
req := &wtclientrpc.ListTowersRequest{
|
||||
IncludeSessions: ctx.Bool("include_sessions"),
|
||||
}
|
||||
resp, err := client.ListTowers(context.Background(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var listTowersResp = struct {
|
||||
Towers []*Tower `json:"towers"`
|
||||
}{
|
||||
Towers: make([]*Tower, len(resp.Towers)),
|
||||
}
|
||||
for i, tower := range resp.Towers {
|
||||
listTowersResp.Towers[i] = NewTowerFromProto(tower)
|
||||
}
|
||||
|
||||
printJSON(listTowersResp)
|
||||
return nil
|
||||
}
|
||||
|
||||
var getTowerCommand = cli.Command{
|
||||
Name: "tower",
|
||||
Usage: "Display information about a specific registered watchtower.",
|
||||
ArgsUsage: "pubkey",
|
||||
Flags: []cli.Flag{
|
||||
cli.BoolFlag{
|
||||
Name: "include_sessions",
|
||||
Usage: "include sessions with the watchtower in the " +
|
||||
"response",
|
||||
},
|
||||
},
|
||||
Action: actionDecorator(getTower),
|
||||
}
|
||||
|
||||
func getTower(ctx *cli.Context) error {
|
||||
// Display the command's help message if the number of arguments/flags
|
||||
// is not what we expect.
|
||||
if ctx.NArg() != 1 || ctx.NumFlags() > 1 {
|
||||
return cli.ShowCommandHelp(ctx, "tower")
|
||||
}
|
||||
|
||||
// The command only has one argument, which we expect to be the
|
||||
// hex-encoded public key of the watchtower we'll display information
|
||||
// about.
|
||||
pubKey, err := hex.DecodeString(ctx.Args().Get(0))
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid public key: %v", err)
|
||||
}
|
||||
|
||||
client, cleanUp := getWtclient(ctx)
|
||||
defer cleanUp()
|
||||
|
||||
req := &wtclientrpc.GetTowerInfoRequest{
|
||||
Pubkey: pubKey,
|
||||
IncludeSessions: ctx.Bool("include_sessions"),
|
||||
}
|
||||
resp, err := client.GetTowerInfo(context.Background(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printJSON(NewTowerFromProto(resp))
|
||||
return nil
|
||||
}
|
||||
|
||||
var statsCommand = cli.Command{
|
||||
Name: "stats",
|
||||
Usage: "Display the session stats of the watchtower client.",
|
||||
Action: actionDecorator(stats),
|
||||
}
|
||||
|
||||
func stats(ctx *cli.Context) error {
|
||||
// Display the command's help message if the number of arguments/flags
|
||||
// is not what we expect.
|
||||
if ctx.NArg() > 0 || ctx.NumFlags() > 0 {
|
||||
return cli.ShowCommandHelp(ctx, "stats")
|
||||
}
|
||||
|
||||
client, cleanUp := getWtclient(ctx)
|
||||
defer cleanUp()
|
||||
|
||||
req := &wtclientrpc.StatsRequest{}
|
||||
resp, err := client.Stats(context.Background(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printRespJSON(resp)
|
||||
return nil
|
||||
}
|
||||
|
||||
var policyCommand = cli.Command{
|
||||
Name: "policy",
|
||||
Usage: "Display the active watchtower client policy configuration.",
|
||||
Action: actionDecorator(policy),
|
||||
}
|
||||
|
||||
func policy(ctx *cli.Context) error {
|
||||
// Display the command's help message if the number of arguments/flags
|
||||
// is not what we expect.
|
||||
if ctx.NArg() > 0 || ctx.NumFlags() > 0 {
|
||||
return cli.ShowCommandHelp(ctx, "policy")
|
||||
}
|
||||
|
||||
client, cleanUp := getWtclient(ctx)
|
||||
defer cleanUp()
|
||||
|
||||
req := &wtclientrpc.PolicyRequest{}
|
||||
resp, err := client.Policy(context.Background(), req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printRespJSON(resp)
|
||||
return nil
|
||||
}
|
10
cmd/lncli/wtclient_default.go
Normal file
10
cmd/lncli/wtclient_default.go
Normal file
@ -0,0 +1,10 @@
|
||||
// +build !wtclientrpc
|
||||
|
||||
package main
|
||||
|
||||
import "github.com/urfave/cli"
|
||||
|
||||
// wtclientCommands will return nil for non-wtclientrpc builds.
|
||||
func wtclientCommands() []cli.Command {
|
||||
return nil
|
||||
}
|
52
cmd/lncli/wtclient_types.go
Normal file
52
cmd/lncli/wtclient_types.go
Normal file
@ -0,0 +1,52 @@
|
||||
// +build wtclientrpc
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
|
||||
)
|
||||
|
||||
// TowerSession encompasses information about a tower session in a
// CLI-friendly, JSON-serializable form.
type TowerSession struct {
	// NumBackups is the number of backups made within the session.
	NumBackups uint32 `json:"num_backups"`

	// NumPendingBackups is the number of backups still pending within the
	// session.
	NumPendingBackups uint32 `json:"num_pending_backups"`

	// MaxBackups is the maximum number of backups the session allows.
	MaxBackups uint32 `json:"max_backups"`

	// SweepSatPerByte is the session's sweep fee rate in sat/byte.
	SweepSatPerByte uint32 `json:"sweep_sat_per_byte"`
}
|
||||
|
||||
// NewTowerSessionsFromProto converts a set of tower sessions from their RPC
|
||||
// type to a CLI-friendly type.
|
||||
func NewTowerSessionsFromProto(sessions []*wtclientrpc.TowerSession) []*TowerSession {
|
||||
towerSessions := make([]*TowerSession, 0, len(sessions))
|
||||
for _, session := range sessions {
|
||||
towerSessions = append(towerSessions, &TowerSession{
|
||||
NumBackups: session.NumBackups,
|
||||
NumPendingBackups: session.NumPendingBackups,
|
||||
MaxBackups: session.MaxBackups,
|
||||
SweepSatPerByte: session.SweepSatPerByte,
|
||||
})
|
||||
}
|
||||
return towerSessions
|
||||
}
|
||||
|
||||
// Tower encompasses information about a registered watchtower.
|
||||
type Tower struct {
|
||||
PubKey string `json:"pubkey"`
|
||||
Addresses []string `json:"addresses"`
|
||||
ActiveSessionCandidate bool `json:"active_session_candidate"`
|
||||
NumSessions uint32 `json:"num_sessions"`
|
||||
Sessions []*TowerSession `json:"sessions"`
|
||||
}
|
||||
|
||||
// NewTowerFromProto converts a tower from its RPC type to a CLI-friendly type.
|
||||
func NewTowerFromProto(tower *wtclientrpc.Tower) *Tower {
|
||||
return &Tower{
|
||||
PubKey: hex.EncodeToString(tower.Pubkey),
|
||||
Addresses: tower.Addresses,
|
||||
ActiveSessionCandidate: tower.ActiveSessionCandidate,
|
||||
NumSessions: tower.NumSessions,
|
||||
Sessions: NewTowerSessionsFromProto(tower.Sessions),
|
||||
}
|
||||
}
|
12
config.go
12
config.go
@ -33,7 +33,6 @@ import (
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/lightningnetwork/lnd/routing"
|
||||
"github.com/lightningnetwork/lnd/tor"
|
||||
"github.com/lightningnetwork/lnd/watchtower"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -1091,17 +1090,6 @@ func loadConfig() (*config, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If the user provided private watchtower addresses, parse them to
|
||||
// obtain the LN addresses.
|
||||
if cfg.WtClient.IsActive() {
|
||||
err := cfg.WtClient.ParsePrivateTowers(
|
||||
watchtower.DefaultPeerPort, cfg.net.ResolveTCPAddr,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Finally, ensure that the user's color is correctly formatted,
|
||||
// otherwise the server will not be able to start after the unlocking
|
||||
// the wallet.
|
||||
|
@ -1,65 +1,38 @@
|
||||
package lncfg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
)
|
||||
import "errors"
|
||||
|
||||
// WtClient holds the configuration options for the daemon's watchtower client.
|
||||
type WtClient struct {
|
||||
// Active determines whether a watchtower client should be created to
|
||||
// back up channel states with registered watchtowers.
|
||||
Active bool `long:"active" description:"Whether the daemon should use private watchtowers to back up revoked channel states."`
|
||||
|
||||
// PrivateTowerURIs specifies the lightning URIs of the towers the
|
||||
// watchtower client should send new backups to.
|
||||
PrivateTowerURIs []string `long:"private-tower-uris" description:"Specifies the URIs of private watchtowers to use in backing up revoked states. URIs must be of the form <pubkey>@<addr>. Only 1 URI is supported at this time, if none are provided the tower will not be enabled."`
|
||||
|
||||
// PrivateTowers is the list of towers parsed from the URIs provided in
|
||||
// PrivateTowerURIs.
|
||||
PrivateTowers []*lnwire.NetAddress
|
||||
PrivateTowerURIs []string `long:"private-tower-uris" description:"(Deprecated) Specifies the URIs of private watchtowers to use in backing up revoked states. URIs must be of the form <pubkey>@<addr>. Only 1 URI is supported at this time, if none are provided the tower will not be enabled."`
|
||||
|
||||
// SweepFeeRate specifies the fee rate in sat/byte to be used when
|
||||
// constructing justice transactions sent to the tower.
|
||||
SweepFeeRate uint64 `long:"sweep-fee-rate" description:"Specifies the fee rate in sat/byte to be used when constructing justice transactions sent to the watchtower."`
|
||||
}
|
||||
|
||||
// Validate asserts that at most 1 private watchtower is requested.
|
||||
// Validate ensures the user has provided a valid configuration.
|
||||
//
|
||||
// NOTE: Part of the Validator interface.
|
||||
func (c *WtClient) Validate() error {
|
||||
if len(c.PrivateTowerURIs) > 1 {
|
||||
return fmt.Errorf("at most 1 private watchtower is supported, "+
|
||||
"found %d", len(c.PrivateTowerURIs))
|
||||
if len(c.PrivateTowerURIs) > 0 {
|
||||
return errors.New("`wtclient.private-tower-uris` is " +
|
||||
"deprecated and will be removed in the v0.8.0 " +
|
||||
"release, to specify watchtowers remove " +
|
||||
"`wtclient.private-tower-uris`, set " +
|
||||
"`wtclient.active`, and check out `lncli wtclient -h` " +
|
||||
"for more information on how to manage towers")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsActive returns true if the watchtower client should be active.
|
||||
func (c *WtClient) IsActive() bool {
|
||||
return len(c.PrivateTowerURIs) > 0
|
||||
}
|
||||
|
||||
// ParsePrivateTowers parses any private tower URIs held PrivateTowerURIs. The
|
||||
// value of port should be the default port to use when a URI does not have one.
|
||||
func (c *WtClient) ParsePrivateTowers(port int, resolver TCPResolver) error {
|
||||
towers := make([]*lnwire.NetAddress, 0, len(c.PrivateTowerURIs))
|
||||
for _, uri := range c.PrivateTowerURIs {
|
||||
addr, err := ParseLNAddressString(
|
||||
uri, strconv.Itoa(port), resolver,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to parse private "+
|
||||
"watchtower address: %v", err)
|
||||
}
|
||||
|
||||
towers = append(towers, addr)
|
||||
}
|
||||
|
||||
c.PrivateTowers = towers
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Compile-time constraint to ensure WtClient implements the Validator
|
||||
// interface.
|
||||
var _ Validator = (*WtClient)(nil)
|
||||
|
2
lnd.go
2
lnd.go
@ -340,7 +340,7 @@ func Main() error {
|
||||
// If the watchtower client should be active, open the client database.
|
||||
// This is done here so that Close always executes when lndMain returns.
|
||||
var towerClientDB *wtdb.ClientDB
|
||||
if cfg.WtClient.IsActive() {
|
||||
if cfg.WtClient.Active {
|
||||
var err error
|
||||
towerClientDB, err = wtdb.OpenClientDB(graphDir)
|
||||
if err != nil {
|
||||
|
27
lnrpc/wtclientrpc/config_active.go
Normal file
27
lnrpc/wtclientrpc/config_active.go
Normal file
@ -0,0 +1,27 @@
|
||||
// +build wtclientrpc
|
||||
|
||||
package wtclientrpc
|
||||
|
||||
import (
|
||||
"github.com/lightningnetwork/lnd/lncfg"
|
||||
"github.com/lightningnetwork/lnd/watchtower/wtclient"
|
||||
)
|
||||
|
||||
// Config is the primary configuration struct for the watchtower RPC server. It
|
||||
// contains all the items required for the RPC server to carry out its duties.
|
||||
// The fields with struct tags are meant to be parsed as normal configuration
|
||||
// options, while if able to be populated, the latter fields MUST also be
|
||||
// specified.
|
||||
type Config struct {
|
||||
// Active indicates if the watchtower client is enabled.
|
||||
Active bool
|
||||
|
||||
// Client is the backing watchtower client that we'll interact with
|
||||
// through the watchtower RPC subserver.
|
||||
Client wtclient.Client
|
||||
|
||||
// Resolver is a custom resolver that will be used to resolve watchtower
|
||||
// addresses to ensure we don't leak any information when running over
|
||||
// non-clear networks, e.g. Tor, etc.
|
||||
Resolver lncfg.TCPResolver
|
||||
}
|
6
lnrpc/wtclientrpc/config_default.go
Normal file
6
lnrpc/wtclientrpc/config_default.go
Normal file
@ -0,0 +1,6 @@
|
||||
// +build !wtclientrpc
|
||||
|
||||
package wtclientrpc
|
||||
|
||||
// Config is empty for non-wtclientrpc builds; it exists only so callers can
// reference the type regardless of build tags.
type Config struct{}
|
63
lnrpc/wtclientrpc/driver.go
Normal file
63
lnrpc/wtclientrpc/driver.go
Normal file
@ -0,0 +1,63 @@
|
||||
// +build wtclientrpc
|
||||
|
||||
package wtclientrpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
)
|
||||
|
||||
// createNewSubServer is a helper method that will create the new sub server
|
||||
// given the main config dispatcher method. If we're unable to find the config
|
||||
// that is meant for us in the config dispatcher, then we'll exit with an
|
||||
// error.
|
||||
func createNewSubServer(configRegistry lnrpc.SubServerConfigDispatcher) (
|
||||
lnrpc.SubServer, lnrpc.MacaroonPerms, error) {
|
||||
|
||||
// We'll attempt to look up the config that we expect, according to our
|
||||
// subServerName name. If we can't find this, then we'll exit with an
|
||||
// error, as we're unable to properly initialize ourselves without this
|
||||
// config.
|
||||
subServerConf, ok := configRegistry.FetchConfig(subServerName)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("unable to find config for "+
|
||||
"subserver type %s", subServerName)
|
||||
}
|
||||
|
||||
// Now that we've found an object mapping to our service name, we'll
|
||||
// ensure that it's the type we need.
|
||||
config, ok := subServerConf.(*Config)
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("wrong type of config for "+
|
||||
"subserver %s, expected %T got %T", subServerName,
|
||||
&Config{}, subServerConf)
|
||||
}
|
||||
|
||||
// Before we try to make the new service instance, we'll perform
|
||||
// some sanity checks on the arguments to ensure that they're useable.
|
||||
switch {
|
||||
case config.Resolver == nil:
|
||||
return nil, nil, errors.New("a lncfg.TCPResolver is required")
|
||||
}
|
||||
|
||||
return New(config)
|
||||
}
|
||||
|
||||
func init() {
|
||||
subServer := &lnrpc.SubServerDriver{
|
||||
SubServerName: subServerName,
|
||||
New: func(c lnrpc.SubServerConfigDispatcher) (lnrpc.SubServer,
|
||||
lnrpc.MacaroonPerms, error) {
|
||||
return createNewSubServer(c)
|
||||
},
|
||||
}
|
||||
|
||||
// If the build tag is active, then we'll register ourselves as a
|
||||
// sub-RPC server within the global lnrpc package namespace.
|
||||
if err := lnrpc.RegisterSubServer(subServer); err != nil {
|
||||
panic(fmt.Sprintf("failed to register sub server driver "+
|
||||
"'%s': %v", subServerName, err))
|
||||
}
|
||||
}
|
48
lnrpc/wtclientrpc/log.go
Normal file
48
lnrpc/wtclientrpc/log.go
Normal file
@ -0,0 +1,48 @@
|
||||
package wtclientrpc
|
||||
|
||||
import (
|
||||
"github.com/btcsuite/btclog"
|
||||
"github.com/lightningnetwork/lnd/build"
|
||||
)
|
||||
|
||||
// Subsystem defines the logging code for this subsystem.
|
||||
const Subsystem = "WTCL"
|
||||
|
||||
// log is a logger that is initialized with no output filters. This means the
|
||||
// package will not perform any logging by default until the caller requests
|
||||
// it.
|
||||
var log btclog.Logger
|
||||
|
||||
// The default amount of logging is none.
|
||||
func init() {
|
||||
UseLogger(build.NewSubLogger(Subsystem, nil))
|
||||
}
|
||||
|
||||
// DisableLog disables all library log output. Logging output is disabled by
|
||||
// by default until UseLogger is called.
|
||||
func DisableLog() {
|
||||
UseLogger(btclog.Disabled)
|
||||
}
|
||||
|
||||
// UseLogger uses a specified Logger to output package logging info. This
|
||||
// should be used in preference to SetLogWriter if the caller is also using
|
||||
// btclog.
|
||||
func UseLogger(logger btclog.Logger) {
|
||||
log = logger
|
||||
}
|
||||
|
||||
// logClosure is used to provide a closure over expensive logging operations
// so they don't have to be performed when the logging level doesn't warrant
// it.
type logClosure func() string

// String invokes the underlying function and returns the result.
func (f logClosure) String() string {
	return f()
}

// newLogClosure returns a new closure over a function that returns a string
// which itself provides a Stringer interface so that it can be used with the
// logging system.
func newLogClosure(fn func() string) logClosure {
	return logClosure(fn)
}
|
309
lnrpc/wtclientrpc/wtclient.go
Normal file
309
lnrpc/wtclientrpc/wtclient.go
Normal file
@ -0,0 +1,309 @@
|
||||
// +build wtclientrpc
|
||||
|
||||
package wtclientrpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/lightningnetwork/lnd/lncfg"
|
||||
"github.com/lightningnetwork/lnd/lnrpc"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/lightningnetwork/lnd/watchtower"
|
||||
"github.com/lightningnetwork/lnd/watchtower/wtclient"
|
||||
"google.golang.org/grpc"
|
||||
"gopkg.in/macaroon-bakery.v2/bakery"
|
||||
)
|
||||
|
||||
const (
|
||||
// subServerName is the name of the sub rpc server. We'll use this name
|
||||
// to register ourselves, and we also require that the main
|
||||
// SubServerConfigDispatcher instance recognizes it as the name of our
|
||||
// RPC service.
|
||||
subServerName = "WatchtowerClientRPC"
|
||||
)
|
||||
|
||||
var (
|
||||
// macPermissions maps RPC calls to the permissions they require.
|
||||
//
|
||||
// TODO(wilmer): create tower macaroon?
|
||||
macPermissions = map[string][]bakery.Op{
|
||||
"/wtclientrpc.WatchtowerClient/AddTower": {{
|
||||
Entity: "offchain",
|
||||
Action: "write",
|
||||
}},
|
||||
"/wtclientrpc.WatchtowerClient/RemoveTower": {{
|
||||
Entity: "offchain",
|
||||
Action: "write",
|
||||
}},
|
||||
"/wtclientrpc.WatchtowerClient/ListTowers": {{
|
||||
Entity: "offchain",
|
||||
Action: "read",
|
||||
}},
|
||||
"/wtclientrpc.WatchtowerClient/GetTowerInfo": {{
|
||||
Entity: "offchain",
|
||||
Action: "read",
|
||||
}},
|
||||
"/wtclientrpc.WatchtowerClient/Stats": {{
|
||||
Entity: "offchain",
|
||||
Action: "read",
|
||||
}},
|
||||
"/wtclientrpc.WatchtowerClient/Policy": {{
|
||||
Entity: "offchain",
|
||||
Action: "read",
|
||||
}},
|
||||
}
|
||||
|
||||
// ErrWtclientNotActive signals that RPC calls cannot be processed
|
||||
// because the watchtower client is not active.
|
||||
ErrWtclientNotActive = errors.New("watchtower client not active")
|
||||
)
|
||||
|
||||
// WatchtowerClient is the RPC server we'll use to interact with the backing
|
||||
// active watchtower client.
|
||||
//
|
||||
// TODO(wilmer): better name?
|
||||
type WatchtowerClient struct {
|
||||
cfg Config
|
||||
}
|
||||
|
||||
// A compile time check to ensure that WatchtowerClient fully implements the
|
||||
// WatchtowerClientWatchtowerClient gRPC service.
|
||||
var _ WatchtowerClientServer = (*WatchtowerClient)(nil)
|
||||
|
||||
// New returns a new instance of the wtclientrpc WatchtowerClient sub-server.
|
||||
// We also return the set of permissions for the macaroons that we may create
|
||||
// within this method. If the macaroons we need aren't found in the filepath,
|
||||
// then we'll create them on start up. If we're unable to locate, or create the
|
||||
// macaroons we need, then we'll return with an error.
|
||||
func New(cfg *Config) (*WatchtowerClient, lnrpc.MacaroonPerms, error) {
|
||||
return &WatchtowerClient{*cfg}, macPermissions, nil
|
||||
}
|
||||
|
||||
// Start launches any helper goroutines required for the WatchtowerClient to
|
||||
// function.
|
||||
//
|
||||
// NOTE: This is part of the lnrpc.SubWatchtowerClient interface.
|
||||
func (c *WatchtowerClient) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop signals any active goroutines for a graceful closure.
|
||||
//
|
||||
// NOTE: This is part of the lnrpc.SubServer interface.
|
||||
func (c *WatchtowerClient) Stop() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Name returns a unique string representation of the sub-server. This can be
|
||||
// used to identify the sub-server and also de-duplicate them.
|
||||
//
|
||||
// NOTE: This is part of the lnrpc.SubServer interface.
|
||||
func (c *WatchtowerClient) Name() string {
|
||||
return subServerName
|
||||
}
|
||||
|
||||
// RegisterWithRootServer will be called by the root gRPC server to direct a sub
|
||||
// RPC server to register itself with the main gRPC root server. Until this is
|
||||
// called, each sub-server won't be able to have requests routed towards it.
|
||||
//
|
||||
// NOTE: This is part of the lnrpc.SubServer interface.
|
||||
func (c *WatchtowerClient) RegisterWithRootServer(grpcServer *grpc.Server) error {
|
||||
// We make sure that we register it with the main gRPC server to ensure
|
||||
// all our methods are routed properly.
|
||||
RegisterWatchtowerClientServer(grpcServer, c)
|
||||
|
||||
log.Debugf("WatchtowerClient RPC server successfully registered with " +
|
||||
"root gRPC server")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isActive returns nil if the watchtower client is initialized so that we can
|
||||
// process RPC requests.
|
||||
func (c *WatchtowerClient) isActive() error {
|
||||
if c.cfg.Active {
|
||||
return nil
|
||||
}
|
||||
return ErrWtclientNotActive
|
||||
}
|
||||
|
||||
// AddTower adds a new watchtower reachable at the given address and considers
|
||||
// it for new sessions. If the watchtower already exists, then any new addresses
|
||||
// included will be considered when dialing it for session negotiations and
|
||||
// backups.
|
||||
func (c *WatchtowerClient) AddTower(ctx context.Context,
|
||||
req *AddTowerRequest) (*AddTowerResponse, error) {
|
||||
|
||||
if err := c.isActive(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubKey, err := btcec.ParsePubKey(req.Pubkey, btcec.S256())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addr, err := lncfg.ParseAddressString(
|
||||
req.Address, strconv.Itoa(watchtower.DefaultPeerPort),
|
||||
c.cfg.Resolver,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid address %v: %v", req.Address, err)
|
||||
}
|
||||
|
||||
towerAddr := &lnwire.NetAddress{
|
||||
IdentityKey: pubKey,
|
||||
Address: addr,
|
||||
}
|
||||
if err := c.cfg.Client.AddTower(towerAddr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &AddTowerResponse{}, nil
|
||||
}
|
||||
|
||||
// RemoveTower removes a watchtower from being considered for future session
|
||||
// negotiations and from being used for any subsequent backups until it's added
|
||||
// again. If an address is provided, then this RPC only serves as a way of
|
||||
// removing the address from the watchtower instead.
|
||||
func (c *WatchtowerClient) RemoveTower(ctx context.Context,
|
||||
req *RemoveTowerRequest) (*RemoveTowerResponse, error) {
|
||||
|
||||
if err := c.isActive(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubKey, err := btcec.ParsePubKey(req.Pubkey, btcec.S256())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var addr net.Addr
|
||||
if req.Address != "" {
|
||||
addr, err = lncfg.ParseAddressString(
|
||||
req.Address, strconv.Itoa(watchtower.DefaultPeerPort),
|
||||
c.cfg.Resolver,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to parse tower "+
|
||||
"address %v: %v", req.Address, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.cfg.Client.RemoveTower(pubKey, addr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &RemoveTowerResponse{}, nil
|
||||
}
|
||||
|
||||
// ListTowers returns the list of watchtowers registered with the client.
|
||||
func (c *WatchtowerClient) ListTowers(ctx context.Context,
|
||||
req *ListTowersRequest) (*ListTowersResponse, error) {
|
||||
|
||||
if err := c.isActive(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
towers, err := c.cfg.Client.RegisteredTowers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rpcTowers := make([]*Tower, 0, len(towers))
|
||||
for _, tower := range towers {
|
||||
rpcTower := marshallTower(tower, req.IncludeSessions)
|
||||
rpcTowers = append(rpcTowers, rpcTower)
|
||||
}
|
||||
|
||||
return &ListTowersResponse{Towers: rpcTowers}, nil
|
||||
}
|
||||
|
||||
// GetTowerInfo retrieves information for a registered watchtower.
|
||||
func (c *WatchtowerClient) GetTowerInfo(ctx context.Context,
|
||||
req *GetTowerInfoRequest) (*Tower, error) {
|
||||
|
||||
if err := c.isActive(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pubKey, err := btcec.ParsePubKey(req.Pubkey, btcec.S256())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tower, err := c.cfg.Client.LookupTower(pubKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return marshallTower(tower, req.IncludeSessions), nil
|
||||
}
|
||||
|
||||
// Stats returns the in-memory statistics of the client since startup.
|
||||
func (c *WatchtowerClient) Stats(ctx context.Context,
|
||||
req *StatsRequest) (*StatsResponse, error) {
|
||||
|
||||
if err := c.isActive(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stats := c.cfg.Client.Stats()
|
||||
return &StatsResponse{
|
||||
NumBackups: uint32(stats.NumTasksAccepted),
|
||||
NumFailedBackups: uint32(stats.NumTasksIneligible),
|
||||
NumPendingBackups: uint32(stats.NumTasksReceived),
|
||||
NumSessionsAcquired: uint32(stats.NumSessionsAcquired),
|
||||
NumSessionsExhausted: uint32(stats.NumSessionsExhausted),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Policy returns the active watchtower client policy configuration.
|
||||
func (c *WatchtowerClient) Policy(ctx context.Context,
|
||||
req *PolicyRequest) (*PolicyResponse, error) {
|
||||
|
||||
if err := c.isActive(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
policy := c.cfg.Client.Policy()
|
||||
return &PolicyResponse{
|
||||
MaxUpdates: uint32(policy.MaxUpdates),
|
||||
SweepSatPerByte: uint32(policy.SweepFeeRate.FeePerKVByte() / 1000),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// marshallTower converts a client registered watchtower into its corresponding
|
||||
// RPC type.
|
||||
func marshallTower(tower *wtclient.RegisteredTower, includeSessions bool) *Tower {
|
||||
rpcAddrs := make([]string, 0, len(tower.Addresses))
|
||||
for _, addr := range tower.Addresses {
|
||||
rpcAddrs = append(rpcAddrs, addr.String())
|
||||
}
|
||||
|
||||
var rpcSessions []*TowerSession
|
||||
if includeSessions {
|
||||
rpcSessions = make([]*TowerSession, 0, len(tower.Sessions))
|
||||
for _, session := range tower.Sessions {
|
||||
satPerByte := session.Policy.SweepFeeRate.FeePerKVByte() / 1000
|
||||
rpcSessions = append(rpcSessions, &TowerSession{
|
||||
NumBackups: uint32(len(session.AckedUpdates)),
|
||||
NumPendingBackups: uint32(len(session.CommittedUpdates)),
|
||||
MaxBackups: uint32(session.Policy.MaxUpdates),
|
||||
SweepSatPerByte: uint32(satPerByte),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return &Tower{
|
||||
Pubkey: tower.IdentityKey.SerializeCompressed(),
|
||||
Addresses: rpcAddrs,
|
||||
ActiveSessionCandidate: tower.ActiveSessionCandidate,
|
||||
NumSessions: uint32(len(tower.Sessions)),
|
||||
Sessions: rpcSessions,
|
||||
}
|
||||
}
|
988
lnrpc/wtclientrpc/wtclient.pb.go
Normal file
988
lnrpc/wtclientrpc/wtclient.pb.go
Normal file
@ -0,0 +1,988 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: wtclientrpc/wtclient.proto
|
||||
|
||||
package wtclientrpc
|
||||
|
||||
import (
|
||||
context "context"
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
grpc "google.golang.org/grpc"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type AddTowerRequest struct {
|
||||
// The identifying public key of the watchtower to add.
|
||||
Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"`
|
||||
// A network address the watchtower is reachable over.
|
||||
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AddTowerRequest) Reset() { *m = AddTowerRequest{} }
|
||||
func (m *AddTowerRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*AddTowerRequest) ProtoMessage() {}
|
||||
func (*AddTowerRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{0}
|
||||
}
|
||||
|
||||
func (m *AddTowerRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_AddTowerRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *AddTowerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_AddTowerRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *AddTowerRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_AddTowerRequest.Merge(m, src)
|
||||
}
|
||||
func (m *AddTowerRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_AddTowerRequest.Size(m)
|
||||
}
|
||||
func (m *AddTowerRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_AddTowerRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_AddTowerRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *AddTowerRequest) GetPubkey() []byte {
|
||||
if m != nil {
|
||||
return m.Pubkey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *AddTowerRequest) GetAddress() string {
|
||||
if m != nil {
|
||||
return m.Address
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type AddTowerResponse struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AddTowerResponse) Reset() { *m = AddTowerResponse{} }
|
||||
func (m *AddTowerResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*AddTowerResponse) ProtoMessage() {}
|
||||
func (*AddTowerResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{1}
|
||||
}
|
||||
|
||||
func (m *AddTowerResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_AddTowerResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *AddTowerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_AddTowerResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *AddTowerResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_AddTowerResponse.Merge(m, src)
|
||||
}
|
||||
func (m *AddTowerResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_AddTowerResponse.Size(m)
|
||||
}
|
||||
func (m *AddTowerResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_AddTowerResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_AddTowerResponse proto.InternalMessageInfo
|
||||
|
||||
type RemoveTowerRequest struct {
|
||||
// The identifying public key of the watchtower to remove.
|
||||
Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"`
|
||||
//
|
||||
//If set, then the record for this address will be removed, indicating that is
|
||||
//is stale. Otherwise, the watchtower will no longer be used for future
|
||||
//session negotiations and backups.
|
||||
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RemoveTowerRequest) Reset() { *m = RemoveTowerRequest{} }
|
||||
func (m *RemoveTowerRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*RemoveTowerRequest) ProtoMessage() {}
|
||||
func (*RemoveTowerRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{2}
|
||||
}
|
||||
|
||||
func (m *RemoveTowerRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RemoveTowerRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RemoveTowerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RemoveTowerRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *RemoveTowerRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RemoveTowerRequest.Merge(m, src)
|
||||
}
|
||||
func (m *RemoveTowerRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_RemoveTowerRequest.Size(m)
|
||||
}
|
||||
func (m *RemoveTowerRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RemoveTowerRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RemoveTowerRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *RemoveTowerRequest) GetPubkey() []byte {
|
||||
if m != nil {
|
||||
return m.Pubkey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RemoveTowerRequest) GetAddress() string {
|
||||
if m != nil {
|
||||
return m.Address
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type RemoveTowerResponse struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RemoveTowerResponse) Reset() { *m = RemoveTowerResponse{} }
|
||||
func (m *RemoveTowerResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*RemoveTowerResponse) ProtoMessage() {}
|
||||
func (*RemoveTowerResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{3}
|
||||
}
|
||||
|
||||
func (m *RemoveTowerResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_RemoveTowerResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *RemoveTowerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_RemoveTowerResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *RemoveTowerResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_RemoveTowerResponse.Merge(m, src)
|
||||
}
|
||||
func (m *RemoveTowerResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_RemoveTowerResponse.Size(m)
|
||||
}
|
||||
func (m *RemoveTowerResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_RemoveTowerResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_RemoveTowerResponse proto.InternalMessageInfo
|
||||
|
||||
type GetTowerInfoRequest struct {
|
||||
// The identifying public key of the watchtower to retrieve information for.
|
||||
Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"`
|
||||
// Whether we should include sessions with the watchtower in the response.
|
||||
IncludeSessions bool `protobuf:"varint,2,opt,name=include_sessions,proto3" json:"include_sessions,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetTowerInfoRequest) Reset() { *m = GetTowerInfoRequest{} }
|
||||
func (m *GetTowerInfoRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetTowerInfoRequest) ProtoMessage() {}
|
||||
func (*GetTowerInfoRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{4}
|
||||
}
|
||||
|
||||
func (m *GetTowerInfoRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GetTowerInfoRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GetTowerInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GetTowerInfoRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GetTowerInfoRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GetTowerInfoRequest.Merge(m, src)
|
||||
}
|
||||
func (m *GetTowerInfoRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_GetTowerInfoRequest.Size(m)
|
||||
}
|
||||
func (m *GetTowerInfoRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GetTowerInfoRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GetTowerInfoRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *GetTowerInfoRequest) GetPubkey() []byte {
|
||||
if m != nil {
|
||||
return m.Pubkey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GetTowerInfoRequest) GetIncludeSessions() bool {
|
||||
if m != nil {
|
||||
return m.IncludeSessions
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type TowerSession struct {
|
||||
//
|
||||
//The total number of successful backups that have been made to the
|
||||
//watchtower session.
|
||||
NumBackups uint32 `protobuf:"varint,1,opt,name=num_backups,proto3" json:"num_backups,omitempty"`
|
||||
//
|
||||
//The total number of backups in the session that are currently pending to be
|
||||
//acknowledged by the watchtower.
|
||||
NumPendingBackups uint32 `protobuf:"varint,2,opt,name=num_pending_backups,proto3" json:"num_pending_backups,omitempty"`
|
||||
// The maximum number of backups allowed by the watchtower session.
|
||||
MaxBackups uint32 `protobuf:"varint,3,opt,name=max_backups,proto3" json:"max_backups,omitempty"`
|
||||
//
|
||||
//The fee rate, in satoshis per vbyte, that will be used by the watchtower for
|
||||
//the justice transaction in the event of a channel breach.
|
||||
SweepSatPerByte uint32 `protobuf:"varint,4,opt,name=sweep_sat_per_byte,proto3" json:"sweep_sat_per_byte,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *TowerSession) Reset() { *m = TowerSession{} }
|
||||
func (m *TowerSession) String() string { return proto.CompactTextString(m) }
|
||||
func (*TowerSession) ProtoMessage() {}
|
||||
func (*TowerSession) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{5}
|
||||
}
|
||||
|
||||
func (m *TowerSession) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_TowerSession.Unmarshal(m, b)
|
||||
}
|
||||
func (m *TowerSession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_TowerSession.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *TowerSession) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TowerSession.Merge(m, src)
|
||||
}
|
||||
func (m *TowerSession) XXX_Size() int {
|
||||
return xxx_messageInfo_TowerSession.Size(m)
|
||||
}
|
||||
func (m *TowerSession) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TowerSession.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TowerSession proto.InternalMessageInfo
|
||||
|
||||
func (m *TowerSession) GetNumBackups() uint32 {
|
||||
if m != nil {
|
||||
return m.NumBackups
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *TowerSession) GetNumPendingBackups() uint32 {
|
||||
if m != nil {
|
||||
return m.NumPendingBackups
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *TowerSession) GetMaxBackups() uint32 {
|
||||
if m != nil {
|
||||
return m.MaxBackups
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *TowerSession) GetSweepSatPerByte() uint32 {
|
||||
if m != nil {
|
||||
return m.SweepSatPerByte
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Tower struct {
|
||||
// The identifying public key of the watchtower.
|
||||
Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"`
|
||||
// The list of addresses the watchtower is reachable over.
|
||||
Addresses []string `protobuf:"bytes,2,rep,name=addresses,proto3" json:"addresses,omitempty"`
|
||||
// Whether the watchtower is currently a candidate for new sessions.
|
||||
ActiveSessionCandidate bool `protobuf:"varint,3,opt,name=active_session_candidate,proto3" json:"active_session_candidate,omitempty"`
|
||||
// The number of sessions that have been negotiated with the watchtower.
|
||||
NumSessions uint32 `protobuf:"varint,4,opt,name=num_sessions,proto3" json:"num_sessions,omitempty"`
|
||||
// The list of sessions that have been negotiated with the watchtower.
|
||||
Sessions []*TowerSession `protobuf:"bytes,5,rep,name=sessions,proto3" json:"sessions,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Tower) Reset() { *m = Tower{} }
|
||||
func (m *Tower) String() string { return proto.CompactTextString(m) }
|
||||
func (*Tower) ProtoMessage() {}
|
||||
func (*Tower) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{6}
|
||||
}
|
||||
|
||||
func (m *Tower) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Tower.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Tower) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Tower.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Tower) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Tower.Merge(m, src)
|
||||
}
|
||||
func (m *Tower) XXX_Size() int {
|
||||
return xxx_messageInfo_Tower.Size(m)
|
||||
}
|
||||
func (m *Tower) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Tower.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Tower proto.InternalMessageInfo
|
||||
|
||||
func (m *Tower) GetPubkey() []byte {
|
||||
if m != nil {
|
||||
return m.Pubkey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Tower) GetAddresses() []string {
|
||||
if m != nil {
|
||||
return m.Addresses
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Tower) GetActiveSessionCandidate() bool {
|
||||
if m != nil {
|
||||
return m.ActiveSessionCandidate
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *Tower) GetNumSessions() uint32 {
|
||||
if m != nil {
|
||||
return m.NumSessions
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Tower) GetSessions() []*TowerSession {
|
||||
if m != nil {
|
||||
return m.Sessions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ListTowersRequest struct {
|
||||
// Whether we should include sessions with the watchtower in the response.
|
||||
IncludeSessions bool `protobuf:"varint,1,opt,name=include_sessions,proto3" json:"include_sessions,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListTowersRequest) Reset() { *m = ListTowersRequest{} }
|
||||
func (m *ListTowersRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListTowersRequest) ProtoMessage() {}
|
||||
func (*ListTowersRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{7}
|
||||
}
|
||||
|
||||
func (m *ListTowersRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListTowersRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListTowersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListTowersRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ListTowersRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListTowersRequest.Merge(m, src)
|
||||
}
|
||||
func (m *ListTowersRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_ListTowersRequest.Size(m)
|
||||
}
|
||||
func (m *ListTowersRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListTowersRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListTowersRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *ListTowersRequest) GetIncludeSessions() bool {
|
||||
if m != nil {
|
||||
return m.IncludeSessions
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type ListTowersResponse struct {
|
||||
// The list of watchtowers available for new backups.
|
||||
Towers []*Tower `protobuf:"bytes,1,rep,name=towers,proto3" json:"towers,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ListTowersResponse) Reset() { *m = ListTowersResponse{} }
|
||||
func (m *ListTowersResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*ListTowersResponse) ProtoMessage() {}
|
||||
func (*ListTowersResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{8}
|
||||
}
|
||||
|
||||
func (m *ListTowersResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ListTowersResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ListTowersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ListTowersResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ListTowersResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ListTowersResponse.Merge(m, src)
|
||||
}
|
||||
func (m *ListTowersResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_ListTowersResponse.Size(m)
|
||||
}
|
||||
func (m *ListTowersResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ListTowersResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ListTowersResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *ListTowersResponse) GetTowers() []*Tower {
|
||||
if m != nil {
|
||||
return m.Towers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type StatsRequest struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StatsRequest) Reset() { *m = StatsRequest{} }
|
||||
func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*StatsRequest) ProtoMessage() {}
|
||||
func (*StatsRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{9}
|
||||
}
|
||||
|
||||
func (m *StatsRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StatsRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *StatsRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StatsRequest.Merge(m, src)
|
||||
}
|
||||
func (m *StatsRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_StatsRequest.Size(m)
|
||||
}
|
||||
func (m *StatsRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StatsRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StatsRequest proto.InternalMessageInfo
|
||||
|
||||
type StatsResponse struct {
|
||||
//
|
||||
//The total number of backups made to all active and exhausted watchtower
|
||||
//sessions.
|
||||
NumBackups uint32 `protobuf:"varint,1,opt,name=num_backups,proto3" json:"num_backups,omitempty"`
|
||||
//
|
||||
//The total number of backups that are pending to be acknowledged by all
|
||||
//active and exhausted watchtower sessions.
|
||||
NumPendingBackups uint32 `protobuf:"varint,2,opt,name=num_pending_backups,proto3" json:"num_pending_backups,omitempty"`
|
||||
//
|
||||
//The total number of backups that all active and exhausted watchtower
|
||||
//sessions have failed to acknowledge.
|
||||
NumFailedBackups uint32 `protobuf:"varint,3,opt,name=num_failed_backups,proto3" json:"num_failed_backups,omitempty"`
|
||||
// The total number of new sessions made to watchtowers.
|
||||
NumSessionsAcquired uint32 `protobuf:"varint,4,opt,name=num_sessions_acquired,proto3" json:"num_sessions_acquired,omitempty"`
|
||||
// The total number of watchtower sessions that have been exhausted.
|
||||
NumSessionsExhausted uint32 `protobuf:"varint,5,opt,name=num_sessions_exhausted,proto3" json:"num_sessions_exhausted,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StatsResponse) Reset() { *m = StatsResponse{} }
|
||||
func (m *StatsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*StatsResponse) ProtoMessage() {}
|
||||
func (*StatsResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{10}
|
||||
}
|
||||
|
||||
func (m *StatsResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_StatsResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *StatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_StatsResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *StatsResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_StatsResponse.Merge(m, src)
|
||||
}
|
||||
func (m *StatsResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_StatsResponse.Size(m)
|
||||
}
|
||||
func (m *StatsResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_StatsResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_StatsResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *StatsResponse) GetNumBackups() uint32 {
|
||||
if m != nil {
|
||||
return m.NumBackups
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *StatsResponse) GetNumPendingBackups() uint32 {
|
||||
if m != nil {
|
||||
return m.NumPendingBackups
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *StatsResponse) GetNumFailedBackups() uint32 {
|
||||
if m != nil {
|
||||
return m.NumFailedBackups
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *StatsResponse) GetNumSessionsAcquired() uint32 {
|
||||
if m != nil {
|
||||
return m.NumSessionsAcquired
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *StatsResponse) GetNumSessionsExhausted() uint32 {
|
||||
if m != nil {
|
||||
return m.NumSessionsExhausted
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type PolicyRequest struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PolicyRequest) Reset() { *m = PolicyRequest{} }
|
||||
func (m *PolicyRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*PolicyRequest) ProtoMessage() {}
|
||||
func (*PolicyRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{11}
|
||||
}
|
||||
|
||||
func (m *PolicyRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_PolicyRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *PolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_PolicyRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *PolicyRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PolicyRequest.Merge(m, src)
|
||||
}
|
||||
func (m *PolicyRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_PolicyRequest.Size(m)
|
||||
}
|
||||
func (m *PolicyRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PolicyRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PolicyRequest proto.InternalMessageInfo
|
||||
|
||||
type PolicyResponse struct {
|
||||
//
|
||||
//The maximum number of updates each session we negotiate with watchtowers
|
||||
//should allow.
|
||||
MaxUpdates uint32 `protobuf:"varint,1,opt,name=max_updates,proto3" json:"max_updates,omitempty"`
|
||||
//
|
||||
//The fee rate, in satoshis per vbyte, that will be used by watchtowers for
|
||||
//justice transactions in response to channel breaches.
|
||||
SweepSatPerByte uint32 `protobuf:"varint,2,opt,name=sweep_sat_per_byte,proto3" json:"sweep_sat_per_byte,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PolicyResponse) Reset() { *m = PolicyResponse{} }
|
||||
func (m *PolicyResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*PolicyResponse) ProtoMessage() {}
|
||||
func (*PolicyResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b5f4e7d95a641af2, []int{12}
|
||||
}
|
||||
|
||||
func (m *PolicyResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_PolicyResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *PolicyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_PolicyResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *PolicyResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_PolicyResponse.Merge(m, src)
|
||||
}
|
||||
func (m *PolicyResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_PolicyResponse.Size(m)
|
||||
}
|
||||
func (m *PolicyResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_PolicyResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_PolicyResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *PolicyResponse) GetMaxUpdates() uint32 {
|
||||
if m != nil {
|
||||
return m.MaxUpdates
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *PolicyResponse) GetSweepSatPerByte() uint32 {
|
||||
if m != nil {
|
||||
return m.SweepSatPerByte
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*AddTowerRequest)(nil), "wtclientrpc.AddTowerRequest")
|
||||
proto.RegisterType((*AddTowerResponse)(nil), "wtclientrpc.AddTowerResponse")
|
||||
proto.RegisterType((*RemoveTowerRequest)(nil), "wtclientrpc.RemoveTowerRequest")
|
||||
proto.RegisterType((*RemoveTowerResponse)(nil), "wtclientrpc.RemoveTowerResponse")
|
||||
proto.RegisterType((*GetTowerInfoRequest)(nil), "wtclientrpc.GetTowerInfoRequest")
|
||||
proto.RegisterType((*TowerSession)(nil), "wtclientrpc.TowerSession")
|
||||
proto.RegisterType((*Tower)(nil), "wtclientrpc.Tower")
|
||||
proto.RegisterType((*ListTowersRequest)(nil), "wtclientrpc.ListTowersRequest")
|
||||
proto.RegisterType((*ListTowersResponse)(nil), "wtclientrpc.ListTowersResponse")
|
||||
proto.RegisterType((*StatsRequest)(nil), "wtclientrpc.StatsRequest")
|
||||
proto.RegisterType((*StatsResponse)(nil), "wtclientrpc.StatsResponse")
|
||||
proto.RegisterType((*PolicyRequest)(nil), "wtclientrpc.PolicyRequest")
|
||||
proto.RegisterType((*PolicyResponse)(nil), "wtclientrpc.PolicyResponse")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("wtclientrpc/wtclient.proto", fileDescriptor_b5f4e7d95a641af2) }
|
||||
|
||||
var fileDescriptor_b5f4e7d95a641af2 = []byte{
|
||||
// 634 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcb, 0x6e, 0xd3, 0x40,
|
||||
0x14, 0x95, 0x13, 0x12, 0xd2, 0x9b, 0xf4, 0xc1, 0xad, 0x5a, 0x19, 0x53, 0x68, 0xe4, 0x55, 0xd4,
|
||||
0x45, 0x02, 0x2d, 0xb0, 0x60, 0x01, 0x94, 0x22, 0x2a, 0x24, 0x90, 0x2a, 0x17, 0x09, 0xc1, 0xc6,
|
||||
0xf2, 0x63, 0x9a, 0x8c, 0xea, 0x8c, 0x5d, 0xcf, 0xb8, 0x69, 0x97, 0xfc, 0x13, 0xbf, 0xc0, 0x1f,
|
||||
0xf0, 0x41, 0xc8, 0xe3, 0x47, 0xc6, 0x8d, 0x2d, 0x16, 0x88, 0x5d, 0xe6, 0x9c, 0x93, 0x33, 0x37,
|
||||
0xf7, 0x9e, 0xdc, 0x01, 0x63, 0x21, 0xbc, 0x80, 0x12, 0x26, 0xe2, 0xc8, 0x9b, 0x14, 0x9f, 0xc7,
|
||||
0x51, 0x1c, 0x8a, 0x10, 0xfb, 0x0a, 0x67, 0x9e, 0xc0, 0xe6, 0xb1, 0xef, 0x7f, 0x09, 0x17, 0x24,
|
||||
0xb6, 0xc8, 0x55, 0x42, 0xb8, 0xc0, 0x5d, 0xe8, 0x46, 0x89, 0x7b, 0x49, 0x6e, 0x75, 0x6d, 0xa8,
|
||||
0x8d, 0x06, 0x56, 0x7e, 0x42, 0x1d, 0xee, 0x3b, 0xbe, 0x1f, 0x13, 0xce, 0xf5, 0xd6, 0x50, 0x1b,
|
||||
0xad, 0x59, 0xc5, 0xd1, 0x44, 0xd8, 0x5a, 0x9a, 0xf0, 0x28, 0x64, 0x9c, 0x98, 0x1f, 0x00, 0x2d,
|
||||
0x32, 0x0f, 0xaf, 0xc9, 0x3f, 0x7a, 0xef, 0xc0, 0x76, 0xc5, 0x27, 0xb7, 0xff, 0x06, 0xdb, 0xa7,
|
||||
0x44, 0x48, 0xec, 0x23, 0xbb, 0x08, 0xff, 0xe6, 0x7f, 0x00, 0x5b, 0x94, 0x79, 0x41, 0xe2, 0x13,
|
||||
0x9b, 0x13, 0xce, 0x69, 0xc8, 0xb2, 0x8b, 0x7a, 0xd6, 0x0a, 0x6e, 0xfe, 0xd4, 0x60, 0x20, 0x8d,
|
||||
0xcf, 0x33, 0x04, 0x87, 0xd0, 0x67, 0xc9, 0xdc, 0x76, 0x1d, 0xef, 0x32, 0x89, 0xb8, 0x74, 0x5e,
|
||||
0xb7, 0x54, 0x08, 0x9f, 0xc2, 0x76, 0x7a, 0x8c, 0x08, 0xf3, 0x29, 0x9b, 0x96, 0xca, 0x96, 0x54,
|
||||
0xd6, 0x51, 0xa9, 0xe7, 0xdc, 0xb9, 0x29, 0x95, 0xed, 0xcc, 0x53, 0x81, 0x70, 0x0c, 0xc8, 0x17,
|
||||
0x84, 0x44, 0x36, 0x77, 0x84, 0x1d, 0x91, 0xd8, 0x76, 0x6f, 0x05, 0xd1, 0xef, 0x49, 0x61, 0x0d,
|
||||
0x63, 0xfe, 0xd6, 0xa0, 0x23, 0xcb, 0x6e, 0x6c, 0xc2, 0x1e, 0xac, 0xe5, 0x5d, 0x25, 0x69, 0x6d,
|
||||
0xed, 0xd1, 0x9a, 0xb5, 0x04, 0xf0, 0x15, 0xe8, 0x8e, 0x27, 0xe8, 0x75, 0xd9, 0x09, 0xdb, 0x73,
|
||||
0x98, 0x4f, 0x7d, 0x47, 0x10, 0x59, 0x5e, 0xcf, 0x6a, 0xe4, 0xd1, 0x84, 0x41, 0xfa, 0x23, 0xcb,
|
||||
0xd6, 0x66, 0x55, 0x56, 0x30, 0x7c, 0x01, 0xbd, 0x92, 0xef, 0x0c, 0xdb, 0xa3, 0xfe, 0xe1, 0xc3,
|
||||
0xb1, 0x92, 0xc4, 0xb1, 0xda, 0x72, 0xab, 0x94, 0x9a, 0x6f, 0xe0, 0xc1, 0x27, 0xca, 0xb3, 0x49,
|
||||
0xf3, 0x62, 0xcc, 0x75, 0xe3, 0xd4, 0x1a, 0xc6, 0xf9, 0x16, 0x50, 0x35, 0xc8, 0xf2, 0x83, 0x07,
|
||||
0xd0, 0x15, 0x12, 0xd1, 0x35, 0x59, 0x0b, 0xae, 0xd6, 0x62, 0xe5, 0x0a, 0x73, 0x03, 0x06, 0xe7,
|
||||
0xc2, 0x11, 0xc5, 0xed, 0xe6, 0x8f, 0x16, 0xac, 0xe7, 0x40, 0xee, 0xf6, 0x3f, 0x12, 0x32, 0x06,
|
||||
0x4c, 0xe1, 0x0b, 0x87, 0x06, 0xc4, 0xbf, 0x13, 0x94, 0x1a, 0x06, 0x9f, 0xc3, 0x8e, 0xda, 0x6f,
|
||||
0xdb, 0xf1, 0xae, 0x12, 0x1a, 0x13, 0x3f, 0x1f, 0x46, 0x3d, 0x89, 0x2f, 0x61, 0xb7, 0x42, 0x90,
|
||||
0x9b, 0x99, 0x93, 0x70, 0x41, 0x7c, 0xbd, 0x23, 0xbf, 0xd6, 0xc0, 0x9a, 0x9b, 0xb0, 0x7e, 0x16,
|
||||
0x06, 0xd4, 0xbb, 0x2d, 0x9a, 0xe2, 0xc2, 0x46, 0x01, 0x2c, 0x9b, 0x92, 0xe6, 0x39, 0x89, 0xd2,
|
||||
0x88, 0x94, 0x4d, 0x51, 0xa0, 0x86, 0x88, 0xb7, 0x9a, 0x22, 0x7e, 0xf8, 0xab, 0x0d, 0x5b, 0x5f,
|
||||
0x1d, 0xe1, 0xcd, 0xe4, 0x60, 0x4e, 0xe4, 0xb8, 0xf0, 0x14, 0x7a, 0xc5, 0xf2, 0xc1, 0xbd, 0xca,
|
||||
0x14, 0xef, 0x2c, 0x36, 0xe3, 0x71, 0x03, 0x9b, 0xd7, 0x7b, 0x06, 0x7d, 0x65, 0xd3, 0xe0, 0x7e,
|
||||
0x45, 0xbd, 0xba, 0xcb, 0x8c, 0x61, 0xb3, 0x20, 0x77, 0xfc, 0x0c, 0xb0, 0x8c, 0x1e, 0x3e, 0xa9,
|
||||
0xe8, 0x57, 0x42, 0x6d, 0xec, 0x37, 0xf2, 0xb9, 0xdd, 0x7b, 0x18, 0xa8, 0x3b, 0x0f, 0xab, 0x05,
|
||||
0xd4, 0xac, 0x43, 0xa3, 0x26, 0xd5, 0xf8, 0x1a, 0x3a, 0x32, 0xbc, 0x58, 0xfd, 0xfb, 0xa9, 0x09,
|
||||
0x37, 0x8c, 0x3a, 0x2a, 0xaf, 0xe2, 0x18, 0xba, 0xd9, 0xa0, 0xb1, 0xaa, 0xaa, 0xc4, 0xc1, 0x78,
|
||||
0x54, 0xcb, 0x65, 0x16, 0xef, 0x8e, 0xbe, 0x3f, 0x9b, 0x52, 0x31, 0x4b, 0xdc, 0xb1, 0x17, 0xce,
|
||||
0x27, 0x01, 0x9d, 0xce, 0x04, 0xa3, 0x6c, 0xca, 0x88, 0x58, 0x84, 0xf1, 0xe5, 0x24, 0x60, 0xfe,
|
||||
0x24, 0x60, 0xea, 0xcb, 0x15, 0x47, 0x9e, 0xdb, 0x95, 0xaf, 0xd7, 0xd1, 0x9f, 0x00, 0x00, 0x00,
|
||||
0xff, 0xff, 0xdd, 0x33, 0x97, 0x54, 0xdb, 0x06, 0x00, 0x00,
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ context.Context
|
||||
var _ grpc.ClientConn
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
const _ = grpc.SupportPackageIsVersion4
|
||||
|
||||
// WatchtowerClientClient is the client API for WatchtowerClient service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||||
type WatchtowerClientClient interface {
|
||||
//
|
||||
//AddTower adds a new watchtower reachable at the given address and
|
||||
//considers it for new sessions. If the watchtower already exists, then
|
||||
//any new addresses included will be considered when dialing it for
|
||||
//session negotiations and backups.
|
||||
AddTower(ctx context.Context, in *AddTowerRequest, opts ...grpc.CallOption) (*AddTowerResponse, error)
|
||||
//
|
||||
//RemoveTower removes a watchtower from being considered for future session
|
||||
//negotiations and from being used for any subsequent backups until it's added
|
||||
//again. If an address is provided, then this RPC only serves as a way of
|
||||
//removing the address from the watchtower instead.
|
||||
RemoveTower(ctx context.Context, in *RemoveTowerRequest, opts ...grpc.CallOption) (*RemoveTowerResponse, error)
|
||||
// ListTowers returns the list of watchtowers registered with the client.
|
||||
ListTowers(ctx context.Context, in *ListTowersRequest, opts ...grpc.CallOption) (*ListTowersResponse, error)
|
||||
// GetTowerInfo retrieves information for a registered watchtower.
|
||||
GetTowerInfo(ctx context.Context, in *GetTowerInfoRequest, opts ...grpc.CallOption) (*Tower, error)
|
||||
// Stats returns the in-memory statistics of the client since startup.
|
||||
Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error)
|
||||
// Policy returns the active watchtower client policy configuration.
|
||||
Policy(ctx context.Context, in *PolicyRequest, opts ...grpc.CallOption) (*PolicyResponse, error)
|
||||
}
|
||||
|
||||
type watchtowerClientClient struct {
|
||||
cc *grpc.ClientConn
|
||||
}
|
||||
|
||||
func NewWatchtowerClientClient(cc *grpc.ClientConn) WatchtowerClientClient {
|
||||
return &watchtowerClientClient{cc}
|
||||
}
|
||||
|
||||
func (c *watchtowerClientClient) AddTower(ctx context.Context, in *AddTowerRequest, opts ...grpc.CallOption) (*AddTowerResponse, error) {
|
||||
out := new(AddTowerResponse)
|
||||
err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/AddTower", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *watchtowerClientClient) RemoveTower(ctx context.Context, in *RemoveTowerRequest, opts ...grpc.CallOption) (*RemoveTowerResponse, error) {
|
||||
out := new(RemoveTowerResponse)
|
||||
err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/RemoveTower", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *watchtowerClientClient) ListTowers(ctx context.Context, in *ListTowersRequest, opts ...grpc.CallOption) (*ListTowersResponse, error) {
|
||||
out := new(ListTowersResponse)
|
||||
err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/ListTowers", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *watchtowerClientClient) GetTowerInfo(ctx context.Context, in *GetTowerInfoRequest, opts ...grpc.CallOption) (*Tower, error) {
|
||||
out := new(Tower)
|
||||
err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/GetTowerInfo", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *watchtowerClientClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) {
|
||||
out := new(StatsResponse)
|
||||
err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/Stats", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *watchtowerClientClient) Policy(ctx context.Context, in *PolicyRequest, opts ...grpc.CallOption) (*PolicyResponse, error) {
|
||||
out := new(PolicyResponse)
|
||||
err := c.cc.Invoke(ctx, "/wtclientrpc.WatchtowerClient/Policy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// WatchtowerClientServer is the server API for WatchtowerClient service.
|
||||
type WatchtowerClientServer interface {
|
||||
//
|
||||
//AddTower adds a new watchtower reachable at the given address and
|
||||
//considers it for new sessions. If the watchtower already exists, then
|
||||
//any new addresses included will be considered when dialing it for
|
||||
//session negotiations and backups.
|
||||
AddTower(context.Context, *AddTowerRequest) (*AddTowerResponse, error)
|
||||
//
|
||||
//RemoveTower removes a watchtower from being considered for future session
|
||||
//negotiations and from being used for any subsequent backups until it's added
|
||||
//again. If an address is provided, then this RPC only serves as a way of
|
||||
//removing the address from the watchtower instead.
|
||||
RemoveTower(context.Context, *RemoveTowerRequest) (*RemoveTowerResponse, error)
|
||||
// ListTowers returns the list of watchtowers registered with the client.
|
||||
ListTowers(context.Context, *ListTowersRequest) (*ListTowersResponse, error)
|
||||
// GetTowerInfo retrieves information for a registered watchtower.
|
||||
GetTowerInfo(context.Context, *GetTowerInfoRequest) (*Tower, error)
|
||||
// Stats returns the in-memory statistics of the client since startup.
|
||||
Stats(context.Context, *StatsRequest) (*StatsResponse, error)
|
||||
// Policy returns the active watchtower client policy configuration.
|
||||
Policy(context.Context, *PolicyRequest) (*PolicyResponse, error)
|
||||
}
|
||||
|
||||
func RegisterWatchtowerClientServer(s *grpc.Server, srv WatchtowerClientServer) {
|
||||
s.RegisterService(&_WatchtowerClient_serviceDesc, srv)
|
||||
}
|
||||
|
||||
func _WatchtowerClient_AddTower_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AddTowerRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(WatchtowerClientServer).AddTower(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/wtclientrpc.WatchtowerClient/AddTower",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(WatchtowerClientServer).AddTower(ctx, req.(*AddTowerRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _WatchtowerClient_RemoveTower_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RemoveTowerRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(WatchtowerClientServer).RemoveTower(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/wtclientrpc.WatchtowerClient/RemoveTower",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(WatchtowerClientServer).RemoveTower(ctx, req.(*RemoveTowerRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _WatchtowerClient_ListTowers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ListTowersRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(WatchtowerClientServer).ListTowers(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/wtclientrpc.WatchtowerClient/ListTowers",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(WatchtowerClientServer).ListTowers(ctx, req.(*ListTowersRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _WatchtowerClient_GetTowerInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetTowerInfoRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(WatchtowerClientServer).GetTowerInfo(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/wtclientrpc.WatchtowerClient/GetTowerInfo",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(WatchtowerClientServer).GetTowerInfo(ctx, req.(*GetTowerInfoRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _WatchtowerClient_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(StatsRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(WatchtowerClientServer).Stats(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/wtclientrpc.WatchtowerClient/Stats",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(WatchtowerClientServer).Stats(ctx, req.(*StatsRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _WatchtowerClient_Policy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PolicyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(WatchtowerClientServer).Policy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/wtclientrpc.WatchtowerClient/Policy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(WatchtowerClientServer).Policy(ctx, req.(*PolicyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _WatchtowerClient_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "wtclientrpc.WatchtowerClient",
|
||||
HandlerType: (*WatchtowerClientServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "AddTower",
|
||||
Handler: _WatchtowerClient_AddTower_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RemoveTower",
|
||||
Handler: _WatchtowerClient_RemoveTower_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListTowers",
|
||||
Handler: _WatchtowerClient_ListTowers_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetTowerInfo",
|
||||
Handler: _WatchtowerClient_GetTowerInfo_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Stats",
|
||||
Handler: _WatchtowerClient_Stats_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Policy",
|
||||
Handler: _WatchtowerClient_Policy_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "wtclientrpc/wtclient.proto",
|
||||
}
|
165
lnrpc/wtclientrpc/wtclient.proto
Normal file
165
lnrpc/wtclientrpc/wtclient.proto
Normal file
@ -0,0 +1,165 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package wtclientrpc;
|
||||
|
||||
option go_package = "github.com/lightningnetwork/lnd/lnrpc/wtclientrpc";
|
||||
|
||||
message AddTowerRequest {
|
||||
// The identifying public key of the watchtower to add.
|
||||
bytes pubkey = 1 [json_name = "pubkey"];
|
||||
|
||||
// A network address the watchtower is reachable over.
|
||||
string address = 2 [json_name = "address"];
|
||||
}
|
||||
|
||||
message AddTowerResponse {
|
||||
}
|
||||
|
||||
message RemoveTowerRequest {
|
||||
// The identifying public key of the watchtower to remove.
|
||||
bytes pubkey = 1 [json_name = "pubkey"];
|
||||
|
||||
/*
|
||||
If set, then the record for this address will be removed, indicating that is
|
||||
is stale. Otherwise, the watchtower will no longer be used for future
|
||||
session negotiations and backups.
|
||||
*/
|
||||
string address = 2 [json_name = "address"];
|
||||
}
|
||||
|
||||
message RemoveTowerResponse {
|
||||
}
|
||||
|
||||
message GetTowerInfoRequest {
|
||||
// The identifying public key of the watchtower to retrieve information for.
|
||||
bytes pubkey = 1 [json_name = "pubkey"];
|
||||
|
||||
// Whether we should include sessions with the watchtower in the response.
|
||||
bool include_sessions = 2 [json_name = "include_sessions"];
|
||||
}
|
||||
|
||||
message TowerSession {
|
||||
/*
|
||||
The total number of successful backups that have been made to the
|
||||
watchtower session.
|
||||
*/
|
||||
uint32 num_backups = 1 [json_name = "num_backups"];
|
||||
|
||||
/*
|
||||
The total number of backups in the session that are currently pending to be
|
||||
acknowledged by the watchtower.
|
||||
*/
|
||||
uint32 num_pending_backups = 2 [json_name = "num_pending_backups"];
|
||||
|
||||
// The maximum number of backups allowed by the watchtower session.
|
||||
uint32 max_backups = 3 [json_name = "max_backups"];
|
||||
|
||||
/*
|
||||
The fee rate, in satoshis per vbyte, that will be used by the watchtower for
|
||||
the justice transaction in the event of a channel breach.
|
||||
*/
|
||||
uint32 sweep_sat_per_byte = 4 [json_name = "sweep_sat_per_byte"];
|
||||
}
|
||||
|
||||
message Tower {
|
||||
// The identifying public key of the watchtower.
|
||||
bytes pubkey = 1 [json_name = "pubkey"];
|
||||
|
||||
// The list of addresses the watchtower is reachable over.
|
||||
repeated string addresses = 2 [json_name = "addresses"];
|
||||
|
||||
// Whether the watchtower is currently a candidate for new sessions.
|
||||
bool active_session_candidate = 3 [json_name = "active_session_candidate"];
|
||||
|
||||
// The number of sessions that have been negotiated with the watchtower.
|
||||
uint32 num_sessions = 4 [json_name = "num_sessions"];
|
||||
|
||||
// The list of sessions that have been negotiated with the watchtower.
|
||||
repeated TowerSession sessions = 5 [json_name = "sessions"];
|
||||
}
|
||||
|
||||
message ListTowersRequest {
|
||||
// Whether we should include sessions with the watchtower in the response.
|
||||
bool include_sessions = 1 [json_name = "include_sessions"];
|
||||
}
|
||||
|
||||
message ListTowersResponse {
|
||||
// The list of watchtowers available for new backups.
|
||||
repeated Tower towers = 1 [json_name = "towers"];
|
||||
}
|
||||
|
||||
message StatsRequest {
|
||||
}
|
||||
|
||||
message StatsResponse {
|
||||
/*
|
||||
The total number of backups made to all active and exhausted watchtower
|
||||
sessions.
|
||||
*/
|
||||
uint32 num_backups = 1 [json_name = "num_backups"];
|
||||
|
||||
/*
|
||||
The total number of backups that are pending to be acknowledged by all
|
||||
active and exhausted watchtower sessions.
|
||||
*/
|
||||
uint32 num_pending_backups = 2 [json_name = "num_pending_backups"];
|
||||
|
||||
/*
|
||||
The total number of backups that all active and exhausted watchtower
|
||||
sessions have failed to acknowledge.
|
||||
*/
|
||||
uint32 num_failed_backups = 3 [json_name = "num_failed_backups"];
|
||||
|
||||
// The total number of new sessions made to watchtowers.
|
||||
uint32 num_sessions_acquired = 4 [json_name = "num_sessions_acquired"];
|
||||
|
||||
// The total number of watchtower sessions that have been exhausted.
|
||||
uint32 num_sessions_exhausted = 5 [json_name = "num_sessions_exhausted"];
|
||||
}
|
||||
|
||||
message PolicyRequest {
|
||||
}
|
||||
|
||||
message PolicyResponse {
|
||||
/*
|
||||
The maximum number of updates each session we negotiate with watchtowers
|
||||
should allow.
|
||||
*/
|
||||
uint32 max_updates = 1 [json_name = "max_updates"];
|
||||
|
||||
/*
|
||||
The fee rate, in satoshis per vbyte, that will be used by watchtowers for
|
||||
justice transactions in response to channel breaches.
|
||||
*/
|
||||
uint32 sweep_sat_per_byte = 2 [json_name = "sweep_sat_per_byte"];
|
||||
}
|
||||
|
||||
service WatchtowerClient {
|
||||
/*
|
||||
AddTower adds a new watchtower reachable at the given address and
|
||||
considers it for new sessions. If the watchtower already exists, then
|
||||
any new addresses included will be considered when dialing it for
|
||||
session negotiations and backups.
|
||||
*/
|
||||
rpc AddTower(AddTowerRequest) returns (AddTowerResponse);
|
||||
|
||||
/*
|
||||
RemoveTower removes a watchtower from being considered for future session
|
||||
negotiations and from being used for any subsequent backups until it's added
|
||||
again. If an address is provided, then this RPC only serves as a way of
|
||||
removing the address from the watchtower instead.
|
||||
*/
|
||||
rpc RemoveTower(RemoveTowerRequest) returns (RemoveTowerResponse);
|
||||
|
||||
// ListTowers returns the list of watchtowers registered with the client.
|
||||
rpc ListTowers(ListTowersRequest) returns (ListTowersResponse);
|
||||
|
||||
// GetTowerInfo retrieves information for a registered watchtower.
|
||||
rpc GetTowerInfo(GetTowerInfoRequest) returns (Tower);
|
||||
|
||||
// Stats returns the in-memory statistics of the client since startup.
|
||||
rpc Stats(StatsRequest) returns (StatsResponse);
|
||||
|
||||
// Policy returns the active watchtower client policy configuration.
|
||||
rpc Policy(PolicyRequest) returns (PolicyResponse);
|
||||
}
|
@ -35,6 +35,7 @@ import (
|
||||
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
|
||||
"github.com/lightningnetwork/lnd/lntest"
|
||||
"github.com/lightningnetwork/lnd/lntypes"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
@ -7677,7 +7678,7 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness,
|
||||
defer shutdownAndAssert(net, t, willy)
|
||||
|
||||
ctxt, _ := context.WithTimeout(ctxb, defaultTimeout)
|
||||
willyInfo, err := willy.WatchtowerClient.GetInfo(
|
||||
willyInfo, err := willy.Watchtower.GetInfo(
|
||||
ctxt, &watchtowerrpc.GetInfoRequest{},
|
||||
)
|
||||
if err != nil {
|
||||
@ -7708,22 +7709,27 @@ func testRevokedCloseRetributionAltruistWatchtower(net *lntest.NetworkHarness,
|
||||
externalIP, willyInfo.Uris[0])
|
||||
}
|
||||
|
||||
// Construct a URI from listening port and public key, since aren't
|
||||
// actually connecting remotely.
|
||||
willyTowerURI := fmt.Sprintf("%x@%s", willyInfo.Pubkey, listener)
|
||||
|
||||
// Dave will be the breached party. We set --nolisten to ensure Carol
|
||||
// won't be able to connect to him and trigger the channel data
|
||||
// protection logic automatically.
|
||||
dave, err := net.NewNode("Dave", []string{
|
||||
"--nolisten",
|
||||
"--wtclient.private-tower-uris=" + willyTowerURI,
|
||||
"--wtclient.active",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create new node: %v", err)
|
||||
}
|
||||
defer shutdownAndAssert(net, t, dave)
|
||||
|
||||
ctxt, _ = context.WithTimeout(ctxb, defaultTimeout)
|
||||
addTowerReq := &wtclientrpc.AddTowerRequest{
|
||||
Pubkey: willyInfo.Pubkey,
|
||||
Address: listener,
|
||||
}
|
||||
if _, err := dave.WatchtowerClient.AddTower(ctxt, addTowerReq); err != nil {
|
||||
t.Fatalf("unable to add willy's watchtower: %v", err)
|
||||
}
|
||||
|
||||
// We must let Dave have an open channel before she can send a node
|
||||
// announcement, so we open a channel with Carol,
|
||||
if err := net.ConnectNodes(ctxb, dave, carol); err != nil {
|
||||
|
@ -26,11 +26,12 @@ import (
|
||||
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
|
||||
"github.com/lightningnetwork/lnd/macaroons"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials"
|
||||
macaroon "gopkg.in/macaroon.v2"
|
||||
"gopkg.in/macaroon.v2"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -265,7 +266,8 @@ type HarnessNode struct {
|
||||
// because a name collision would occur with LightningClient.
|
||||
RouterClient routerrpc.RouterClient
|
||||
WalletKitClient walletrpc.WalletKitClient
|
||||
WatchtowerClient watchtowerrpc.WatchtowerClient
|
||||
Watchtower watchtowerrpc.WatchtowerClient
|
||||
WatchtowerClient wtclientrpc.WatchtowerClientClient
|
||||
}
|
||||
|
||||
// Assert *HarnessNode implements the lnrpc.LightningClient interface.
|
||||
@ -526,7 +528,8 @@ func (hn *HarnessNode) initLightningClient(conn *grpc.ClientConn) error {
|
||||
hn.InvoicesClient = invoicesrpc.NewInvoicesClient(conn)
|
||||
hn.RouterClient = routerrpc.NewRouterClient(conn)
|
||||
hn.WalletKitClient = walletrpc.NewWalletKitClient(conn)
|
||||
hn.WatchtowerClient = watchtowerrpc.NewWatchtowerClient(conn)
|
||||
hn.Watchtower = watchtowerrpc.NewWatchtowerClient(conn)
|
||||
hn.WatchtowerClient = wtclientrpc.NewWatchtowerClientClient(conn)
|
||||
|
||||
// Set the harness node's pubkey to what the node claims in GetInfo.
|
||||
err := hn.FetchNodeInfo()
|
||||
@ -729,6 +732,7 @@ func (hn *HarnessNode) stop() error {
|
||||
hn.processExit = nil
|
||||
hn.LightningClient = nil
|
||||
hn.WalletUnlockerClient = nil
|
||||
hn.Watchtower = nil
|
||||
hn.WatchtowerClient = nil
|
||||
return nil
|
||||
}
|
||||
|
2
log.go
2
log.go
@ -27,6 +27,7 @@ import (
|
||||
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
|
||||
"github.com/lightningnetwork/lnd/lnwallet"
|
||||
"github.com/lightningnetwork/lnd/monitoring"
|
||||
"github.com/lightningnetwork/lnd/netann"
|
||||
@ -120,6 +121,7 @@ func init() {
|
||||
wtclient.UseLogger(wtclLog)
|
||||
|
||||
addSubLogger(routerrpc.Subsystem, routerrpc.UseLogger)
|
||||
addSubLogger(wtclientrpc.Subsystem, wtclientrpc.UseLogger)
|
||||
}
|
||||
|
||||
// addSubLogger is a helper method to conveniently register the logger of a sub
|
||||
|
@ -56,7 +56,7 @@ endif
|
||||
|
||||
|
||||
# Construct the integration test command with the added build flags.
|
||||
ITEST_TAGS := $(DEV_TAGS) rpctest chainrpc walletrpc signrpc invoicesrpc autopilotrpc routerrpc watchtowerrpc
|
||||
ITEST_TAGS := $(DEV_TAGS) rpctest chainrpc walletrpc signrpc invoicesrpc autopilotrpc routerrpc watchtowerrpc wtclientrpc
|
||||
|
||||
# Default to btcd backend if not set.
|
||||
ifneq ($(backend),)
|
||||
|
@ -497,8 +497,8 @@ func newRPCServer(s *server, macService *macaroons.Service,
|
||||
err = subServerCgs.PopulateDependencies(
|
||||
s.cc, networkDir, macService, atpl, invoiceRegistry,
|
||||
s.htlcSwitch, activeNetParams.Params, s.chanRouter,
|
||||
routerBackend, s.nodeSigner, s.chanDB, s.sweeper,
|
||||
tower,
|
||||
routerBackend, s.nodeSigner, s.chanDB, s.sweeper, tower,
|
||||
s.towerClient, cfg.net.ResolveTCPAddr,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -1081,7 +1081,7 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cfg.WtClient.IsActive() {
|
||||
if cfg.WtClient.Active {
|
||||
policy := wtpolicy.DefaultPolicy()
|
||||
|
||||
if cfg.WtClient.SweepFeeRate != 0 {
|
||||
@ -1104,7 +1104,6 @@ func newServer(listenAddrs []net.Addr, chanDB *channeldb.DB,
|
||||
Dial: cfg.net.Dial,
|
||||
AuthDial: wtclient.AuthDial,
|
||||
DB: towerClientDB,
|
||||
PrivateTower: cfg.WtClient.PrivateTowers[0],
|
||||
Policy: policy,
|
||||
ChainHash: *activeNetParams.GenesisHash,
|
||||
MinBackoff: 10 * time.Second,
|
||||
|
@ -9,6 +9,7 @@ import (
|
||||
"github.com/lightningnetwork/lnd/channeldb"
|
||||
"github.com/lightningnetwork/lnd/htlcswitch"
|
||||
"github.com/lightningnetwork/lnd/invoices"
|
||||
"github.com/lightningnetwork/lnd/lncfg"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/autopilotrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/chainrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
|
||||
@ -16,11 +17,13 @@ import (
|
||||
"github.com/lightningnetwork/lnd/lnrpc/signrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/watchtowerrpc"
|
||||
"github.com/lightningnetwork/lnd/lnrpc/wtclientrpc"
|
||||
"github.com/lightningnetwork/lnd/macaroons"
|
||||
"github.com/lightningnetwork/lnd/netann"
|
||||
"github.com/lightningnetwork/lnd/routing"
|
||||
"github.com/lightningnetwork/lnd/sweep"
|
||||
"github.com/lightningnetwork/lnd/watchtower"
|
||||
"github.com/lightningnetwork/lnd/watchtower/wtclient"
|
||||
)
|
||||
|
||||
// subRPCServerConfigs is special sub-config in the main configuration that
|
||||
@ -62,6 +65,12 @@ type subRPCServerConfigs struct {
|
||||
// WatchtowerRPC is a sub-RPC server that exposes functionality allowing
|
||||
// clients to monitor and control their embedded watchtower.
|
||||
WatchtowerRPC *watchtowerrpc.Config `group:"watchtowerrpc" namespace:"watchtowerrpc"`
|
||||
|
||||
// WatchtowerClientRPC is a sub-RPC server that exposes functionality
|
||||
// that allows clients to interact with the active watchtower client
|
||||
// instance within lnd in order to add, remove, list registered client
|
||||
// towers, etc.
|
||||
WatchtowerClientRPC *wtclientrpc.Config `group:"wtclientrpc" namespace:"wtclientrpc"`
|
||||
}
|
||||
|
||||
// PopulateDependencies attempts to iterate through all the sub-server configs
|
||||
@ -81,7 +90,9 @@ func (s *subRPCServerConfigs) PopulateDependencies(cc *chainControl,
|
||||
nodeSigner *netann.NodeSigner,
|
||||
chanDB *channeldb.DB,
|
||||
sweeper *sweep.UtxoSweeper,
|
||||
tower *watchtower.Standalone) error {
|
||||
tower *watchtower.Standalone,
|
||||
towerClient wtclient.Client,
|
||||
tcpResolver lncfg.TCPResolver) error {
|
||||
|
||||
// First, we'll use reflect to obtain a version of the config struct
|
||||
// that allows us to programmatically inspect its fields.
|
||||
@ -223,6 +234,21 @@ func (s *subRPCServerConfigs) PopulateDependencies(cc *chainControl,
|
||||
reflect.ValueOf(tower),
|
||||
)
|
||||
|
||||
case *wtclientrpc.Config:
|
||||
subCfgValue := extractReflectValue(subCfg)
|
||||
|
||||
if towerClient != nil {
|
||||
subCfgValue.FieldByName("Active").Set(
|
||||
reflect.ValueOf(towerClient != nil),
|
||||
)
|
||||
subCfgValue.FieldByName("Client").Set(
|
||||
reflect.ValueOf(towerClient),
|
||||
)
|
||||
}
|
||||
subCfgValue.FieldByName("Resolver").Set(
|
||||
reflect.ValueOf(tcpResolver),
|
||||
)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown field: %v, %T", fieldName,
|
||||
cfg)
|
||||
|
@ -2,6 +2,7 @@ package wtclient
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/lightningnetwork/lnd/watchtower/wtdb"
|
||||
@ -10,6 +11,20 @@ import (
|
||||
// TowerCandidateIterator provides an abstraction for iterating through possible
|
||||
// watchtower addresses when attempting to create a new session.
|
||||
type TowerCandidateIterator interface {
|
||||
// AddCandidate adds a new candidate tower to the iterator. If the
|
||||
// candidate already exists, then any new addresses are added to it.
|
||||
AddCandidate(*wtdb.Tower)
|
||||
|
||||
// RemoveCandidate removes an existing candidate tower from the
|
||||
// iterator. An optional address can be provided to indicate a stale
|
||||
// tower address to remove it. If it isn't provided, then the tower is
|
||||
// completely removed from the iterator.
|
||||
RemoveCandidate(wtdb.TowerID, net.Addr)
|
||||
|
||||
// IsActive determines whether a given tower is exists within the
|
||||
// iterator.
|
||||
IsActive(wtdb.TowerID) bool
|
||||
|
||||
// Reset clears any internal iterator state, making previously taken
|
||||
// candidates available as long as they remain in the set.
|
||||
Reset() error
|
||||
@ -18,17 +33,14 @@ type TowerCandidateIterator interface {
|
||||
// to return results in any particular order. If no more candidates are
|
||||
// available, ErrTowerCandidatesExhausted is returned.
|
||||
Next() (*wtdb.Tower, error)
|
||||
|
||||
// TowerIDs returns the set of tower IDs contained in the iterator,
|
||||
// which can be used to filter candidate sessions for the active tower.
|
||||
TowerIDs() map[wtdb.TowerID]struct{}
|
||||
}
|
||||
|
||||
// towerListIterator is a linked-list backed TowerCandidateIterator.
|
||||
type towerListIterator struct {
|
||||
mu sync.Mutex
|
||||
candidates *list.List
|
||||
queue *list.List
|
||||
nextCandidate *list.Element
|
||||
candidates map[wtdb.TowerID]*wtdb.Tower
|
||||
}
|
||||
|
||||
// Compile-time constraint to ensure *towerListIterator implements the
|
||||
@ -39,11 +51,13 @@ var _ TowerCandidateIterator = (*towerListIterator)(nil)
|
||||
// of lnwire.NetAddresses.
|
||||
func newTowerListIterator(candidates ...*wtdb.Tower) *towerListIterator {
|
||||
iter := &towerListIterator{
|
||||
candidates: list.New(),
|
||||
queue: list.New(),
|
||||
candidates: make(map[wtdb.TowerID]*wtdb.Tower),
|
||||
}
|
||||
|
||||
for _, candidate := range candidates {
|
||||
iter.candidates.PushBack(candidate)
|
||||
iter.queue.PushBack(candidate.ID)
|
||||
iter.candidates[candidate.ID] = candidate
|
||||
}
|
||||
iter.Reset()
|
||||
|
||||
@ -57,22 +71,11 @@ func (t *towerListIterator) Reset() error {
|
||||
defer t.mu.Unlock()
|
||||
|
||||
// Reset the next candidate to the front of the linked-list.
|
||||
t.nextCandidate = t.candidates.Front()
|
||||
t.nextCandidate = t.queue.Front()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// TowerIDs returns the set of tower IDs contained in the iterator, which can be
|
||||
// used to filter candidate sessions for the active tower.
|
||||
func (t *towerListIterator) TowerIDs() map[wtdb.TowerID]struct{} {
|
||||
ids := make(map[wtdb.TowerID]struct{})
|
||||
for e := t.candidates.Front(); e != nil; e = e.Next() {
|
||||
tower := e.Value.(*wtdb.Tower)
|
||||
ids[tower.ID] = struct{}{}
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Next returns the next candidate tower. This iterator will always return
|
||||
// candidates in the order given when the iterator was instantiated. If no more
|
||||
// candidates are available, ErrTowerCandidatesExhausted is returned.
|
||||
@ -80,18 +83,76 @@ func (t *towerListIterator) Next() (*wtdb.Tower, error) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
// If the next candidate is nil, we've exhausted the list.
|
||||
if t.nextCandidate == nil {
|
||||
return nil, ErrTowerCandidatesExhausted
|
||||
for t.nextCandidate != nil {
|
||||
// Propose the tower at the front of the list.
|
||||
towerID := t.nextCandidate.Value.(wtdb.TowerID)
|
||||
|
||||
// Check whether this tower is still considered a candidate. If
|
||||
// it's not, we'll proceed to the next.
|
||||
tower, ok := t.candidates[towerID]
|
||||
if !ok {
|
||||
nextCandidate := t.nextCandidate.Next()
|
||||
t.queue.Remove(t.nextCandidate)
|
||||
t.nextCandidate = nextCandidate
|
||||
continue
|
||||
}
|
||||
|
||||
// Set the next candidate to the subsequent element.
|
||||
t.nextCandidate = t.nextCandidate.Next()
|
||||
return tower, nil
|
||||
}
|
||||
|
||||
// Propose the tower at the front of the list.
|
||||
tower := t.nextCandidate.Value.(*wtdb.Tower)
|
||||
return nil, ErrTowerCandidatesExhausted
|
||||
}
|
||||
|
||||
// Set the next candidate to the subsequent element.
|
||||
t.nextCandidate = t.nextCandidate.Next()
|
||||
// AddCandidate adds a new candidate tower to the iterator. If the candidate
|
||||
// already exists, then any new addresses are added to it.
|
||||
func (t *towerListIterator) AddCandidate(candidate *wtdb.Tower) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
return tower, nil
|
||||
if tower, ok := t.candidates[candidate.ID]; !ok {
|
||||
t.queue.PushBack(candidate.ID)
|
||||
t.candidates[candidate.ID] = candidate
|
||||
|
||||
// If we've reached the end of our queue, then this candidate
|
||||
// will become the next.
|
||||
if t.nextCandidate == nil {
|
||||
t.nextCandidate = t.queue.Back()
|
||||
}
|
||||
} else {
|
||||
for _, addr := range candidate.Addresses {
|
||||
tower.AddAddress(addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveCandidate removes an existing candidate tower from the iterator. An
|
||||
// optional address can be provided to indicate a stale tower address to remove
|
||||
// it. If it isn't provided, then the tower is completely removed from the
|
||||
// iterator.
|
||||
func (t *towerListIterator) RemoveCandidate(candidate wtdb.TowerID, addr net.Addr) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
tower, ok := t.candidates[candidate]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if addr != nil {
|
||||
tower.RemoveAddress(addr)
|
||||
} else {
|
||||
delete(t.candidates, candidate)
|
||||
}
|
||||
}
|
||||
|
||||
// IsActive determines whether a given tower is exists within the iterator.
|
||||
func (t *towerListIterator) IsActive(tower wtdb.TowerID) bool {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
|
||||
_, ok := t.candidates[tower]
|
||||
return ok
|
||||
}
|
||||
|
||||
// TODO(conner): implement graph-backed candidate iterator for public towers.
|
||||
|
157
watchtower/wtclient/candidate_iterator_test.go
Normal file
157
watchtower/wtclient/candidate_iterator_test.go
Normal file
@ -0,0 +1,157 @@
|
||||
package wtclient
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"math/rand"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
"github.com/lightningnetwork/lnd/watchtower/wtdb"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().Unix())
|
||||
}
|
||||
|
||||
func randAddr(t *testing.T) net.Addr {
|
||||
var ip [4]byte
|
||||
if _, err := rand.Read(ip[:]); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var port [2]byte
|
||||
if _, err := rand.Read(port[:]); err != nil {
|
||||
t.Fatal(err)
|
||||
|
||||
}
|
||||
return &net.TCPAddr{
|
||||
IP: net.IP(ip[:]),
|
||||
Port: int(binary.BigEndian.Uint16(port[:])),
|
||||
}
|
||||
}
|
||||
|
||||
func randTower(t *testing.T) *wtdb.Tower {
|
||||
priv, err := btcec.NewPrivateKey(btcec.S256())
|
||||
if err != nil {
|
||||
t.Fatalf("unable to create private key: %v", err)
|
||||
}
|
||||
pubKey := priv.PubKey()
|
||||
pubKey.Curve = nil
|
||||
return &wtdb.Tower{
|
||||
ID: wtdb.TowerID(rand.Uint64()),
|
||||
IdentityKey: pubKey,
|
||||
Addresses: []net.Addr{randAddr(t)},
|
||||
}
|
||||
}
|
||||
|
||||
func copyTower(tower *wtdb.Tower) *wtdb.Tower {
|
||||
t := &wtdb.Tower{
|
||||
ID: tower.ID,
|
||||
IdentityKey: tower.IdentityKey,
|
||||
Addresses: make([]net.Addr, len(tower.Addresses)),
|
||||
}
|
||||
copy(t.Addresses, tower.Addresses)
|
||||
return t
|
||||
}
|
||||
|
||||
func assertActiveCandidate(t *testing.T, i TowerCandidateIterator,
|
||||
c *wtdb.Tower, active bool) {
|
||||
|
||||
isCandidate := i.IsActive(c.ID)
|
||||
if isCandidate && !active {
|
||||
t.Fatalf("expected tower %v to no longer be an active candidate",
|
||||
c.ID)
|
||||
}
|
||||
if !isCandidate && active {
|
||||
t.Fatalf("expected tower %v to be an active candidate", c.ID)
|
||||
}
|
||||
}
|
||||
|
||||
func assertNextCandidate(t *testing.T, i TowerCandidateIterator, c *wtdb.Tower) {
|
||||
t.Helper()
|
||||
|
||||
tower, err := i.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(tower, c) {
|
||||
t.Fatalf("expected tower: %v\ngot: %v", spew.Sdump(c),
|
||||
spew.Sdump(tower))
|
||||
}
|
||||
}
|
||||
|
||||
// TestTowerCandidateIterator asserts the internal state of a
|
||||
// TowerCandidateIterator after a series of updates to its candidates.
|
||||
func TestTowerCandidateIterator(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
// We'll start our test by creating an iterator of four candidate
|
||||
// towers. We'll use copies of these towers within the iterator to
|
||||
// ensure the iterator properly updates the state of its candidates.
|
||||
const numTowers = 4
|
||||
towers := make([]*wtdb.Tower, 0, numTowers)
|
||||
for i := 0; i < numTowers; i++ {
|
||||
towers = append(towers, randTower(t))
|
||||
}
|
||||
towerCopies := make([]*wtdb.Tower, 0, numTowers)
|
||||
for _, tower := range towers {
|
||||
towerCopies = append(towerCopies, copyTower(tower))
|
||||
}
|
||||
towerIterator := newTowerListIterator(towerCopies...)
|
||||
|
||||
// We should expect to see all of our candidates in the order that they
|
||||
// were added.
|
||||
for _, expTower := range towers {
|
||||
tower, err := towerIterator.Next()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(tower, expTower) {
|
||||
t.Fatalf("expected tower: %v\ngot: %v",
|
||||
spew.Sdump(expTower), spew.Sdump(tower))
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := towerIterator.Next(); err != ErrTowerCandidatesExhausted {
|
||||
t.Fatalf("expected ErrTowerCandidatesExhausted, got %v", err)
|
||||
}
|
||||
towerIterator.Reset()
|
||||
|
||||
// We'll then attempt to test the RemoveCandidate behavior of the
|
||||
// iterator. We'll remove the address of the first tower, which should
|
||||
// result in it not having any addresses left, but still being an active
|
||||
// candidate.
|
||||
firstTower := towers[0]
|
||||
firstTowerAddr := firstTower.Addresses[0]
|
||||
firstTower.RemoveAddress(firstTowerAddr)
|
||||
towerIterator.RemoveCandidate(firstTower.ID, firstTowerAddr)
|
||||
assertActiveCandidate(t, towerIterator, firstTower, true)
|
||||
assertNextCandidate(t, towerIterator, firstTower)
|
||||
|
||||
// We'll then remove the second tower completely from the iterator by
|
||||
// not providing the optional address. Since it's been removed, we
|
||||
// should expect to see the third tower next.
|
||||
secondTower, thirdTower := towers[1], towers[2]
|
||||
towerIterator.RemoveCandidate(secondTower.ID, nil)
|
||||
assertActiveCandidate(t, towerIterator, secondTower, false)
|
||||
assertNextCandidate(t, towerIterator, thirdTower)
|
||||
|
||||
// We'll then update the fourth candidate with a new address. A
|
||||
// duplicate shouldn't be added since it already exists within the
|
||||
// iterator, but the new address should be.
|
||||
fourthTower := towers[3]
|
||||
assertActiveCandidate(t, towerIterator, fourthTower, true)
|
||||
fourthTower.AddAddress(randAddr(t))
|
||||
towerIterator.AddCandidate(fourthTower)
|
||||
assertNextCandidate(t, towerIterator, fourthTower)
|
||||
|
||||
// Finally, we'll attempt to add a new candidate to the end of the
|
||||
// iterator. Since it didn't already exist and we've reached the end, it
|
||||
// should be available as the next candidate.
|
||||
towerIterator.AddCandidate(secondTower)
|
||||
assertActiveCandidate(t, towerIterator, secondTower, true)
|
||||
assertNextCandidate(t, towerIterator, secondTower)
|
||||
}
|
@ -2,7 +2,9 @@ package wtclient
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -36,9 +38,48 @@ const (
|
||||
DefaultForceQuitDelay = 10 * time.Second
|
||||
)
|
||||
|
||||
// RegisteredTower encompasses information about a registered watchtower with
|
||||
// the client.
|
||||
type RegisteredTower struct {
|
||||
*wtdb.Tower
|
||||
|
||||
// Sessions is the set of sessions corresponding to the watchtower.
|
||||
Sessions map[wtdb.SessionID]*wtdb.ClientSession
|
||||
|
||||
// ActiveSessionCandidate determines whether the watchtower is currently
|
||||
// being considered for new sessions.
|
||||
ActiveSessionCandidate bool
|
||||
}
|
||||
|
||||
// Client is the primary interface used by the daemon to control a client's
|
||||
// lifecycle and backup revoked states.
|
||||
type Client interface {
|
||||
// AddTower adds a new watchtower reachable at the given address and
|
||||
// considers it for new sessions. If the watchtower already exists, then
|
||||
// any new addresses included will be considered when dialing it for
|
||||
// session negotiations and backups.
|
||||
AddTower(*lnwire.NetAddress) error
|
||||
|
||||
// RemoveTower removes a watchtower from being considered for future
|
||||
// session negotiations and from being used for any subsequent backups
|
||||
// until it's added again. If an address is provided, then this call
|
||||
// only serves as a way of removing the address from the watchtower
|
||||
// instead.
|
||||
RemoveTower(*btcec.PublicKey, net.Addr) error
|
||||
|
||||
// RegisteredTowers retrieves the list of watchtowers registered with
|
||||
// the client.
|
||||
RegisteredTowers() ([]*RegisteredTower, error)
|
||||
|
||||
// LookupTower retrieves a registered watchtower through its public key.
|
||||
LookupTower(*btcec.PublicKey) (*RegisteredTower, error)
|
||||
|
||||
// Stats returns the in-memory statistics of the client since startup.
|
||||
Stats() ClientStats
|
||||
|
||||
// Policy returns the active client policy configuration.
|
||||
Policy() wtpolicy.Policy
|
||||
|
||||
// RegisterChannel persistently initializes any channel-dependent
|
||||
// parameters within the client. This should be called during link
|
||||
// startup to ensure that the client is able to support the link during
|
||||
@ -100,10 +141,6 @@ type Config struct {
|
||||
// new sessions will be requested immediately.
|
||||
Policy wtpolicy.Policy
|
||||
|
||||
// PrivateTower is the net address of a private tower. The client will
|
||||
// try to create all sessions with this tower.
|
||||
PrivateTower *lnwire.NetAddress
|
||||
|
||||
// ChainHash identifies the chain that the client is on and for which
|
||||
// the tower must be watching to monitor for breaches.
|
||||
ChainHash chainhash.Hash
|
||||
@ -136,6 +173,39 @@ type Config struct {
|
||||
MaxBackoff time.Duration
|
||||
}
|
||||
|
||||
// newTowerMsg is an internal message we'll use within the TowerClient to signal
|
||||
// that a new tower can be considered.
|
||||
type newTowerMsg struct {
|
||||
// addr is the tower's reachable address that we'll use to establish a
|
||||
// connection with.
|
||||
addr *lnwire.NetAddress
|
||||
|
||||
// errChan is the channel through which we'll send a response back to
|
||||
// the caller when handling their request.
|
||||
//
|
||||
// NOTE: This channel must be buffered.
|
||||
errChan chan error
|
||||
}
|
||||
|
||||
// staleTowerMsg is an internal message we'll use within the TowerClient to
|
||||
// signal that a tower should no longer be considered.
|
||||
type staleTowerMsg struct {
|
||||
// pubKey is the identifying public key of the watchtower.
|
||||
pubKey *btcec.PublicKey
|
||||
|
||||
// addr is an optional field that when set signals that the address
|
||||
// should be removed from the watchtower's set of addresses, indicating
|
||||
// that it is stale. If it's not set, then the watchtower should be
|
||||
// no longer be considered for new sessions.
|
||||
addr net.Addr
|
||||
|
||||
// errChan is the channel through which we'll send a response back to
|
||||
// the caller when handling their request.
|
||||
//
|
||||
// NOTE: This channel must be buffered.
|
||||
errChan chan error
|
||||
}
|
||||
|
||||
// TowerClient is a concrete implementation of the Client interface, offering a
|
||||
// non-blocking, reliable subsystem for backing up revoked states to a specified
|
||||
// private tower.
|
||||
@ -149,9 +219,9 @@ type TowerClient struct {
|
||||
pipeline *taskPipeline
|
||||
|
||||
negotiator SessionNegotiator
|
||||
candidateTowers TowerCandidateIterator
|
||||
candidateSessions map[wtdb.SessionID]*wtdb.ClientSession
|
||||
activeSessions sessionQueueSet
|
||||
targetTowerIDs map[wtdb.TowerID]struct{}
|
||||
|
||||
sessionQueue *sessionQueue
|
||||
prevTask *backupTask
|
||||
@ -161,7 +231,10 @@ type TowerClient struct {
|
||||
chanCommitHeights map[lnwire.ChannelID]uint64
|
||||
|
||||
statTicker *time.Ticker
|
||||
stats clientStats
|
||||
stats *ClientStats
|
||||
|
||||
newTowers chan *newTowerMsg
|
||||
staleTowers chan *staleTowerMsg
|
||||
|
||||
wg sync.WaitGroup
|
||||
forceQuit chan struct{}
|
||||
@ -189,26 +262,74 @@ func New(config *Config) (*TowerClient, error) {
|
||||
cfg.WriteTimeout = DefaultWriteTimeout
|
||||
}
|
||||
|
||||
// Record the tower in our database, also loading any addresses
|
||||
// previously associated with its public key.
|
||||
tower, err := cfg.DB.CreateTower(cfg.PrivateTower)
|
||||
// Next, load all candidate sessions and towers from the database into
|
||||
// the client. We will use any of these session if their policies match
|
||||
// the current policy of the client, otherwise they will be ignored and
|
||||
// new sessions will be requested.
|
||||
sessions, err := cfg.DB.ListClientSessions(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Infof("Using private watchtower %s, offering policy %s",
|
||||
cfg.PrivateTower, cfg.Policy)
|
||||
candidateSessions := make(map[wtdb.SessionID]*wtdb.ClientSession)
|
||||
sessionTowers := make(map[wtdb.TowerID]*wtdb.Tower)
|
||||
for _, s := range sessions {
|
||||
// Candidate sessions must be in an active state.
|
||||
if s.Status != wtdb.CSessionActive {
|
||||
continue
|
||||
}
|
||||
|
||||
candidates := newTowerListIterator(tower)
|
||||
targetTowerIDs := candidates.TowerIDs()
|
||||
// Reload the tower from disk using the tower ID contained in
|
||||
// each candidate session. We will also rederive any session
|
||||
// keys needed to be able to communicate with the towers and
|
||||
// authenticate session requests. This prevents us from having
|
||||
// to store the private keys on disk.
|
||||
tower, ok := sessionTowers[s.TowerID]
|
||||
if !ok {
|
||||
var err error
|
||||
tower, err = cfg.DB.LoadTowerByID(s.TowerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
s.Tower = tower
|
||||
|
||||
sessionKey, err := DeriveSessionKey(cfg.SecretKeyRing, s.KeyIndex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s.SessionPrivKey = sessionKey
|
||||
|
||||
candidateSessions[s.ID] = s
|
||||
sessionTowers[tower.ID] = tower
|
||||
}
|
||||
|
||||
var candidateTowers []*wtdb.Tower
|
||||
for _, tower := range sessionTowers {
|
||||
log.Infof("Using private watchtower %s, offering policy %s",
|
||||
tower, cfg.Policy)
|
||||
candidateTowers = append(candidateTowers, tower)
|
||||
}
|
||||
|
||||
// Load the sweep pkscripts that have been generated for all previously
|
||||
// registered channels.
|
||||
chanSummaries, err := cfg.DB.FetchChanSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &TowerClient{
|
||||
cfg: cfg,
|
||||
pipeline: newTaskPipeline(),
|
||||
activeSessions: make(sessionQueueSet),
|
||||
targetTowerIDs: targetTowerIDs,
|
||||
statTicker: time.NewTicker(DefaultStatInterval),
|
||||
forceQuit: make(chan struct{}),
|
||||
cfg: cfg,
|
||||
pipeline: newTaskPipeline(),
|
||||
candidateTowers: newTowerListIterator(candidateTowers...),
|
||||
candidateSessions: candidateSessions,
|
||||
activeSessions: make(sessionQueueSet),
|
||||
summaries: chanSummaries,
|
||||
statTicker: time.NewTicker(DefaultStatInterval),
|
||||
stats: new(ClientStats),
|
||||
newTowers: make(chan *newTowerMsg),
|
||||
staleTowers: make(chan *staleTowerMsg),
|
||||
forceQuit: make(chan struct{}),
|
||||
}
|
||||
c.negotiator = newSessionNegotiator(&NegotiatorConfig{
|
||||
DB: cfg.DB,
|
||||
@ -218,53 +339,15 @@ func New(config *Config) (*TowerClient, error) {
|
||||
SendMessage: c.sendMessage,
|
||||
ReadMessage: c.readMessage,
|
||||
Dial: c.dial,
|
||||
Candidates: candidates,
|
||||
Candidates: c.candidateTowers,
|
||||
MinBackoff: cfg.MinBackoff,
|
||||
MaxBackoff: cfg.MaxBackoff,
|
||||
})
|
||||
|
||||
// Next, load all active sessions from the db into the client. We will
|
||||
// use any of these session if their policies match the current policy
|
||||
// of the client, otherwise they will be ignored and new sessions will
|
||||
// be requested.
|
||||
c.candidateSessions, err = c.cfg.DB.ListClientSessions()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Reload any towers from disk using the tower IDs contained in each
|
||||
// candidate session. We will also rederive any session keys needed to
|
||||
// be able to communicate with the towers and authenticate session
|
||||
// requests. This prevents us from having to store the private keys on
|
||||
// disk.
|
||||
for _, s := range c.candidateSessions {
|
||||
tower, err := c.cfg.DB.LoadTower(s.TowerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sessionPriv, err := DeriveSessionKey(
|
||||
c.cfg.SecretKeyRing, s.KeyIndex,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.Tower = tower
|
||||
s.SessionPrivKey = sessionPriv
|
||||
}
|
||||
|
||||
// Reconstruct the highest commit height processed for each channel
|
||||
// under the client's current policy.
|
||||
c.buildHighestCommitHeights()
|
||||
|
||||
// Finally, load the sweep pkscripts that have been generated for all
|
||||
// previously registered channels.
|
||||
c.summaries, err = c.cfg.DB.FetchChanSummaries()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
@ -531,12 +614,6 @@ func (c *TowerClient) nextSessionQueue() *sessionQueue {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip any sessions that are still active, but are not for the
|
||||
// users currently configured tower.
|
||||
if _, ok := c.targetTowerIDs[sessionInfo.TowerID]; !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
candidateSession = sessionInfo
|
||||
break
|
||||
}
|
||||
@ -586,19 +663,38 @@ func (c *TowerClient) backupDispatcher() {
|
||||
c.candidateSessions[session.ID] = session
|
||||
c.stats.sessionAcquired()
|
||||
|
||||
// We'll continue to choose the newly negotiated
|
||||
// session as our active session queue.
|
||||
continue
|
||||
|
||||
case <-c.statTicker.C:
|
||||
log.Infof("Client stats: %s", c.stats)
|
||||
|
||||
// Instead of looping, we'll jump back into the
|
||||
// select case and await the delivery of the
|
||||
// session to prevent us from re-requesting
|
||||
// additional sessions.
|
||||
goto awaitSession
|
||||
// A new tower has been requested to be added. We'll
|
||||
// update our persisted and in-memory state and consider
|
||||
// its corresponding sessions, if any, as new
|
||||
// candidates.
|
||||
case msg := <-c.newTowers:
|
||||
msg.errChan <- c.handleNewTower(msg)
|
||||
|
||||
// A tower has been requested to be removed. We'll
|
||||
// immediately return an error as we want to avoid the
|
||||
// possibility of a new session being negotiated with
|
||||
// this request's tower.
|
||||
case msg := <-c.staleTowers:
|
||||
msg.errChan <- errors.New("removing towers " +
|
||||
"is disallowed while a new session " +
|
||||
"negotiation is in progress")
|
||||
|
||||
case <-c.forceQuit:
|
||||
return
|
||||
}
|
||||
|
||||
// Instead of looping, we'll jump back into the select
|
||||
// case and await the delivery of the session to prevent
|
||||
// us from re-requesting additional sessions.
|
||||
goto awaitSession
|
||||
|
||||
// No active session queue but have additional sessions.
|
||||
case c.sessionQueue == nil && len(c.candidateSessions) > 0:
|
||||
// We've exhausted the prior session, we'll pop another
|
||||
@ -633,7 +729,7 @@ func (c *TowerClient) backupDispatcher() {
|
||||
// we can request new sessions before the session is
|
||||
// fully empty, which this case would handle.
|
||||
case session := <-c.negotiator.NewSessions():
|
||||
log.Warnf("Acquired new session with id=%s",
|
||||
log.Warnf("Acquired new session with id=%s "+
|
||||
"while processing tasks", session.ID)
|
||||
c.candidateSessions[session.ID] = session
|
||||
c.stats.sessionAcquired()
|
||||
@ -654,6 +750,20 @@ func (c *TowerClient) backupDispatcher() {
|
||||
|
||||
c.stats.taskReceived()
|
||||
c.processTask(task)
|
||||
|
||||
// A new tower has been requested to be added. We'll
|
||||
// update our persisted and in-memory state and consider
|
||||
// its corresponding sessions, if any, as new
|
||||
// candidates.
|
||||
case msg := <-c.newTowers:
|
||||
msg.errChan <- c.handleNewTower(msg)
|
||||
|
||||
// A tower has been removed, so we'll remove certain
|
||||
// information that's persisted and also in our
|
||||
// in-memory state depending on the request, and set any
|
||||
// of its corresponding candidate sessions as inactive.
|
||||
case msg := <-c.staleTowers:
|
||||
msg.errChan <- c.handleStaleTower(msg)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -883,6 +993,207 @@ func (c *TowerClient) initActiveQueue(s *wtdb.ClientSession) *sessionQueue {
|
||||
return sq
|
||||
}
|
||||
|
||||
// AddTower adds a new watchtower reachable at the given address and considers
|
||||
// it for new sessions. If the watchtower already exists, then any new addresses
|
||||
// included will be considered when dialing it for session negotiations and
|
||||
// backups.
|
||||
func (c *TowerClient) AddTower(addr *lnwire.NetAddress) error {
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
select {
|
||||
case c.newTowers <- &newTowerMsg{
|
||||
addr: addr,
|
||||
errChan: errChan,
|
||||
}:
|
||||
case <-c.pipeline.quit:
|
||||
return ErrClientExiting
|
||||
case <-c.pipeline.forceQuit:
|
||||
return ErrClientExiting
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-errChan:
|
||||
return err
|
||||
case <-c.pipeline.quit:
|
||||
return ErrClientExiting
|
||||
case <-c.pipeline.forceQuit:
|
||||
return ErrClientExiting
|
||||
}
|
||||
}
|
||||
|
||||
// handleNewTower handles a request for a new tower to be added. If the tower
|
||||
// already exists, then its corresponding sessions, if any, will be set
|
||||
// considered as candidates.
|
||||
func (c *TowerClient) handleNewTower(msg *newTowerMsg) error {
|
||||
// We'll start by updating our persisted state, followed by our
|
||||
// in-memory state, with the new tower. This might not actually be a new
|
||||
// tower, but it might include a new address at which it can be reached.
|
||||
tower, err := c.cfg.DB.CreateTower(msg.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.candidateTowers.AddCandidate(tower)
|
||||
|
||||
// Include all of its corresponding sessions to our set of candidates.
|
||||
sessions, err := c.cfg.DB.ListClientSessions(&tower.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to determine sessions for tower %x: "+
|
||||
"%v", tower.IdentityKey.SerializeCompressed(), err)
|
||||
}
|
||||
for id, session := range sessions {
|
||||
c.candidateSessions[id] = session
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveTower removes a watchtower from being considered for future session
|
||||
// negotiations and from being used for any subsequent backups until it's added
|
||||
// again. If an address is provided, then this call only serves as a way of
|
||||
// removing the address from the watchtower instead.
|
||||
func (c *TowerClient) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error {
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
select {
|
||||
case c.staleTowers <- &staleTowerMsg{
|
||||
pubKey: pubKey,
|
||||
addr: addr,
|
||||
errChan: errChan,
|
||||
}:
|
||||
case <-c.pipeline.quit:
|
||||
return ErrClientExiting
|
||||
case <-c.pipeline.forceQuit:
|
||||
return ErrClientExiting
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-errChan:
|
||||
return err
|
||||
case <-c.pipeline.quit:
|
||||
return ErrClientExiting
|
||||
case <-c.pipeline.forceQuit:
|
||||
return ErrClientExiting
|
||||
}
|
||||
}
|
||||
|
||||
// handleNewTower handles a request for an existing tower to be removed. If none
|
||||
// of the tower's sessions have pending updates, then they will become inactive
|
||||
// and removed as candidates. If the active session queue corresponds to any of
|
||||
// these sessions, a new one will be negotiated.
|
||||
func (c *TowerClient) handleStaleTower(msg *staleTowerMsg) error {
|
||||
// We'll load the tower before potentially removing it in order to
|
||||
// retrieve its ID within the database.
|
||||
tower, err := c.cfg.DB.LoadTower(msg.pubKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// We'll update our persisted state, followed by our in-memory state,
|
||||
// with the stale tower.
|
||||
if err := c.cfg.DB.RemoveTower(msg.pubKey, msg.addr); err != nil {
|
||||
return err
|
||||
}
|
||||
c.candidateTowers.RemoveCandidate(tower.ID, msg.addr)
|
||||
|
||||
// If an address was provided, then we're only meant to remove the
|
||||
// address from the tower, so there's nothing left for us to do.
|
||||
if msg.addr != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise, the tower should no longer be used for future session
|
||||
// negotiations and backups.
|
||||
pubKey := msg.pubKey.SerializeCompressed()
|
||||
sessions, err := c.cfg.DB.ListClientSessions(&tower.ID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to retrieve sessions for tower %x: "+
|
||||
"%v", pubKey, err)
|
||||
}
|
||||
for sessionID := range sessions {
|
||||
delete(c.candidateSessions, sessionID)
|
||||
}
|
||||
|
||||
// If our active session queue corresponds to the stale tower, we'll
|
||||
// proceed to negotiate a new one.
|
||||
if c.sessionQueue != nil {
|
||||
activeTower := c.sessionQueue.towerAddr.IdentityKey.SerializeCompressed()
|
||||
if bytes.Equal(pubKey, activeTower) {
|
||||
c.sessionQueue = nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisteredTowers retrieves the list of watchtowers registered with the
|
||||
// client.
|
||||
func (c *TowerClient) RegisteredTowers() ([]*RegisteredTower, error) {
|
||||
// Retrieve all of our towers along with all of our sessions.
|
||||
towers, err := c.cfg.DB.ListTowers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientSessions, err := c.cfg.DB.ListClientSessions(nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Construct a lookup map that coalesces all of the sessions for a
|
||||
// specific watchtower.
|
||||
towerSessions := make(
|
||||
map[wtdb.TowerID]map[wtdb.SessionID]*wtdb.ClientSession,
|
||||
)
|
||||
for id, s := range clientSessions {
|
||||
sessions, ok := towerSessions[s.TowerID]
|
||||
if !ok {
|
||||
sessions = make(map[wtdb.SessionID]*wtdb.ClientSession)
|
||||
towerSessions[s.TowerID] = sessions
|
||||
}
|
||||
sessions[id] = s
|
||||
}
|
||||
|
||||
registeredTowers := make([]*RegisteredTower, 0, len(towerSessions))
|
||||
for _, tower := range towers {
|
||||
isActive := c.candidateTowers.IsActive(tower.ID)
|
||||
registeredTowers = append(registeredTowers, &RegisteredTower{
|
||||
Tower: tower,
|
||||
Sessions: towerSessions[tower.ID],
|
||||
ActiveSessionCandidate: isActive,
|
||||
})
|
||||
}
|
||||
|
||||
return registeredTowers, nil
|
||||
}
|
||||
|
||||
// LookupTower retrieves a registered watchtower through its public key.
|
||||
func (c *TowerClient) LookupTower(pubKey *btcec.PublicKey) (*RegisteredTower, error) {
|
||||
tower, err := c.cfg.DB.LoadTower(pubKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
towerSessions, err := c.cfg.DB.ListClientSessions(&tower.ID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &RegisteredTower{
|
||||
Tower: tower,
|
||||
Sessions: towerSessions,
|
||||
ActiveSessionCandidate: c.candidateTowers.IsActive(tower.ID),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Stats returns the in-memory statistics of the client since startup.
|
||||
func (c *TowerClient) Stats() ClientStats {
|
||||
return c.stats.Copy()
|
||||
}
|
||||
|
||||
// Policy returns the active client policy configuration.
|
||||
func (c *TowerClient) Policy() wtpolicy.Policy {
|
||||
return c.cfg.Policy
|
||||
}
|
||||
|
||||
// logMessage writes information about a message received from a remote peer,
|
||||
// using directional prepositions to signal whether the message was sent or
|
||||
// received.
|
||||
|
@ -26,7 +26,11 @@ import (
|
||||
"github.com/lightningnetwork/lnd/watchtower/wtserver"
|
||||
)
|
||||
|
||||
const csvDelay uint32 = 144
|
||||
const (
|
||||
csvDelay uint32 = 144
|
||||
|
||||
towerAddrStr = "18.28.243.2:9911"
|
||||
)
|
||||
|
||||
var (
|
||||
revPrivBytes = []byte{
|
||||
@ -387,7 +391,6 @@ type harnessCfg struct {
|
||||
}
|
||||
|
||||
func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
|
||||
towerAddrStr := "18.28.243.2:9911"
|
||||
towerTCPAddr, err := net.ResolveTCPAddr("tcp", towerAddrStr)
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to resolve tower TCP addr: %v", err)
|
||||
@ -412,6 +415,7 @@ func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
|
||||
DB: serverDB,
|
||||
ReadTimeout: timeout,
|
||||
WriteTimeout: timeout,
|
||||
NodePrivKey: privKey,
|
||||
NewAddress: func() (btcutil.Address, error) {
|
||||
return addr, nil
|
||||
},
|
||||
@ -435,7 +439,6 @@ func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
|
||||
DB: clientDB,
|
||||
AuthDial: mockNet.AuthDial,
|
||||
SecretKeyRing: wtmock.NewSecretKeyRing(),
|
||||
PrivateTower: towerAddr,
|
||||
Policy: cfg.policy,
|
||||
NewAddress: func() ([]byte, error) {
|
||||
return addrScript, nil
|
||||
@ -458,6 +461,10 @@ func newHarness(t *testing.T, cfg harnessCfg) *testHarness {
|
||||
server.Stop()
|
||||
t.Fatalf("Unable to start wtclient: %v", err)
|
||||
}
|
||||
if err := client.AddTower(towerAddr); err != nil {
|
||||
server.Stop()
|
||||
t.Fatalf("Unable to add tower to wtclient: %v", err)
|
||||
}
|
||||
|
||||
h := &testHarness{
|
||||
t: t,
|
||||
@ -505,7 +512,15 @@ func (h *testHarness) startServer() {
|
||||
func (h *testHarness) startClient() {
|
||||
h.t.Helper()
|
||||
|
||||
var err error
|
||||
towerTCPAddr, err := net.ResolveTCPAddr("tcp", towerAddrStr)
|
||||
if err != nil {
|
||||
h.t.Fatalf("Unable to resolve tower TCP addr: %v", err)
|
||||
}
|
||||
towerAddr := &lnwire.NetAddress{
|
||||
IdentityKey: h.serverCfg.NodePrivKey.PubKey(),
|
||||
Address: towerTCPAddr,
|
||||
}
|
||||
|
||||
h.client, err = wtclient.New(h.clientCfg)
|
||||
if err != nil {
|
||||
h.t.Fatalf("unable to create wtclient: %v", err)
|
||||
@ -513,6 +528,9 @@ func (h *testHarness) startClient() {
|
||||
if err := h.client.Start(); err != nil {
|
||||
h.t.Fatalf("unable to start wtclient: %v", err)
|
||||
}
|
||||
if err := h.client.AddTower(towerAddr); err != nil {
|
||||
h.t.Fatalf("unable to add tower to wtclient: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// chanIDFromInt creates a unique channel id given a unique integral id.
|
||||
|
@ -17,11 +17,31 @@ type DB interface {
|
||||
// CreateTower initialize an address record used to communicate with a
|
||||
// watchtower. Each Tower is assigned a unique ID, that is used to
|
||||
// amortize storage costs of the public key when used by multiple
|
||||
// sessions.
|
||||
// sessions. If the tower already exists, the address is appended to the
|
||||
// list of all addresses used to that tower previously and its
|
||||
// corresponding sessions are marked as active.
|
||||
CreateTower(*lnwire.NetAddress) (*wtdb.Tower, error)
|
||||
|
||||
// LoadTower retrieves a tower by its tower ID.
|
||||
LoadTower(wtdb.TowerID) (*wtdb.Tower, error)
|
||||
// RemoveTower modifies a tower's record within the database. If an
|
||||
// address is provided, then _only_ the address record should be removed
|
||||
// from the tower's persisted state. Otherwise, we'll attempt to mark
|
||||
// the tower as inactive by marking all of its sessions inactive. If any
|
||||
// of its sessions has unacked updates, then ErrTowerUnackedUpdates is
|
||||
// returned. If the tower doesn't have any sessions at all, it'll be
|
||||
// completely removed from the database.
|
||||
//
|
||||
// NOTE: An error is not returned if the tower doesn't exist.
|
||||
RemoveTower(*btcec.PublicKey, net.Addr) error
|
||||
|
||||
// LoadTower retrieves a tower by its public key.
|
||||
LoadTower(*btcec.PublicKey) (*wtdb.Tower, error)
|
||||
|
||||
// LoadTowerByID retrieves a tower by its tower ID.
|
||||
LoadTowerByID(wtdb.TowerID) (*wtdb.Tower, error)
|
||||
|
||||
// ListTowers retrieves the list of towers available within the
|
||||
// database.
|
||||
ListTowers() ([]*wtdb.Tower, error)
|
||||
|
||||
// NextSessionKeyIndex reserves a new session key derivation index for a
|
||||
// particular tower id. The index is reserved for that tower until
|
||||
@ -38,8 +58,10 @@ type DB interface {
|
||||
|
||||
// ListClientSessions returns all sessions that have not yet been
|
||||
// exhausted. This is used on startup to find any sessions which may
|
||||
// still be able to accept state updates.
|
||||
ListClientSessions() (map[wtdb.SessionID]*wtdb.ClientSession, error)
|
||||
// still be able to accept state updates. An optional tower ID can be
|
||||
// used to filter out any client sessions in the response that do not
|
||||
// correspond to this tower.
|
||||
ListClientSessions(*wtdb.TowerID) (map[wtdb.SessionID]*wtdb.ClientSession, error)
|
||||
|
||||
// FetchChanSummaries loads a mapping from all registered channels to
|
||||
// their channel summaries.
|
||||
|
@ -240,9 +240,6 @@ retryWithBackoff:
|
||||
}
|
||||
}
|
||||
|
||||
// Before attempting a bout of session negotiation, reset the candidate
|
||||
// iterator to ensure the results are fresh.
|
||||
n.cfg.Candidates.Reset()
|
||||
for {
|
||||
select {
|
||||
case <-n.quit:
|
||||
@ -267,6 +264,13 @@ retryWithBackoff:
|
||||
log.Debugf("Unable to get new tower candidate, "+
|
||||
"retrying after %v -- reason: %v", backoff, err)
|
||||
|
||||
// Only reset the iterator once we've exhausted all
|
||||
// candidates. Doing so allows us to load balance
|
||||
// sessions better amongst all of the tower candidates.
|
||||
if err == ErrTowerCandidatesExhausted {
|
||||
n.cfg.Candidates.Reset()
|
||||
}
|
||||
|
||||
goto retryWithBackoff
|
||||
}
|
||||
|
||||
|
@ -145,9 +145,6 @@ func newSessionQueue(cfg *sessionQueueConfig) *sessionQueue {
|
||||
// backups.
|
||||
func (q *sessionQueue) Start() {
|
||||
q.started.Do(func() {
|
||||
// TODO(conner): load prior committed state updates from disk an
|
||||
// populate in queue.
|
||||
|
||||
go q.sessionManager()
|
||||
})
|
||||
}
|
||||
|
@ -1,51 +1,97 @@
|
||||
package wtclient
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type clientStats struct {
|
||||
numTasksReceived int
|
||||
numTasksAccepted int
|
||||
numTasksIneligible int
|
||||
numSessionsAcquired int
|
||||
numSessionsExhausted int
|
||||
// ClientStats is a collection of in-memory statistics of the actions the client
|
||||
// has performed since its creation.
|
||||
type ClientStats struct {
|
||||
mu sync.Mutex
|
||||
|
||||
// NumTasksReceived is the total number of backups that are pending to
|
||||
// be acknowledged by all active and exhausted watchtower sessions.
|
||||
NumTasksReceived int
|
||||
|
||||
// NumTasksAccepted is the total number of backups made to all active
|
||||
// and exhausted watchtower sessions.
|
||||
NumTasksAccepted int
|
||||
|
||||
// NumTasksIneligible is the total number of backups that all active and
|
||||
// exhausted watchtower sessions have failed to acknowledge.
|
||||
NumTasksIneligible int
|
||||
|
||||
// NumSessionsAcquired is the total number of new sessions made to
|
||||
// watchtowers.
|
||||
NumSessionsAcquired int
|
||||
|
||||
// NumSessionsExhausted is the total number of watchtower sessions that
|
||||
// have been exhausted.
|
||||
NumSessionsExhausted int
|
||||
}
|
||||
|
||||
// taskReceived increments the number to backup requests the client has received
|
||||
// from active channels.
|
||||
func (s *clientStats) taskReceived() {
|
||||
s.numTasksReceived++
|
||||
func (s *ClientStats) taskReceived() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.NumTasksReceived++
|
||||
}
|
||||
|
||||
// taskAccepted increments the number of tasks that have been assigned to active
|
||||
// session queues, and are awaiting upload to a tower.
|
||||
func (s *clientStats) taskAccepted() {
|
||||
s.numTasksAccepted++
|
||||
func (s *ClientStats) taskAccepted() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.NumTasksAccepted++
|
||||
}
|
||||
|
||||
// taskIneligible increments the number of tasks that were unable to satisfy the
|
||||
// active session queue's policy. These can potentially be retried later, but
|
||||
// typically this means that the balance created dust outputs, so it may not be
|
||||
// worth backing up at all.
|
||||
func (s *clientStats) taskIneligible() {
|
||||
s.numTasksIneligible++
|
||||
func (s *ClientStats) taskIneligible() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.NumTasksIneligible++
|
||||
}
|
||||
|
||||
// sessionAcquired increments the number of sessions that have been successfully
|
||||
// negotiated by the client during this execution.
|
||||
func (s *clientStats) sessionAcquired() {
|
||||
s.numSessionsAcquired++
|
||||
func (s *ClientStats) sessionAcquired() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.NumSessionsAcquired++
|
||||
}
|
||||
|
||||
// sessionExhausted increments the number of session that have become full as a
|
||||
// result of accepting backup tasks.
|
||||
func (s *clientStats) sessionExhausted() {
|
||||
s.numSessionsExhausted++
|
||||
func (s *ClientStats) sessionExhausted() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.NumSessionsExhausted++
|
||||
}
|
||||
|
||||
// String returns a human readable summary of the client's metrics.
|
||||
func (s clientStats) String() string {
|
||||
func (s *ClientStats) String() string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return fmt.Sprintf("tasks(received=%d accepted=%d ineligible=%d) "+
|
||||
"sessions(acquired=%d exhausted=%d)", s.numTasksReceived,
|
||||
s.numTasksAccepted, s.numTasksIneligible, s.numSessionsAcquired,
|
||||
s.numSessionsExhausted)
|
||||
"sessions(acquired=%d exhausted=%d)", s.NumTasksReceived,
|
||||
s.NumTasksAccepted, s.NumTasksIneligible, s.NumSessionsAcquired,
|
||||
s.NumSessionsExhausted)
|
||||
}
|
||||
|
||||
// Copy returns a copy of the current stats.
|
||||
func (s *ClientStats) Copy() ClientStats {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return ClientStats{
|
||||
NumTasksReceived: s.NumTasksReceived,
|
||||
NumTasksAccepted: s.NumTasksAccepted,
|
||||
NumTasksIneligible: s.NumTasksIneligible,
|
||||
NumSessionsAcquired: s.NumSessionsAcquired,
|
||||
NumSessionsExhausted: s.NumSessionsExhausted,
|
||||
}
|
||||
}
|
||||
|
@ -7,6 +7,7 @@ import (
|
||||
"math"
|
||||
"net"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/coreos/bbolt"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
)
|
||||
@ -55,6 +56,11 @@ var (
|
||||
// database.
|
||||
ErrTowerNotFound = errors.New("tower not found")
|
||||
|
||||
// ErrTowerUnackedUpdates is an error returned when we attempt to mark a
|
||||
// tower's sessions as inactive, but one of its sessions has unacked
|
||||
// updates.
|
||||
ErrTowerUnackedUpdates = errors.New("tower has unacked updates")
|
||||
|
||||
// ErrCorruptClientSession signals that the client session's on-disk
|
||||
// structure deviates from what is expected.
|
||||
ErrCorruptClientSession = errors.New("client session corrupted")
|
||||
@ -199,9 +205,11 @@ func (c *ClientDB) Close() error {
|
||||
return c.db.Close()
|
||||
}
|
||||
|
||||
// CreateTower initializes a database entry with the given lightning address. If
|
||||
// the tower exists, the address is append to the list of all addresses used to
|
||||
// that tower previously.
|
||||
// CreateTower initialize an address record used to communicate with a
|
||||
// watchtower. Each Tower is assigned a unique ID, that is used to amortize
|
||||
// storage costs of the public key when used by multiple sessions. If the tower
|
||||
// already exists, the address is appended to the list of all addresses used to
|
||||
// that tower previously and its corresponding sessions are marked as active.
|
||||
func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) {
|
||||
var towerPubKey [33]byte
|
||||
copy(towerPubKey[:], lnAddr.IdentityKey.SerializeCompressed())
|
||||
@ -233,6 +241,32 @@ func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) {
|
||||
// address is a duplicate, this will result in no
|
||||
// change.
|
||||
tower.AddAddress(lnAddr.Address)
|
||||
|
||||
// If there are any client sessions that correspond to
|
||||
// this tower, we'll mark them as active to ensure we
|
||||
// load them upon restarts.
|
||||
//
|
||||
// TODO(wilmer): with an index of tower -> sessions we
|
||||
// can avoid the linear lookup.
|
||||
sessions := tx.Bucket(cSessionBkt)
|
||||
if sessions == nil {
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
towerID := TowerIDFromBytes(towerIDBytes)
|
||||
towerSessions, err := listClientSessions(
|
||||
sessions, &towerID,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, session := range towerSessions {
|
||||
err := markSessionStatus(
|
||||
sessions, session, CSessionActive,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No such tower exists, create a new tower id for our
|
||||
// new tower. The error is unhandled since NextSequence
|
||||
@ -265,8 +299,89 @@ func (c *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*Tower, error) {
|
||||
return tower, nil
|
||||
}
|
||||
|
||||
// LoadTower retrieves a tower by its tower ID.
|
||||
func (c *ClientDB) LoadTower(towerID TowerID) (*Tower, error) {
|
||||
// RemoveTower modifies a tower's record within the database. If an address is
|
||||
// provided, then _only_ the address record should be removed from the tower's
|
||||
// persisted state. Otherwise, we'll attempt to mark the tower as inactive by
|
||||
// marking all of its sessions inactive. If any of its sessions has unacked
|
||||
// updates, then ErrTowerUnackedUpdates is returned. If the tower doesn't have
|
||||
// any sessions at all, it'll be completely removed from the database.
|
||||
//
|
||||
// NOTE: An error is not returned if the tower doesn't exist.
|
||||
func (c *ClientDB) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error {
|
||||
return c.db.Update(func(tx *bbolt.Tx) error {
|
||||
towers := tx.Bucket(cTowerBkt)
|
||||
if towers == nil {
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
towerIndex := tx.Bucket(cTowerIndexBkt)
|
||||
if towerIndex == nil {
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
|
||||
// Don't return an error if the watchtower doesn't exist to act
|
||||
// as a NOP.
|
||||
pubKeyBytes := pubKey.SerializeCompressed()
|
||||
towerIDBytes := towerIndex.Get(pubKeyBytes)
|
||||
if towerIDBytes == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If an address is provided, then we should _only_ remove the
|
||||
// address record from the database.
|
||||
if addr != nil {
|
||||
tower, err := getTower(towers, towerIDBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tower.RemoveAddress(addr)
|
||||
return putTower(towers, tower)
|
||||
}
|
||||
|
||||
// Otherwise, we should attempt to mark the tower's sessions as
|
||||
// inactive.
|
||||
//
|
||||
// TODO(wilmer): with an index of tower -> sessions we can avoid
|
||||
// the linear lookup.
|
||||
sessions := tx.Bucket(cSessionBkt)
|
||||
if sessions == nil {
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
towerID := TowerIDFromBytes(towerIDBytes)
|
||||
towerSessions, err := listClientSessions(sessions, &towerID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If it doesn't have any, we can completely remove it from the
|
||||
// database.
|
||||
if len(towerSessions) == 0 {
|
||||
if err := towerIndex.Delete(pubKeyBytes); err != nil {
|
||||
return err
|
||||
}
|
||||
return towers.Delete(towerIDBytes)
|
||||
}
|
||||
|
||||
// We'll mark its sessions as inactive as long as they don't
|
||||
// have any pending updates to ensure we don't load them upon
|
||||
// restarts.
|
||||
for _, session := range towerSessions {
|
||||
if len(session.CommittedUpdates) > 0 {
|
||||
return ErrTowerUnackedUpdates
|
||||
}
|
||||
err := markSessionStatus(
|
||||
sessions, session, CSessionInactive,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// LoadTowerByID retrieves a tower by its tower ID.
|
||||
func (c *ClientDB) LoadTowerByID(towerID TowerID) (*Tower, error) {
|
||||
var tower *Tower
|
||||
err := c.db.View(func(tx *bbolt.Tx) error {
|
||||
towers := tx.Bucket(cTowerBkt)
|
||||
@ -285,6 +400,60 @@ func (c *ClientDB) LoadTower(towerID TowerID) (*Tower, error) {
|
||||
return tower, nil
|
||||
}
|
||||
|
||||
// LoadTower retrieves a tower by its public key.
|
||||
func (c *ClientDB) LoadTower(pubKey *btcec.PublicKey) (*Tower, error) {
|
||||
var tower *Tower
|
||||
err := c.db.View(func(tx *bbolt.Tx) error {
|
||||
towers := tx.Bucket(cTowerBkt)
|
||||
if towers == nil {
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
towerIndex := tx.Bucket(cTowerIndexBkt)
|
||||
if towerIndex == nil {
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
|
||||
towerIDBytes := towerIndex.Get(pubKey.SerializeCompressed())
|
||||
if towerIDBytes == nil {
|
||||
return ErrTowerNotFound
|
||||
}
|
||||
|
||||
var err error
|
||||
tower, err = getTower(towers, towerIDBytes)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return tower, nil
|
||||
}
|
||||
|
||||
// ListTowers retrieves the list of towers available within the database.
|
||||
func (c *ClientDB) ListTowers() ([]*Tower, error) {
|
||||
var towers []*Tower
|
||||
err := c.db.View(func(tx *bbolt.Tx) error {
|
||||
towerBucket := tx.Bucket(cTowerBkt)
|
||||
if towerBucket == nil {
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
|
||||
return towerBucket.ForEach(func(towerIDBytes, _ []byte) error {
|
||||
tower, err := getTower(towerBucket, towerIDBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
towers = append(towers, tower)
|
||||
return nil
|
||||
})
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return towers, nil
|
||||
}
|
||||
|
||||
// NextSessionKeyIndex reserves a new session key derivation index for a
|
||||
// particular tower id. The index is reserved for that tower until
|
||||
// CreateClientSession is invoked for that tower and index, at which point a new
|
||||
@ -384,29 +553,53 @@ func (c *ClientDB) CreateClientSession(session *ClientSession) error {
|
||||
})
|
||||
}
|
||||
|
||||
// ListClientSessions returns the set of all client sessions known to the db.
|
||||
func (c *ClientDB) ListClientSessions() (map[SessionID]*ClientSession, error) {
|
||||
clientSessions := make(map[SessionID]*ClientSession)
|
||||
// ListClientSessions returns the set of all client sessions known to the db. An
|
||||
// optional tower ID can be used to filter out any client sessions in the
|
||||
// response that do not correspond to this tower.
|
||||
func (c *ClientDB) ListClientSessions(id *TowerID) (map[SessionID]*ClientSession, error) {
|
||||
var clientSessions map[SessionID]*ClientSession
|
||||
err := c.db.View(func(tx *bbolt.Tx) error {
|
||||
sessions := tx.Bucket(cSessionBkt)
|
||||
if sessions == nil {
|
||||
return ErrUninitializedDB
|
||||
}
|
||||
var err error
|
||||
clientSessions, err = listClientSessions(sessions, id)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sessions.ForEach(func(k, _ []byte) error {
|
||||
// We'll load the full client session since the client
|
||||
// will need the CommittedUpdates and AckedUpdates on
|
||||
// startup to resume committed updates and compute the
|
||||
// highest known commit height for each channel.
|
||||
session, err := getClientSession(sessions, k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return clientSessions, nil
|
||||
}
|
||||
|
||||
clientSessions[session.ID] = session
|
||||
// listClientSessions returns the set of all client sessions known to the db. An
|
||||
// optional tower ID can be used to filter out any client sessions in the
|
||||
// response that do not correspond to this tower.
|
||||
func listClientSessions(sessions *bbolt.Bucket,
|
||||
id *TowerID) (map[SessionID]*ClientSession, error) {
|
||||
|
||||
clientSessions := make(map[SessionID]*ClientSession)
|
||||
err := sessions.ForEach(func(k, _ []byte) error {
|
||||
// We'll load the full client session since the client will need
|
||||
// the CommittedUpdates and AckedUpdates on startup to resume
|
||||
// committed updates and compute the highest known commit height
|
||||
// for each channel.
|
||||
session, err := getClientSession(sessions, k)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Filter out any sessions that don't correspond to the given
|
||||
// tower if one was set.
|
||||
if id != nil && session.TowerID != *id {
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
clientSessions[session.ID] = session
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -847,6 +1040,15 @@ func putClientSessionBody(sessions *bbolt.Bucket,
|
||||
return sessionBkt.Put(cSessionBody, b.Bytes())
|
||||
}
|
||||
|
||||
// markSessionStatus updates the persisted state of the session to the new
|
||||
// status.
|
||||
func markSessionStatus(sessions *bbolt.Bucket, session *ClientSession,
|
||||
status CSessionStatus) error {
|
||||
|
||||
session.Status = status
|
||||
return putClientSessionBody(sessions, session)
|
||||
}
|
||||
|
||||
// getChanSummary loads a ClientChanSummary for the passed chanID.
|
||||
func getChanSummary(chanSummaries *bbolt.Bucket,
|
||||
chanID lnwire.ChannelID) (*ClientChanSummary, error) {
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/lightningnetwork/lnd/watchtower/blob"
|
||||
"github.com/lightningnetwork/lnd/watchtower/wtclient"
|
||||
@ -48,10 +49,10 @@ func (h *clientDBHarness) insertSession(session *wtdb.ClientSession, expErr erro
|
||||
}
|
||||
}
|
||||
|
||||
func (h *clientDBHarness) listSessions() map[wtdb.SessionID]*wtdb.ClientSession {
|
||||
func (h *clientDBHarness) listSessions(id *wtdb.TowerID) map[wtdb.SessionID]*wtdb.ClientSession {
|
||||
h.t.Helper()
|
||||
|
||||
sessions, err := h.db.ListClientSessions()
|
||||
sessions, err := h.db.ListClientSessions(id)
|
||||
if err != nil {
|
||||
h.t.Fatalf("unable to list client sessions: %v", err)
|
||||
}
|
||||
@ -89,13 +90,81 @@ func (h *clientDBHarness) createTower(lnAddr *lnwire.NetAddress,
|
||||
h.t.Fatalf("tower id should never be 0")
|
||||
}
|
||||
|
||||
for _, session := range h.listSessions(&tower.ID) {
|
||||
if session.Status != wtdb.CSessionActive {
|
||||
h.t.Fatalf("expected status for session %v to be %v, "+
|
||||
"got %v", session.ID, wtdb.CSessionActive,
|
||||
session.Status)
|
||||
}
|
||||
}
|
||||
|
||||
return tower
|
||||
}
|
||||
|
||||
func (h *clientDBHarness) loadTower(id wtdb.TowerID, expErr error) *wtdb.Tower {
|
||||
func (h *clientDBHarness) removeTower(pubKey *btcec.PublicKey, addr net.Addr,
|
||||
hasSessions bool, expErr error) {
|
||||
|
||||
h.t.Helper()
|
||||
|
||||
tower, err := h.db.LoadTower(id)
|
||||
if err := h.db.RemoveTower(pubKey, addr); err != expErr {
|
||||
h.t.Fatalf("expected remove tower error: %v, got %v", expErr, err)
|
||||
}
|
||||
if expErr != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if addr != nil {
|
||||
tower, err := h.db.LoadTower(pubKey)
|
||||
if err != nil {
|
||||
h.t.Fatalf("expected tower %x to still exist",
|
||||
pubKey.SerializeCompressed())
|
||||
}
|
||||
|
||||
removedAddr := addr.String()
|
||||
for _, towerAddr := range tower.Addresses {
|
||||
if towerAddr.String() == removedAddr {
|
||||
h.t.Fatalf("address %v not removed for tower %x",
|
||||
removedAddr, pubKey.SerializeCompressed())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tower, err := h.db.LoadTower(pubKey)
|
||||
if hasSessions && err != nil {
|
||||
h.t.Fatalf("expected tower %x with sessions to still "+
|
||||
"exist", pubKey.SerializeCompressed())
|
||||
}
|
||||
if !hasSessions && err == nil {
|
||||
h.t.Fatalf("expected tower %x with no sessions to not "+
|
||||
"exist", pubKey.SerializeCompressed())
|
||||
}
|
||||
if !hasSessions {
|
||||
return
|
||||
}
|
||||
for _, session := range h.listSessions(&tower.ID) {
|
||||
if session.Status != wtdb.CSessionInactive {
|
||||
h.t.Fatalf("expected status for session %v to "+
|
||||
"be %v, got %v", session.ID,
|
||||
wtdb.CSessionInactive, session.Status)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *clientDBHarness) loadTower(pubKey *btcec.PublicKey, expErr error) *wtdb.Tower {
|
||||
h.t.Helper()
|
||||
|
||||
tower, err := h.db.LoadTower(pubKey)
|
||||
if err != expErr {
|
||||
h.t.Fatalf("expected load tower error: %v, got: %v", expErr, err)
|
||||
}
|
||||
|
||||
return tower
|
||||
}
|
||||
|
||||
func (h *clientDBHarness) loadTowerByID(id wtdb.TowerID, expErr error) *wtdb.Tower {
|
||||
h.t.Helper()
|
||||
|
||||
tower, err := h.db.LoadTowerByID(id)
|
||||
if err != expErr {
|
||||
h.t.Fatalf("expected load tower error: %v, got: %v", expErr, err)
|
||||
}
|
||||
@ -172,7 +241,7 @@ func testCreateClientSession(h *clientDBHarness) {
|
||||
|
||||
// First, assert that this session is not already present in the
|
||||
// database.
|
||||
if _, ok := h.listSessions()[session.ID]; ok {
|
||||
if _, ok := h.listSessions(nil)[session.ID]; ok {
|
||||
h.t.Fatalf("session for id %x should not exist yet", session.ID)
|
||||
}
|
||||
|
||||
@ -202,7 +271,7 @@ func testCreateClientSession(h *clientDBHarness) {
|
||||
h.insertSession(session, nil)
|
||||
|
||||
// Verify that the session now exists in the database.
|
||||
if _, ok := h.listSessions()[session.ID]; !ok {
|
||||
if _, ok := h.listSessions(nil)[session.ID]; !ok {
|
||||
h.t.Fatalf("session for id %x should exist now", session.ID)
|
||||
}
|
||||
|
||||
@ -218,12 +287,57 @@ func testCreateClientSession(h *clientDBHarness) {
|
||||
}
|
||||
}
|
||||
|
||||
// testFilterClientSessions asserts that we can correctly filter client sessions
|
||||
// for a specific tower.
|
||||
func testFilterClientSessions(h *clientDBHarness) {
|
||||
// We'll create three client sessions, the first two belonging to one
|
||||
// tower, and the last belonging to another one.
|
||||
const numSessions = 3
|
||||
towerSessions := make(map[wtdb.TowerID][]wtdb.SessionID)
|
||||
for i := 0; i < numSessions; i++ {
|
||||
towerID := wtdb.TowerID(1)
|
||||
if i == numSessions-1 {
|
||||
towerID = wtdb.TowerID(2)
|
||||
}
|
||||
keyIndex := h.nextKeyIndex(towerID, nil)
|
||||
sessionID := wtdb.SessionID([33]byte{byte(i)})
|
||||
h.insertSession(&wtdb.ClientSession{
|
||||
ClientSessionBody: wtdb.ClientSessionBody{
|
||||
TowerID: towerID,
|
||||
Policy: wtpolicy.Policy{
|
||||
MaxUpdates: 100,
|
||||
},
|
||||
RewardPkScript: []byte{0x01, 0x02, 0x03},
|
||||
KeyIndex: keyIndex,
|
||||
},
|
||||
ID: sessionID,
|
||||
}, nil)
|
||||
towerSessions[towerID] = append(towerSessions[towerID], sessionID)
|
||||
}
|
||||
|
||||
// We should see the expected sessions for each tower when filtering
|
||||
// them.
|
||||
for towerID, expectedSessions := range towerSessions {
|
||||
sessions := h.listSessions(&towerID)
|
||||
if len(sessions) != len(expectedSessions) {
|
||||
h.t.Fatalf("expected %v sessions for tower %v, got %v",
|
||||
len(expectedSessions), towerID, len(sessions))
|
||||
}
|
||||
for _, expectedSession := range expectedSessions {
|
||||
if _, ok := sessions[expectedSession]; !ok {
|
||||
h.t.Fatalf("expected session %v for tower %v",
|
||||
expectedSession, towerID)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// testCreateTower asserts the behavior of creating new Tower objects within the
|
||||
// database, and that the latest address is always prepended to the list of
|
||||
// known addresses for the tower.
|
||||
func testCreateTower(h *clientDBHarness) {
|
||||
// Test that loading a tower with an arbitrary tower id fails.
|
||||
h.loadTower(20, wtdb.ErrTowerNotFound)
|
||||
h.loadTowerByID(20, wtdb.ErrTowerNotFound)
|
||||
|
||||
pk, err := randPubKey()
|
||||
if err != nil {
|
||||
@ -241,7 +355,12 @@ func testCreateTower(h *clientDBHarness) {
|
||||
|
||||
// Load the tower from the database and assert that it matches the tower
|
||||
// we created.
|
||||
tower2 := h.loadTower(tower.ID, nil)
|
||||
tower2 := h.loadTowerByID(tower.ID, nil)
|
||||
if !reflect.DeepEqual(tower, tower2) {
|
||||
h.t.Fatalf("loaded tower mismatch, want: %v, got: %v",
|
||||
tower, tower2)
|
||||
}
|
||||
tower2 = h.loadTower(pk, err)
|
||||
if !reflect.DeepEqual(tower, tower2) {
|
||||
h.t.Fatalf("loaded tower mismatch, want: %v, got: %v",
|
||||
tower, tower2)
|
||||
@ -272,7 +391,12 @@ func testCreateTower(h *clientDBHarness) {
|
||||
|
||||
// Load the tower from the database, and assert that it matches the
|
||||
// tower returned from creation.
|
||||
towerNewAddr2 := h.loadTower(tower.ID, nil)
|
||||
towerNewAddr2 := h.loadTowerByID(tower.ID, nil)
|
||||
if !reflect.DeepEqual(towerNewAddr, towerNewAddr2) {
|
||||
h.t.Fatalf("loaded tower mismatch, want: %v, got: %v",
|
||||
towerNewAddr, towerNewAddr2)
|
||||
}
|
||||
towerNewAddr2 = h.loadTower(pk, nil)
|
||||
if !reflect.DeepEqual(towerNewAddr, towerNewAddr2) {
|
||||
h.t.Fatalf("loaded tower mismatch, want: %v, got: %v",
|
||||
towerNewAddr, towerNewAddr2)
|
||||
@ -290,6 +414,82 @@ func testCreateTower(h *clientDBHarness) {
|
||||
}
|
||||
}
|
||||
|
||||
// testRemoveTower asserts the behavior of removing Tower objects as a whole and
|
||||
// removing addresses from Tower objects within the database.
|
||||
func testRemoveTower(h *clientDBHarness) {
|
||||
// Generate a random public key we'll use for our tower.
|
||||
pk, err := randPubKey()
|
||||
if err != nil {
|
||||
h.t.Fatalf("unable to generate pubkey: %v", err)
|
||||
}
|
||||
|
||||
// Removing a tower that does not exist within the database should
|
||||
// result in a NOP.
|
||||
h.removeTower(pk, nil, false, nil)
|
||||
|
||||
// We'll create a tower with two addresses.
|
||||
addr1 := &net.TCPAddr{IP: []byte{0x01, 0x00, 0x00, 0x00}, Port: 9911}
|
||||
addr2 := &net.TCPAddr{IP: []byte{0x02, 0x00, 0x00, 0x00}, Port: 9911}
|
||||
h.createTower(&lnwire.NetAddress{
|
||||
IdentityKey: pk,
|
||||
Address: addr1,
|
||||
}, nil)
|
||||
h.createTower(&lnwire.NetAddress{
|
||||
IdentityKey: pk,
|
||||
Address: addr2,
|
||||
}, nil)
|
||||
|
||||
// We'll then remove the second address. We should now only see the
|
||||
// first.
|
||||
h.removeTower(pk, addr2, false, nil)
|
||||
|
||||
// We'll then remove the first address. We should now see that the tower
|
||||
// has no addresses left.
|
||||
h.removeTower(pk, addr1, false, nil)
|
||||
|
||||
// Removing the tower as a whole from the database should succeed since
|
||||
// there aren't any active sessions for it.
|
||||
h.removeTower(pk, nil, false, nil)
|
||||
|
||||
// We'll then recreate the tower, but this time we'll create a session
|
||||
// for it.
|
||||
tower := h.createTower(&lnwire.NetAddress{
|
||||
IdentityKey: pk,
|
||||
Address: addr1,
|
||||
}, nil)
|
||||
|
||||
session := &wtdb.ClientSession{
|
||||
ClientSessionBody: wtdb.ClientSessionBody{
|
||||
TowerID: tower.ID,
|
||||
Policy: wtpolicy.Policy{
|
||||
MaxUpdates: 100,
|
||||
},
|
||||
RewardPkScript: []byte{0x01, 0x02, 0x03},
|
||||
KeyIndex: h.nextKeyIndex(tower.ID, nil),
|
||||
},
|
||||
ID: wtdb.SessionID([33]byte{0x01}),
|
||||
}
|
||||
h.insertSession(session, nil)
|
||||
update := randCommittedUpdate(h.t, 1)
|
||||
h.commitUpdate(&session.ID, update, nil)
|
||||
|
||||
// We should not be able to fully remove it from the database since
|
||||
// there's a session and it has unacked updates.
|
||||
h.removeTower(pk, nil, true, wtdb.ErrTowerUnackedUpdates)
|
||||
|
||||
// Removing the tower after all sessions no longer have unacked updates
|
||||
// should result in the sessions becoming inactive.
|
||||
h.ackUpdate(&session.ID, 1, 1, nil)
|
||||
h.removeTower(pk, nil, true, nil)
|
||||
|
||||
// Creating the tower again should mark all of the sessions active once
|
||||
// again.
|
||||
h.createTower(&lnwire.NetAddress{
|
||||
IdentityKey: pk,
|
||||
Address: addr1,
|
||||
}, nil)
|
||||
}
|
||||
|
||||
// testChanSummaries tests the process of a registering a channel and its
|
||||
// associated sweep pkscript.
|
||||
func testChanSummaries(h *clientDBHarness) {
|
||||
@ -357,7 +557,7 @@ func testCommitUpdate(h *clientDBHarness) {
|
||||
// Assert that the committed update appears in the client session's
|
||||
// CommittedUpdates map when loaded from disk and that there are no
|
||||
// AckedUpdates.
|
||||
dbSession := h.listSessions()[session.ID]
|
||||
dbSession := h.listSessions(nil)[session.ID]
|
||||
checkCommittedUpdates(h.t, dbSession, []wtdb.CommittedUpdate{
|
||||
*update1,
|
||||
})
|
||||
@ -374,7 +574,7 @@ func testCommitUpdate(h *clientDBHarness) {
|
||||
}
|
||||
|
||||
// Assert that the loaded ClientSession is the same as before.
|
||||
dbSession = h.listSessions()[session.ID]
|
||||
dbSession = h.listSessions(nil)[session.ID]
|
||||
checkCommittedUpdates(h.t, dbSession, []wtdb.CommittedUpdate{
|
||||
*update1,
|
||||
})
|
||||
@ -396,7 +596,7 @@ func testCommitUpdate(h *clientDBHarness) {
|
||||
|
||||
// Check that both updates now appear as committed on the ClientSession
|
||||
// loaded from disk.
|
||||
dbSession = h.listSessions()[session.ID]
|
||||
dbSession = h.listSessions(nil)[session.ID]
|
||||
checkCommittedUpdates(h.t, dbSession, []wtdb.CommittedUpdate{
|
||||
*update1,
|
||||
*update2,
|
||||
@ -410,7 +610,7 @@ func testCommitUpdate(h *clientDBHarness) {
|
||||
h.commitUpdate(&session.ID, update4, wtdb.ErrCommitUnorderedUpdate)
|
||||
|
||||
// Assert that the ClientSession loaded from disk remains unchanged.
|
||||
dbSession = h.listSessions()[session.ID]
|
||||
dbSession = h.listSessions(nil)[session.ID]
|
||||
checkCommittedUpdates(h.t, dbSession, []wtdb.CommittedUpdate{
|
||||
*update1,
|
||||
*update2,
|
||||
@ -467,7 +667,7 @@ func testAckUpdate(h *clientDBHarness) {
|
||||
|
||||
// Assert that the ClientSession loaded from disk has one update in it's
|
||||
// AckedUpdates map, and that the committed update has been removed.
|
||||
dbSession := h.listSessions()[session.ID]
|
||||
dbSession := h.listSessions(nil)[session.ID]
|
||||
checkCommittedUpdates(h.t, dbSession, nil)
|
||||
checkAckedUpdates(h.t, dbSession, map[uint16]wtdb.BackupID{
|
||||
1: update1.BackupID,
|
||||
@ -487,7 +687,7 @@ func testAckUpdate(h *clientDBHarness) {
|
||||
h.ackUpdate(&session.ID, 2, 2, nil)
|
||||
|
||||
// Assert that both updates exist as AckedUpdates when loaded from disk.
|
||||
dbSession = h.listSessions()[session.ID]
|
||||
dbSession = h.listSessions(nil)[session.ID]
|
||||
checkCommittedUpdates(h.t, dbSession, nil)
|
||||
checkAckedUpdates(h.t, dbSession, map[uint16]wtdb.BackupID{
|
||||
1: update1.BackupID,
|
||||
@ -620,10 +820,18 @@ func TestClientDB(t *testing.T) {
|
||||
name: "create client session",
|
||||
run: testCreateClientSession,
|
||||
},
|
||||
{
|
||||
name: "filter client sessions",
|
||||
run: testFilterClientSessions,
|
||||
},
|
||||
{
|
||||
name: "create tower",
|
||||
run: testCreateTower,
|
||||
},
|
||||
{
|
||||
name: "remove tower",
|
||||
run: testRemoveTower,
|
||||
},
|
||||
{
|
||||
name: "chan summaries",
|
||||
run: testChanSummaries,
|
||||
|
@ -18,6 +18,10 @@ const (
|
||||
// CSessionActive indicates that the ClientSession is active and can be
|
||||
// used for backups.
|
||||
CSessionActive CSessionStatus = 0
|
||||
|
||||
// CSessionInactive indicates that the ClientSession is inactive and
|
||||
// cannot be used for backups.
|
||||
CSessionInactive CSessionStatus = 1
|
||||
)
|
||||
|
||||
// ClientSession encapsulates a SessionInfo returned from a successful
|
||||
|
@ -1,6 +1,8 @@
|
||||
package wtdb
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
@ -62,6 +64,19 @@ func (t *Tower) AddAddress(addr net.Addr) {
|
||||
t.Addresses = append([]net.Addr{addr}, t.Addresses...)
|
||||
}
|
||||
|
||||
// RemoveAddress removes the given address from the tower's in-memory list of
|
||||
// addresses. If the address doesn't exist, then this will act as a NOP.
|
||||
func (t *Tower) RemoveAddress(addr net.Addr) {
|
||||
addrStr := addr.String()
|
||||
for i, address := range t.Addresses {
|
||||
if address.String() != addrStr {
|
||||
continue
|
||||
}
|
||||
t.Addresses = append(t.Addresses[:i], t.Addresses[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// LNAddrs generates a list of lnwire.NetAddress from a Tower instance's
|
||||
// addresses. This can be used to have a client try multiple addresses for the
|
||||
// same Tower.
|
||||
@ -79,6 +94,15 @@ func (t *Tower) LNAddrs() []*lnwire.NetAddress {
|
||||
return addrs
|
||||
}
|
||||
|
||||
// String returns a user-friendly identifier of the tower.
|
||||
func (t *Tower) String() string {
|
||||
pubKey := hex.EncodeToString(t.IdentityKey.SerializeCompressed())
|
||||
if len(t.Addresses) == 0 {
|
||||
return pubKey
|
||||
}
|
||||
return fmt.Sprintf("%v@%v", pubKey, t.Addresses[0])
|
||||
}
|
||||
|
||||
// Encode writes the Tower to the passed io.Writer. The TowerID is not
|
||||
// serialized, since it acts as the key.
|
||||
func (t *Tower) Encode(w io.Writer) error {
|
||||
|
@ -5,6 +5,7 @@ import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec"
|
||||
"github.com/lightningnetwork/lnd/lnwire"
|
||||
"github.com/lightningnetwork/lnd/watchtower/wtdb"
|
||||
)
|
||||
@ -37,9 +38,11 @@ func NewClientDB() *ClientDB {
|
||||
}
|
||||
}
|
||||
|
||||
// CreateTower initializes a database entry with the given lightning address. If
|
||||
// the tower exists, the address is append to the list of all addresses used to
|
||||
// that tower previously.
|
||||
// CreateTower initialize an address record used to communicate with a
|
||||
// watchtower. Each Tower is assigned a unique ID, that is used to amortize
|
||||
// storage costs of the public key when used by multiple sessions. If the tower
|
||||
// already exists, the address is appended to the list of all addresses used to
|
||||
// that tower previously and its corresponding sessions are marked as active.
|
||||
func (m *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*wtdb.Tower, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
@ -52,6 +55,15 @@ func (m *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*wtdb.Tower, error) {
|
||||
if ok {
|
||||
tower = m.towers[towerID]
|
||||
tower.AddAddress(lnAddr.Address)
|
||||
|
||||
towerSessions, err := m.listClientSessions(&towerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for id, session := range towerSessions {
|
||||
session.Status = wtdb.CSessionActive
|
||||
m.activeSessions[id] = session
|
||||
}
|
||||
} else {
|
||||
towerID = wtdb.TowerID(atomic.AddUint64(&m.nextTowerID, 1))
|
||||
tower = &wtdb.Tower{
|
||||
@ -67,8 +79,83 @@ func (m *ClientDB) CreateTower(lnAddr *lnwire.NetAddress) (*wtdb.Tower, error) {
|
||||
return copyTower(tower), nil
|
||||
}
|
||||
|
||||
// LoadTower retrieves a tower by its tower ID.
|
||||
func (m *ClientDB) LoadTower(towerID wtdb.TowerID) (*wtdb.Tower, error) {
|
||||
// RemoveTower modifies a tower's record within the database. If an address is
|
||||
// provided, then _only_ the address record should be removed from the tower's
|
||||
// persisted state. Otherwise, we'll attempt to mark the tower as inactive by
|
||||
// marking all of its sessions inactive. If any of its sessions has unacked
|
||||
// updates, then ErrTowerUnackedUpdates is returned. If the tower doesn't have
|
||||
// any sessions at all, it'll be completely removed from the database.
|
||||
//
|
||||
// NOTE: An error is not returned if the tower doesn't exist.
|
||||
func (m *ClientDB) RemoveTower(pubKey *btcec.PublicKey, addr net.Addr) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
tower, err := m.loadTower(pubKey)
|
||||
if err == wtdb.ErrTowerNotFound {
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if addr != nil {
|
||||
tower.RemoveAddress(addr)
|
||||
m.towers[tower.ID] = tower
|
||||
return nil
|
||||
}
|
||||
|
||||
towerSessions, err := m.listClientSessions(&tower.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(towerSessions) == 0 {
|
||||
var towerPK towerPK
|
||||
copy(towerPK[:], pubKey.SerializeCompressed())
|
||||
delete(m.towerIndex, towerPK)
|
||||
delete(m.towers, tower.ID)
|
||||
return nil
|
||||
}
|
||||
|
||||
for id, session := range towerSessions {
|
||||
if len(session.CommittedUpdates) > 0 {
|
||||
return wtdb.ErrTowerUnackedUpdates
|
||||
}
|
||||
session.Status = wtdb.CSessionInactive
|
||||
m.activeSessions[id] = session
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadTower retrieves a tower by its public key.
|
||||
func (m *ClientDB) LoadTower(pubKey *btcec.PublicKey) (*wtdb.Tower, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.loadTower(pubKey)
|
||||
}
|
||||
|
||||
// loadTower retrieves a tower by its public key.
|
||||
//
|
||||
// NOTE: This method requires the database's lock to be acquired.
|
||||
func (m *ClientDB) loadTower(pubKey *btcec.PublicKey) (*wtdb.Tower, error) {
|
||||
var towerPK towerPK
|
||||
copy(towerPK[:], pubKey.SerializeCompressed())
|
||||
|
||||
towerID, ok := m.towerIndex[towerPK]
|
||||
if !ok {
|
||||
return nil, wtdb.ErrTowerNotFound
|
||||
}
|
||||
tower, ok := m.towers[towerID]
|
||||
if !ok {
|
||||
return nil, wtdb.ErrTowerNotFound
|
||||
}
|
||||
|
||||
return copyTower(tower), nil
|
||||
}
|
||||
|
||||
// LoadTowerByID retrieves a tower by its tower ID.
|
||||
func (m *ClientDB) LoadTowerByID(towerID wtdb.TowerID) (*wtdb.Tower, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
@ -79,6 +166,19 @@ func (m *ClientDB) LoadTower(towerID wtdb.TowerID) (*wtdb.Tower, error) {
|
||||
return nil, wtdb.ErrTowerNotFound
|
||||
}
|
||||
|
||||
// ListTowers retrieves the list of towers available within the database.
|
||||
func (m *ClientDB) ListTowers() ([]*wtdb.Tower, error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
towers := make([]*wtdb.Tower, 0, len(m.towers))
|
||||
for _, tower := range m.towers {
|
||||
towers = append(towers, copyTower(tower))
|
||||
}
|
||||
|
||||
return towers, nil
|
||||
}
|
||||
|
||||
// MarkBackupIneligible records that particular commit height is ineligible for
|
||||
// backup. This allows the client to track which updates it should not attempt
|
||||
// to retry after startup.
|
||||
@ -86,13 +186,28 @@ func (m *ClientDB) MarkBackupIneligible(chanID lnwire.ChannelID, commitHeight ui
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListClientSessions returns the set of all client sessions known to the db.
|
||||
func (m *ClientDB) ListClientSessions() (map[wtdb.SessionID]*wtdb.ClientSession, error) {
|
||||
// ListClientSessions returns the set of all client sessions known to the db. An
|
||||
// optional tower ID can be used to filter out any client sessions in the
|
||||
// response that do not correspond to this tower.
|
||||
func (m *ClientDB) ListClientSessions(
|
||||
tower *wtdb.TowerID) (map[wtdb.SessionID]*wtdb.ClientSession, error) {
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
return m.listClientSessions(tower)
|
||||
}
|
||||
|
||||
// listClientSessions returns the set of all client sessions known to the db. An
|
||||
// optional tower ID can be used to filter out any client sessions in the
|
||||
// response that do not correspond to this tower.
|
||||
func (m *ClientDB) listClientSessions(
|
||||
tower *wtdb.TowerID) (map[wtdb.SessionID]*wtdb.ClientSession, error) {
|
||||
|
||||
sessions := make(map[wtdb.SessionID]*wtdb.ClientSession)
|
||||
for _, session := range m.activeSessions {
|
||||
if tower != nil && *tower != session.TowerID {
|
||||
continue
|
||||
}
|
||||
sessions[session.ID] = session
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user